| prompt (string, 83–241k chars) | completion (string, 43–275k chars) | source (3 classes) |
|---|---|---|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
class SumAggregator(nn.Module):
def __init__(self):
super(SumAggregator, self).__init__()
def forward(self, neighbor):
return torch.sum(neighbor, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class SumAggregatorNew(nn.Module):
def __init__(self):
super(SumAggregatorNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import math
import torch
import torch.utils.data
import torch.nn as nn
class LinearEmbedding(nn.Module):
def __init__(self, inp_size, d_model):
super(LinearEmbedding, self).__init__()
self.lut = nn.Linear(inp_size, d_model)
self.d_model = d_model
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_model)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inp_size': 4, 'd_model': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class LinearEmbeddingNew(nn.Module):
def __init__(self, inp_size, d_model):
super(LinearEmbeddingNew, self).__init__()
self.lut = nn.Linear(inp_size, d_model)
self.d_model = d_model
def forward(self, input_0):
primals_1 = self.lut.weight
primals_2 = self.lut.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
class CustomizeLayer(nn.Module):
def __init__(self, in_dim):
super().__init__()
self.in_dim = in_dim
self.scale = nn.Parameter(torch.Tensor(self.in_dim))
self.bias = nn.Parameter(torch.Tensor(self.in_dim))
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()
x = torch.div(x, norm)
return x * self.scale + self.bias
def __repr__(self):
return 'CustomizeLayer(in_dim=%d)' % self.in_dim
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = xindex // 64
x5 = xindex % 16
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tmp15 = tmp13 * tmp14
tmp17 = tmp15 + tmp16
tl.store(out_ptr0 + x4, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_1,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class CustomizeLayerNew(nn.Module):
def __init__(self, in_dim):
super().__init__()
self.in_dim = in_dim
self.scale = nn.Parameter(torch.Tensor(self.in_dim))
self.bias = nn.Parameter(torch.Tensor(self.in_dim))
def __repr__(self):
return 'CustomizeLayer(in_dim=%d)' % self.in_dim
def forward(self, input_0):
primals_2 = self.scale
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, weights, eps=1e-05):
super().__init__()
self.gamma = nn.Parameter(torch.ones(weights))
self.beta = nn.Parameter(torch.zeros(weights))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'weights': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
def __init__(self, weights, eps=1e-05):
super().__init__()
self.gamma = nn.Parameter(torch.ones(weights))
self.beta = nn.Parameter(torch.zeros(weights))
self.eps = eps
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.utils.data
import torch.nn as nn
class LayerNorm(nn.Module):
"""
Construct a layernorm module (See citation for details).
"""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(dim=-1, keepdim=True)
std = x.std(dim=-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
"""
Construct a layernorm module (See citation for details).
"""
def __init__(self, features, eps=1e-06):
super(LayerNormNew, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, input_0):
primals_2 = self.a_2
primals_3 = self.b_2
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
class Norm(nn.Module):
def __init__(self, n_state, axis=-1, epsilon=1e-05):
super().__init__()
self.n_state = n_state
self.g = nn.Parameter(torch.ones([self.n_state]))
self.b = nn.Parameter(torch.zeros([self.n_state]))
self.axis = axis
self.epsilon = epsilon
def forward(self, x):
u = torch.mean(x, dim=self.axis, keepdim=True)
s = torch.mean(torch.square(x - u), dim=self.axis, keepdim=True)
x = (x - u) * torch.rsqrt(s + self.epsilon)
x = x * self.g + self.b
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_state': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-05
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp0 * tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mean_mul_pow_rsqrt_1[grid(256)](buf0,
primals_2, primals_3, buf1, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class NormNew(nn.Module):
def __init__(self, n_state, axis=-1, epsilon=1e-05):
super().__init__()
self.n_state = n_state
self.g = nn.Parameter(torch.ones([self.n_state]))
self.b = nn.Parameter(torch.zeros([self.n_state]))
self.axis = axis
self.epsilon = epsilon
def forward(self, input_0):
primals_2 = self.g
primals_3 = self.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
from torch import nn as nn
class BehlerAngular(nn.Module):
"""
Compute Behler type angular contribution of the angle spanned by three atoms:
:math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta`
Sets of zetas with lambdas of -1 and +1 are generated automatically.
Args:
zetas (set of int): Set of exponents used to compute angular Behler term (default={1})
"""
def __init__(self, zetas={1}):
super(BehlerAngular, self).__init__()
self.zetas = zetas
def forward(self, cos_theta):
"""
Args:
cos_theta (torch.Tensor): Cosines between all pairs of neighbors of the central atom.
Returns:
torch.Tensor: Tensor containing values of the angular filters.
"""
angular_pos = [(2 ** (1 - zeta) * ((1.0 - cos_theta) ** zeta).
unsqueeze(-1)) for zeta in self.zetas]
angular_neg = [(2 ** (1 - zeta) * ((1.0 + cos_theta) ** zeta).
unsqueeze(-1)) for zeta in self.zetas]
angular_all = angular_pos + angular_neg
return torch.cat(angular_all, -1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 1.0
tmp7 = tmp6 - tmp5
tmp8 = tmp7 * tmp6
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp14 = tl.load(in_ptr0 + x1, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp14 + tmp6
tmp16 = tmp15 * tmp6
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp10, tmp18)
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class BehlerAngularNew(nn.Module):
"""
Compute Behler type angular contribution of the angle spanned by three atoms:
:math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta`
Sets of zetas with lambdas of -1 and +1 are generated automatically.
Args:
zetas (set of int): Set of exponents used to compute angular Behler term (default={1})
"""
def __init__(self, zetas={1}):
super(BehlerAngularNew, self).__init__()
self.zetas = zetas
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
def init_layer(L):
if isinstance(L, nn.Conv2d):
n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
L.weight.data.normal_(0, math.sqrt(2.0 / float(n)))
elif isinstance(L, nn.BatchNorm2d):
L.weight.data.fill_(1)
L.bias.data.fill_(0)
class BottleneckBlock(nn.Module):
def __init__(self, indim, outdim, half_res):
super(BottleneckBlock, self).__init__()
bottleneckdim = int(outdim / 4)
self.indim = indim
self.outdim = outdim
self.C1 = nn.Conv2d(indim, bottleneckdim, kernel_size=1, bias=False)
self.BN1 = nn.Identity()
self.C2 = nn.Conv2d(bottleneckdim, bottleneckdim, kernel_size=3,
stride=2 if half_res else 1, padding=1)
self.BN2 = nn.Identity()
self.C3 = nn.Conv2d(bottleneckdim, outdim, kernel_size=1, bias=False)
self.BN3 = nn.Identity()
self.relu = nn.ReLU()
self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2,
self.C3, self.BN3]
self.half_res = half_res
if indim != outdim:
self.shortcut = nn.Conv2d(indim, outdim, 1, stride=2 if
half_res else 1, bias=False)
self.parametrized_layers.append(self.shortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
short_out = x if self.shortcut_type == 'identity' else self.shortcut(x)
out = self.C1(x)
out = self.BN1(out)
out = self.relu(out)
out = self.C2(out)
out = self.BN2(out)
out = self.relu(out)
out = self.C3(out)
out = self.BN3(out)
out = out + short_out
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 2, 2])]
def get_init_inputs():
return [[], {'indim': 4, 'outdim': 4, 'half_res': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 1, 1, 1), (1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 2, 2), (4, 4, 2, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(4)](buf3, primals_4, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del primals_4
buf4 = extern_kernels.convolution(buf3, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_2[grid(64)](buf4,
primals_1, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf4
return buf5, primals_1, primals_2, primals_3, primals_5, buf1, buf3, buf6
def init_layer(L):
if isinstance(L, nn.Conv2d):
n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
L.weight.data.normal_(0, math.sqrt(2.0 / float(n)))
elif isinstance(L, nn.BatchNorm2d):
L.weight.data.fill_(1)
L.bias.data.fill_(0)
class BottleneckBlockNew(nn.Module):
def __init__(self, indim, outdim, half_res):
super(BottleneckBlockNew, self).__init__()
bottleneckdim = int(outdim / 4)
self.indim = indim
self.outdim = outdim
self.C1 = nn.Conv2d(indim, bottleneckdim, kernel_size=1, bias=False)
self.BN1 = nn.Identity()
self.C2 = nn.Conv2d(bottleneckdim, bottleneckdim, kernel_size=3,
stride=2 if half_res else 1, padding=1)
self.BN2 = nn.Identity()
self.C3 = nn.Conv2d(bottleneckdim, outdim, kernel_size=1, bias=False)
self.BN3 = nn.Identity()
self.relu = nn.ReLU()
self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2,
self.C3, self.BN3]
self.half_res = half_res
if indim != outdim:
self.shortcut = nn.Conv2d(indim, outdim, 1, stride=2 if
half_res else 1, bias=False)
self.parametrized_layers.append(self.shortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, input_0):
primals_2 = self.C1.weight
primals_3 = self.C2.weight
primals_4 = self.C2.bias
primals_5 = self.C3.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
class Conv1d(nn.Module):
def __init__(self, nf, nx, stdev=0.02):
super().__init__()
self.nf = nf
self.nx = nx
self.stdev = stdev
self.w = nn.Parameter(torch.normal(size=[1, self.nx, self.nf], mean
=0.0, std=self.stdev))
self.b = nn.Parameter(torch.zeros([self.nf]))
def forward(self, x: 'torch.Tensor'):
shape = x.size()
start, nx = shape[:-1], shape[-1]
return torch.reshape(torch.matmul(torch.reshape(x, [-1, nx]), torch
.reshape(self.w, [-1, self.nf])) + self.b, start + (self.nf,))
class Mlp(nn.Module):
def __init__(self, input_dim, proj_dim):
super().__init__()
self.input_dim = input_dim
self.proj_dim = proj_dim
self.conv_fc = Conv1d(self.proj_dim, self.input_dim)
self.conv_proj = Conv1d(self.input_dim, self.proj_dim)
def forward(self, x):
h = nn.functional.gelu(self.conv_fc(x))
return self.conv_proj(h)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'proj_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (4, 1), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](buf0, primals_3, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_3, buf0, reinterpret_tensor(buf1, (4, 64), (1, 4), 0
), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
class Conv1d(nn.Module):
def __init__(self, nf, nx, stdev=0.02):
super().__init__()
self.nf = nf
self.nx = nx
self.stdev = stdev
self.w = nn.Parameter(torch.normal(size=[1, self.nx, self.nf], mean
=0.0, std=self.stdev))
self.b = nn.Parameter(torch.zeros([self.nf]))
def forward(self, x: 'torch.Tensor'):
shape = x.size()
start, nx = shape[:-1], shape[-1]
return torch.reshape(torch.matmul(torch.reshape(x, [-1, nx]), torch
.reshape(self.w, [-1, self.nf])) + self.b, start + (self.nf,))
class MlpNew(nn.Module):
def __init__(self, input_dim, proj_dim):
super().__init__()
self.input_dim = input_dim
self.proj_dim = proj_dim
self.conv_fc = Conv1d(self.proj_dim, self.input_dim)
self.conv_proj = Conv1d(self.input_dim, self.proj_dim)
def forward(self, input_0):
primals_2 = self.conv_fc.w
primals_3 = self.conv_fc.b
primals_4 = self.conv_proj.w
primals_5 = self.conv_proj.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
from torch.nn import Module
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class GraphConvolution(Module):
"""
A Graph Convolution Layer (GCN)
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.W = nn.Linear(in_features, out_features, bias=bias)
self.init()
def init(self):
stdv = 1.0 / math.sqrt(self.W.weight.size(1))
self.W.weight.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = self.W(input)
output = torch.spmm(adj, support)
return output
class GCN(nn.Module):
"""
A Two-layer GCN.
"""
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.gc2 = GraphConvolution(nhid, nclass)
self.dropout = dropout
self.elu = torch.nn.ELU(inplace=True)
def forward(self, x, adj, use_relu=True):
x = self.gc1(x, adj)
if use_relu:
x = self.elu(x)
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(in_out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_4, buf0, out=buf1)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_elu_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf3 = buf0
del buf0
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_4, buf3, out=buf4)
del buf3
return buf4, primals_3, buf2, reinterpret_tensor(primals_4, (4, 4), (1,
4), 0), primals_5
class GraphConvolution(Module):
"""
A Graph Convolution Layer (GCN)
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.W = nn.Linear(in_features, out_features, bias=bias)
self.init()
def init(self):
stdv = 1.0 / math.sqrt(self.W.weight.size(1))
self.W.weight.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = self.W(input)
output = torch.spmm(adj, support)
return output
class GCNNew(nn.Module):
"""
A Two-layer GCN.
"""
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCNNew, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.gc2 = GraphConvolution(nhid, nclass)
self.dropout = dropout
self.elu = torch.nn.ELU(inplace=True)
def forward(self, input_0, input_1):
primals_1 = self.gc1.W.weight
primals_2 = self.gc1.W.bias
primals_3 = self.gc2.W.weight
primals_6 = self.gc2.W.bias
primals_4 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
def init_layer(L):
if isinstance(L, nn.Conv2d):
n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
L.weight.data.normal_(0, math.sqrt(2.0 / float(n)))
elif isinstance(L, nn.BatchNorm2d):
L.weight.data.fill_(1)
L.bias.data.fill_(0)
class SimpleBlock(nn.Module):
maml = False
def __init__(self, indim, outdim, half_res):
super(SimpleBlock, self).__init__()
self.indim = indim
self.outdim = outdim
self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if
half_res else 1, padding=1, bias=False)
self.BN1 = nn.Identity()
self.C2 = nn.Conv2d(outdim, outdim, kernel_size=3, padding=1, bias=
False)
self.BN2 = nn.Identity()
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]
self.half_res = half_res
if indim != outdim:
self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else
1, bias=False)
self.BNshortcut = nn.Identity()
self.parametrized_layers.append(self.shortcut)
self.parametrized_layers.append(self.BNshortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
out = self.C1(x)
out = self.BN1(out)
out = self.relu1(out)
out = self.C2(out)
out = self.BN2(out)
short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(
self.shortcut(x))
out = out + short_out
out = self.relu2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 2, 2])]
def get_init_inputs():
return [[], {'indim': 4, 'outdim': 4, 'half_res': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(64)](buf2,
primals_2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
return buf3, primals_1, primals_2, primals_3, buf1, buf4
def init_layer(L):
if isinstance(L, nn.Conv2d):
n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
L.weight.data.normal_(0, math.sqrt(2.0 / float(n)))
elif isinstance(L, nn.BatchNorm2d):
L.weight.data.fill_(1)
L.bias.data.fill_(0)
class SimpleBlockNew(nn.Module):
maml = False
def __init__(self, indim, outdim, half_res):
super(SimpleBlockNew, self).__init__()
self.indim = indim
self.outdim = outdim
self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if
half_res else 1, padding=1, bias=False)
self.BN1 = nn.Identity()
self.C2 = nn.Conv2d(outdim, outdim, kernel_size=3, padding=1, bias=
False)
self.BN2 = nn.Identity()
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]
self.half_res = half_res
if indim != outdim:
self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else
1, bias=False)
self.BNshortcut = nn.Identity()
self.parametrized_layers.append(self.shortcut)
self.parametrized_layers.append(self.BNshortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, input_0):
primals_1 = self.C1.weight
primals_3 = self.C2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
from torch import nn as nn
class Aggregate(nn.Module):
"""Pooling layer based on sum or average with optional masking.
Args:
axis (int): axis along which pooling is done.
mean (bool, optional): if True, use average instead of sum pooling.
keepdim (bool, optional): whether the output tensor has dim retained or not.
"""
def __init__(self, axis, mean=False, keepdim=True):
super(Aggregate, self).__init__()
self.average = mean
self.axis = axis
self.keepdim = keepdim
def forward(self, input, mask=None):
"""Compute layer output.
Args:
input (torch.Tensor): input data.
mask (torch.Tensor, optional): mask to be applied; e.g. neighbors mask.
Returns:
torch.Tensor: layer output.
"""
if mask is not None:
input = input * mask[..., None]
y = torch.sum(input, self.axis)
if self.average:
if mask is not None:
N = torch.sum(mask, self.axis, keepdim=self.keepdim)
N = torch.max(N, other=torch.ones_like(N))
else:
N = input.size(self.axis)
y = y / N
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'axis': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class AggregateNew(nn.Module):
"""Pooling layer based on sum or average with optional masking.
Args:
axis (int): axis along which pooling is done.
mean (bool, optional): if True, use average instead of sum pooling.
keepdim (bool, optional): whether the output tensor has dim retained or not.
"""
def __init__(self, axis, mean=False, keepdim=True):
super(AggregateNew, self).__init__()
self.average = mean
self.axis = axis
self.keepdim = keepdim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
from torch import Tensor
from torch import nn
class KaggleAccuracy(nn.Module):
def __init__(self, threshold: 'float'=0.25, num_patches: 'int'=38, size:
'int'=418) ->None:
super().__init__()
self.threshold = threshold
self.num_patches = num_patches
self.patch_size = size // num_patches
self.resize = nn.Upsample(size=size)
self.unfold = nn.Unfold(kernel_size=self.patch_size, stride=self.
patch_size)
def forward(self, x: 'Tensor', y: 'Tensor') ->Tensor:
x = self.to_patch_value(x)
y = self.to_patch_value(y)
acc = torch.mean((x == y).float())
return acc
def to_patch_value(self, x: 'Tensor') ->Tensor:
x = x.float()
x = self.resize(x)
x = self.unfold(x)
x = x.mean(dim=1)
return (x > self.threshold).float()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import Tensor
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_red_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 5776
rnumel = 484
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 1444
x1 = xindex // 1444
_tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = 11 * (x0 // 38) + r2 // 11 % 11
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.009569377990430622
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = 11 * (x0 % 38) + r2 % 11
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * (r2 // 121) + 64 *
x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = _tmp11 + tmp10
_tmp11 = tl.where(rmask & xmask, tmp12, _tmp11)
tmp11 = tl.sum(_tmp11, 1)[:, None]
tl.store(out_ptr0 + (x0 + 1472 * x1), tmp11, xmask)
@triton.jit
def triton_red_fused__to_copy_eq_gt_mean_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 5776
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp13 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r0 = rindex % 1444
r1 = rindex // 1444
tmp0 = tl.load(in_ptr0 + (r0 + 1472 * r1), rmask, eviction_policy=
'evict_first', other=0.0)
tmp6 = tl.load(in_ptr1 + (r0 + 1472 * r1), rmask, eviction_policy=
'evict_first', other=0.0)
tmp1 = 484.0
tmp2 = tmp0 / tmp1
tmp3 = 0.25
tmp4 = tmp2 > tmp3
tmp5 = tmp4.to(tl.float32)
tmp7 = tmp6 / tmp1
tmp8 = tmp7 > tmp3
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp5 == tmp9
tmp11 = tmp10.to(tl.float32)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = _tmp13 + tmp12
_tmp13 = tl.where(rmask, tmp14, _tmp13)
tmp13 = tl.sum(_tmp13, 1)[:, None]
tmp15 = 5776.0
tmp16 = tmp13 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1444), (1472, 1), torch.float32)
get_raw_stream(0)
triton_red_fused_mean_0[grid(5776)](arg0_1, buf0, 5776, 484, XBLOCK
=64, RBLOCK=64, num_warps=16, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 1444), (1472, 1), torch.float32)
triton_red_fused_mean_0[grid(5776)](arg1_1, buf1, 5776, 484, XBLOCK
=64, RBLOCK=64, num_warps=16, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_red_fused__to_copy_eq_gt_mean_1[grid(1)](buf3, buf0, buf1, 1,
5776, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del buf0
del buf1
return buf3,
class KaggleAccuracyNew(nn.Module):
def __init__(self, threshold: 'float'=0.25, num_patches: 'int'=38, size:
'int'=418) ->None:
super().__init__()
self.threshold = threshold
self.num_patches = num_patches
self.patch_size = size // num_patches
self.resize = nn.Upsample(size=size)
self.unfold = nn.Unfold(kernel_size=self.patch_size, stride=self.
patch_size)
def to_patch_value(self, x: 'Tensor') ->Tensor:
x = x.float()
x = self.resize(x)
x = self.unfold(x)
x = x.mean(dim=1)
return (x > self.threshold).float()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
class ConstAttention(nn.Module):
def __init__(self, **kwargs):
super(ConstAttention, self).__init__()
def forward(self, neighbor_vecs, self_vecs):
return 1
class GatAttention(ConstAttention):
def __init__(self, num_heads, out_channels):
super(GatAttention, self).__init__()
self.num_heads = num_heads
self.out_channels = out_channels
self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, neighbor_vecs, self_vecs):
alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs
* self.att_neighbor_weight).sum(dim=-1)
alpha = F.leaky_relu(alpha, negative_slope=0.2)
return alpha
class CosAttention(GatAttention):
def forward(self, neighbor_vecs, self_vecs):
alpha = (neighbor_vecs * self.att_neighbor_weight * self_vecs *
self.att_self_weight)
alpha = alpha.sum(dim=-1)
return alpha
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_heads': 4, 'out_channels': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp9 = tmp7 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 * tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 * tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 * tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 * tmp26
tmp29 = tmp27 * tmp28
tmp30 = tmp22 + tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(64)](primals_2, primals_1,
primals_3, primals_4, buf0, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf0, primals_1, primals_2, primals_3, primals_4
class ConstAttention(nn.Module):
def __init__(self, **kwargs):
super(ConstAttention, self).__init__()
def forward(self, neighbor_vecs, self_vecs):
return 1
class GatAttention(ConstAttention):
def __init__(self, num_heads, out_channels):
super(GatAttention, self).__init__()
self.num_heads = num_heads
self.out_channels = out_channels
self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, neighbor_vecs, self_vecs):
alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs
* self.att_neighbor_weight).sum(dim=-1)
alpha = F.leaky_relu(alpha, negative_slope=0.2)
return alpha
class CosAttentionNew(GatAttention):
def forward(self, input_0, input_1):
primals_1 = self.att_self_weight
primals_4 = self.att_neighbor_weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
import torch.nn.functional as F
class FullyConnectedNet(nn.Module):
"""https://github.com/VainF/Torch-Pruning/issues/21"""
def __init__(self, input_size, num_classes, HIDDEN_UNITS):
super().__init__()
self.fc1 = nn.Linear(input_size, HIDDEN_UNITS)
self.fc2 = nn.Linear(HIDDEN_UNITS, num_classes)
def forward(self, x):
x = F.relu(self.fc1(x))
y_hat = self.fc2(x)
return y_hat
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'num_classes': 4, 'HIDDEN_UNITS': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3
class FullyConnectedNetNew(nn.Module):
"""https://github.com/VainF/Torch-Pruning/issues/21"""
def __init__(self, input_size, num_classes, HIDDEN_UNITS):
super().__init__()
self.fc1 = nn.Linear(input_size, HIDDEN_UNITS)
self.fc2 = nn.Linear(HIDDEN_UNITS, num_classes)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
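(Aside, not part of the completion.) The kernel's relu_threshold_backward name reflects that it fuses the bias add with ReLU and, in the same pass, materializes the boolean mask of clamped positions that autograd needs later. A minimal eager sketch of that contract:

import torch

def fused_bias_relu_with_mask(x, bias):
    y = x + bias              # bias broadcast over the trailing dim
    out = torch.relu(y)       # written in place of the matmul output (buf1)
    saved_mask = out <= 0.0   # written to buf3 for the backward pass
    return out, saved_mask

out, mask = fused_bias_relu_with_mask(torch.randn(64, 4), torch.randn(4))
assert torch.equal(mask, out == 0)  # clamped positions are exactly the zeros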
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
class ConstAttention(nn.Module):
def __init__(self, **kwargs):
super(ConstAttention, self).__init__()
def forward(self, neighbor_vecs, self_vecs):
return 1
class GatAttention(ConstAttention):
def __init__(self, num_heads, out_channels):
super(GatAttention, self).__init__()
self.num_heads = num_heads
self.out_channels = out_channels
self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, neighbor_vecs, self_vecs):
alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs
* self.att_neighbor_weight).sum(dim=-1)
alpha = F.leaky_relu(alpha, negative_slope=0.2)
return alpha
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_heads': 4, 'out_channels': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_sum_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 + tmp29
tmp31 = 0.0
tmp32 = tmp30 > tmp31
tmp33 = 0.2
tmp34 = tmp30 * tmp33
tmp35 = tl.where(tmp32, tmp30, tmp34)
tl.store(out_ptr1 + x2, tmp32, xmask)
tl.store(out_ptr2 + x2, tmp35, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_leaky_relu_mul_sum_0[grid(64)](primals_2,
primals_1, primals_4, primals_3, buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
del primals_3
return buf2, primals_2, primals_4, buf1
class ConstAttention(nn.Module):
def __init__(self, **kwargs):
super(ConstAttention, self).__init__()
def forward(self, neighbor_vecs, self_vecs):
return 1
class GatAttentionNew(ConstAttention):
def __init__(self, num_heads, out_channels):
super(GatAttentionNew, self).__init__()
self.num_heads = num_heads
self.out_channels = out_channels
self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, input_0, input_1):
primals_1 = self.att_self_weight
primals_3 = self.att_neighbor_weight
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
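(Aside.) One fused kernel produces both outputs here: the leaky_relu activation (buf2) and the alpha > 0 mask (buf1) that the backward pass uses to choose a branch. An eager restatement, again assuming (1, num_heads, out_channels) weights:

import torch
import torch.nn.functional as F

def gat_alpha_ref(self_vecs, w_self, neighbor_vecs, w_neighbor):
    alpha = (self_vecs * w_self).sum(-1) + (neighbor_vecs * w_neighbor).sum(-1)
    return F.leaky_relu(alpha, negative_slope=0.2), alpha > 0

s, n = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
w_s, w_n = torch.rand(1, 4, 4), torch.rand(1, 4, 4)
out, mask = gat_alpha_ref(s, w_s, n, w_n)
print(out.shape, mask.dtype)  # torch.Size([4, 4, 4]) torch.bool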
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
class TripletLoss(nn.Module):
def __init__(self):
super(TripletLoss, self).__init__()
self.margin = 0.5
def distance(self, x, y):
diff = torch.abs(x - y)
diff = torch.pow(diff, 2).sum(-1)
return diff
def forward(self, anchor, pos, neg):
pos_distance = self.distance(anchor, pos)
neg_distance = self.distance(anchor, neg)
loss = torch.clamp(self.margin + pos_distance - neg_distance, min=0.0
).mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_clamp_mean_pow_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 * tmp3
tmp7 = tmp5 - tmp6
tmp8 = tl_math.abs(tmp7)
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp13 = tmp11 - tmp12
tmp14 = tl_math.abs(tmp13)
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp19 = tmp17 - tmp18
tmp20 = tl_math.abs(tmp19)
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = 0.5
tmp24 = tmp22 + tmp23
tmp26 = tmp0 - tmp25
tmp27 = tl_math.abs(tmp26)
tmp28 = tmp27 * tmp27
tmp30 = tmp5 - tmp29
tmp31 = tl_math.abs(tmp30)
tmp32 = tmp31 * tmp31
tmp33 = tmp28 + tmp32
tmp35 = tmp11 - tmp34
tmp36 = tl_math.abs(tmp35)
tmp37 = tmp36 * tmp36
tmp38 = tmp33 + tmp37
tmp40 = tmp17 - tmp39
tmp41 = tl_math.abs(tmp40)
tmp42 = tmp41 * tmp41
tmp43 = tmp38 + tmp42
tmp44 = tmp24 - tmp43
tmp45 = 0.0
tmp46 = triton_helpers.maximum(tmp44, tmp45)
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = 64.0
tmp51 = tmp49 / tmp50
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp51, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_abs_add_clamp_mean_pow_sub_sum_0[grid(1)](buf2,
arg0_1, arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class TripletLossNew(nn.Module):
def __init__(self):
super(TripletLossNew, self).__init__()
self.margin = 0.5
def distance(self, x, y):
diff = torch.abs(x - y)
diff = torch.pow(diff, 2).sum(-1)
return diff
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
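(Aside.) A sanity check one could run -- hypothetical, and it assumes a CUDA device plus both classes above in one scope:

import torch

torch.manual_seed(0)
anchor, pos, neg = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
eager = TripletLoss()(anchor, pos, neg)
fused = TripletLossNew()(anchor, pos, neg)
torch.testing.assert_close(eager, fused)  # both reduce to the same scalar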
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
class ConstAttention(nn.Module):
def __init__(self, **kwargs):
super(ConstAttention, self).__init__()
def forward(self, neighbor_vecs, self_vecs):
return 1
class GatAttention(ConstAttention):
def __init__(self, num_heads, out_channels):
super(GatAttention, self).__init__()
self.num_heads = num_heads
self.out_channels = out_channels
self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, neighbor_vecs, self_vecs):
alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs
* self.att_neighbor_weight).sum(dim=-1)
alpha = F.leaky_relu(alpha, negative_slope=0.2)
return alpha
class GatSymAttention(GatAttention):
def forward(self, neighbor_vecs, self_vecs):
alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs
* self.att_neighbor_weight).sum(dim=-1)
alpha = F.leaky_relu(alpha, negative_slope=0.2)
alpha_2 = (neighbor_vecs * self.att_self_weight).sum(dim=-1) + (
self_vecs * self.att_neighbor_weight).sum(dim=-1)
alpha = alpha + alpha_2
return alpha
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_heads': 4, 'out_channels': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 + tmp29
tmp31 = tmp15 * tmp1
tmp32 = tmp18 * tmp4
tmp33 = tmp31 + tmp32
tmp34 = tmp22 * tmp8
tmp35 = tmp33 + tmp34
tmp36 = tmp26 * tmp12
tmp37 = tmp35 + tmp36
tmp38 = tmp0 * tmp16
tmp39 = tmp3 * tmp19
tmp40 = tmp38 + tmp39
tmp41 = tmp7 * tmp23
tmp42 = tmp40 + tmp41
tmp43 = tmp11 * tmp27
tmp44 = tmp42 + tmp43
tmp45 = tmp37 + tmp44
tmp46 = 0.0
tmp47 = tmp30 > tmp46
tmp48 = 0.2
tmp49 = tmp30 * tmp48
tmp50 = tl.where(tmp47, tmp30, tmp49)
tmp51 = tmp50 + tmp45
tl.store(out_ptr1 + x2, tmp47, xmask)
tl.store(in_out_ptr0 + x2, tmp51, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_poi_fused_add_leaky_relu_mul_sum_0[grid(64)](buf3, primals_2,
primals_1, primals_4, primals_3, buf1, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del primals_1
del primals_3
return buf3, primals_2, primals_4, buf1
class ConstAttention(nn.Module):
def __init__(self, **kwargs):
super(ConstAttention, self).__init__()
def forward(self, neighbor_vecs, self_vecs):
return 1
class GatAttention(ConstAttention):
def __init__(self, num_heads, out_channels):
super(GatAttention, self).__init__()
self.num_heads = num_heads
self.out_channels = out_channels
self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, neighbor_vecs, self_vecs):
alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs
* self.att_neighbor_weight).sum(dim=-1)
alpha = F.leaky_relu(alpha, negative_slope=0.2)
return alpha
class GatSymAttentionNew(GatAttention):
def forward(self, input_0, input_1):
primals_1 = self.att_self_weight
primals_3 = self.att_neighbor_weight
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
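(Aside.) The kernel preserves the source's asymmetry: leaky_relu is applied only to the first sum (tmp50) before the symmetric term (tmp45) is added, never to the combined value. In eager form, roughly:

import torch
import torch.nn.functional as F

def gat_sym_ref(self_vecs, w_self, neighbor_vecs, w_neighbor):
    a1 = (self_vecs * w_self).sum(-1) + (neighbor_vecs * w_neighbor).sum(-1)
    a2 = (neighbor_vecs * w_self).sum(-1) + (self_vecs * w_neighbor).sum(-1)
    return F.leaky_relu(a1, negative_slope=0.2) + a2  # a2 is not rectified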
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
import torch.utils.data
class HuberLoss(nn.Module):
def __init__(self, delta=1):
super().__init__()
self.huber_loss_delta1 = nn.SmoothL1Loss()
self.delta = delta
def forward(self, x, x_hat):
loss = self.huber_loss_delta1(x / self.delta, x_hat / self.delta)
return loss * self.delta * self.delta
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl_math.abs(tmp5)
tmp7 = tmp6 < tmp1
tmp8 = tmp6 * tmp6
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tmp11 = tmp10 * tmp1
tmp12 = tmp6 - tmp9
tmp13 = tl.where(tmp7, tmp11, tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_smooth_l1_loss_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class HuberLossNew(nn.Module):
def __init__(self, delta=1):
super().__init__()
self.huber_loss_delta1 = nn.SmoothL1Loss()
self.delta = delta
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
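(Aside.) With the default delta=1 the compiler folds the two divisions and the trailing * delta * delta into multiplications by 1.0, so what remains is the plain SmoothL1 definition. An eager sketch of the fused math:

import torch

def smooth_l1_ref(x, y):
    d = (x - y).abs()
    # quadratic inside the unit band, linear outside, then the mean reduction
    return torch.where(d < 1.0, 0.5 * d * d, d - 0.5).mean()

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
torch.testing.assert_close(smooth_l1_ref(x, y), torch.nn.SmoothL1Loss()(x, y))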
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn.functional as F
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
"""
q: 256,8,36,64
k: 256,8,36,64
v: 256,8,36,64
mask: 256,1,1,36
"""
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
"""
mask(256,1,1,36)
attn(256,8,36,36)
这里用到了tensor的broadcast: 两个tensor必须满足,从最后一个维度开始往前算,维度要么相等,要么为1,要么不存在
这里的mask中间两个维度为1,可以与attn做broadcast
将mask的行索引复制到36,得到36×36的mask矩阵,batch中共256个36*36的矩阵,1/256即batch中每个样本的mask再复制到head=8个
每个batch中样本的mask和各自的互注意力矩阵相乘
注意力矩阵是36*36是个混淆矩阵,表示第一个元素和其余36个元素的关系,因此mask行列转置无所谓
"""
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'temperature': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf1
)
del arg1_1
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
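(Aside.) The two softmax kernels are the standard numerically stable two-pass split, with the 1/temperature scaling peeled off into its own elementwise kernel. In eager form:

import torch

def softmax_two_pass(x):
    e = torch.exp(x - x.max(dim=-1, keepdim=True).values)  # kernel 1: shift + exp
    return e / e.sum(dim=-1, keepdim=True)                 # kernel 2: normalize

x = torch.randn(4, 4, 4, 4)
torch.testing.assert_close(softmax_two_pass(x), torch.softmax(x, dim=-1))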
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import init
class ScalarScaleBias(nn.Module):
def __init__(self, scale=True, scale_init=1.0, bias=True, bias_init=0.0
) ->None:
super(ScalarScaleBias, self).__init__()
if scale:
self.weight = Parameter(torch.Tensor(1))
else:
self.register_parameter('weight', None)
if bias:
self.bias = Parameter(torch.Tensor(1))
else:
self.register_parameter('bias', None)
self.weight_init = scale_init
self.bias_init = bias_init
self.reset_parameters()
def reset_parameters(self) ->None:
if self.weight is not None:
init.constant_(self.weight, self.weight_init)
if self.bias is not None:
init.constant_(self.bias, self.bias_init)
def forward(self, x):
if self.weight is not None:
x = x * self.weight
if self.bias is not None:
x = x + self.bias
return x
class ScalarBiasScale(ScalarScaleBias):
def forward(self, x):
if self.bias is not None:
x = x + self.bias
if self.weight is not None:
x = x * self.weight
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp6 = tmp3 * tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_2, primals_1,
primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2, primals_3
class ScalarScaleBias(nn.Module):
def __init__(self, scale=True, scale_init=1.0, bias=True, bias_init=0.0
) ->None:
super(ScalarScaleBias, self).__init__()
if scale:
self.weight = Parameter(torch.Tensor(1))
else:
self.register_parameter('weight', None)
if bias:
self.bias = Parameter(torch.Tensor(1))
else:
self.register_parameter('bias', None)
self.weight_init = scale_init
self.bias_init = bias_init
self.reset_parameters()
def reset_parameters(self) ->None:
if self.weight is not None:
init.constant_(self.weight, self.weight_init)
if self.bias is not None:
init.constant_(self.bias, self.bias_init)
def forward(self, x):
if self.weight is not None:
x = x * self.weight
if self.bias is not None:
x = x + self.bias
return x
class ScalarBiasScaleNew(ScalarScaleBias):
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
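(Aside.) The operand order matters here: the kernel computes (x + bias) * weight, matching ScalarBiasScale's override rather than the parent's x * weight + bias. The two differ whenever weight != 1:

import torch

x = torch.rand(2, 2)
w, b = torch.tensor([2.0]), torch.tensor([0.5])
bias_then_scale = (x + b) * w  # what triton_poi_fused_add_mul_0 computes
scale_then_bias = x * w + b    # what the parent ScalarScaleBias would compute
print(torch.allclose(bias_then_scale, scale_then_bias))  # False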
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
class MeanStd(nn.Module):
def __init__(self):
super(MeanStd, self).__init__()
def forward(self, x):
x = x.view(x.size(0), x.size(1), -1)
mean_x = torch.mean(x, dim=2)
var_x = torch.mean(x ** 2, dim=2) - mean_x * mean_x
return torch.cat([mean_x, var_x], dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_ptr0, out_ptr2, out_ptr3, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 16.0
tmp11 = tmp9 / tmp10
tmp12 = tmp5 / tmp10
tmp13 = tmp11 * tmp11
tmp14 = tmp12 - tmp13
tl.store(out_ptr2 + (x2 + 8 * x3), tmp11, xmask)
tl.store(out_ptr3 + (x2 + 8 * x3), tmp14, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
buf2 = reinterpret_tensor(buf4, (4, 4), (8, 1), 0)
buf3 = reinterpret_tensor(buf4, (4, 4), (8, 1), 4)
get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0[grid(16)](arg0_1, buf2, buf3,
16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf4,
class MeanStdNew(nn.Module):
def __init__(self):
super(MeanStdNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
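(Aside.) The interesting move in this entry is in call(), not the kernel: buf2 and buf3 are strided views over the two halves of a single (4, 8) buffer, so torch.cat([mean, var], dim=1) costs no copy. The math itself, restated eagerly:

import torch

def mean_var_packed_ref(x):
    flat = x.view(x.size(0), x.size(1), -1)
    mean = flat.mean(dim=2)
    var = (flat * flat).mean(dim=2) - mean * mean  # E[x^2] - E[x]^2
    return torch.cat([mean, var], dim=1)

print(mean_var_packed_ref(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 8])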
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
class L2Norm(nn.Module):
def __init__(self, n_channels, scale):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.weight, self.gamma)
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = torch.div(x, norm)
out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x
) * x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4, 'scale': 1.0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = 1e-10
tmp15 = tmp13 + tmp14
tmp16 = tmp1 / tmp15
tmp17 = tmp0 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class L2NormNew(nn.Module):
def __init__(self, n_channels, scale):
super(L2NormNew, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.weight, self.gamma)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
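(Aside.) Note where eps lands: it is added after the square root (norm = sqrt(sum(x^2)) + eps), unlike the rsqrt(mean + eps) pattern in other entries, and the kernel follows the source faithfully (tmp15 = sqrt(...) + 1e-10). Eagerly:

import torch

def l2norm_ref(x, weight, eps=1e-10):
    norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + eps  # eps outside sqrt
    return weight.view(1, -1, 1, 1) * (x / norm)

x, w = torch.rand(4, 4, 4, 4), torch.ones(4)
print(l2norm_ref(x, w).shape)  # torch.Size([4, 4, 4, 4])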
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn.functional as F
import torch.nn as nn
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_in': 4, 'd_hid': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_1,
buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2, primals_1,
buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
del buf4
del primals_7
return buf5, primals_1, primals_6, reinterpret_tensor(buf1, (64, 4), (4,
1), 0), buf2, primals_4, buf6
class PositionwiseFeedForwardNew(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_2 = self.w_1.weight
primals_3 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
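(Aside.) The layer norm is split across two kernels in the classic way: kernel 1 reduces each row to its mean and biased variance, kernel 2 revisits every element to normalize, scale, and shift, with the residual add recomputed in both rather than materialized. Eagerly:

import torch
import torch.nn.functional as F

def residual_layer_norm_ref(x, residual, gamma, beta, eps=1e-06):
    h = x + residual
    mean = h.mean(dim=-1, keepdim=True)                # kernel 1 -> buf3
    var = h.var(dim=-1, unbiased=False, keepdim=True)  # kernel 1 -> buf4
    return (h - mean) * torch.rsqrt(var + eps) * gamma + beta  # kernel 2

h, r = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
g, b = torch.ones(4), torch.zeros(4)
torch.testing.assert_close(residual_layer_norm_ref(h, r, g, b),
                           F.layer_norm(h + r, (4,), g, b, 1e-06))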
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
class NormalizationLayer(nn.Module):
def __init__(self):
super(NormalizationLayer, self).__init__()
def forward(self, x, epsilon=1e-08):
return x * ((x ** 2).mean(dim=1, keepdim=True) + epsilon).rsqrt()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp0 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_pow_rsqrt_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizationLayerNew(nn.Module):
def __init__(self):
super(NormalizationLayerNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
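(Aside.) This is the PixelNorm-style normalization. Rather than staging the per-position statistic in its own buffer, the kernel recomputes the 4-channel mean of squares for every output element -- four extra loads each -- a trade Inductor often makes when the reduction is tiny. Eager form:

import torch

def pixel_norm_ref(x, eps=1e-08):
    # normalize each spatial position by the RMS over the channel dim (dim=1)
    return x * torch.rsqrt((x * x).mean(dim=1, keepdim=True) + eps)

x = torch.rand(4, 4, 4, 4)
expected = x * ((x ** 2).mean(dim=1, keepdim=True) + 1e-08).rsqrt()
torch.testing.assert_close(pixel_norm_ref(x), expected)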
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self, z_dim):
super(Decoder, self).__init__()
self.deconv1 = nn.ConvTranspose2d(z_dim, 128, kernel_size=4, stride
=1, bias=False)
self.deconv2 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2,
padding=1, bias=False)
self.deconv3 = nn.ConvTranspose2d(64, 1, kernel_size=4, stride=2,
padding=1, bias=False)
self.lrelu = nn.LeakyReLU()
def forward(self, x):
x = self.deconv1(x)
x = self.lrelu(x)
x = self.deconv2(x)
x = self.lrelu(x)
x = self.deconv3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'z_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.01
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.01
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (64, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 128, 7, 7), (6272, 49, 7, 1))
buf1 = empty_strided_cuda((4, 128, 7, 7), (6272, 49, 7, 1), torch.bool)
buf2 = empty_strided_cuda((4, 128, 7, 7), (6272, 49, 7, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(25088)](buf0, buf1, buf2, 25088,
XBLOCK=128, num_warps=4, num_stages=1)
del buf0
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 14, 14), (12544, 196, 14, 1))
buf4 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.float32)
triton_poi_fused_leaky_relu_1[grid(50176)](buf3, buf4, buf5, 50176,
XBLOCK=512, num_warps=4, num_stages=1)
del buf3
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 28, 28), (784, 784, 28, 1))
return (buf6, primals_1, primals_2, primals_3, primals_4, buf1, buf2,
buf4, buf5)
class DecoderNew(nn.Module):
def __init__(self, z_dim):
super(DecoderNew, self).__init__()
self.deconv1 = nn.ConvTranspose2d(z_dim, 128, kernel_size=4, stride
=1, bias=False)
self.deconv2 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2,
padding=1, bias=False)
self.deconv3 = nn.ConvTranspose2d(64, 1, kernel_size=4, stride=2,
padding=1, bias=False)
self.lrelu = nn.LeakyReLU()
def forward(self, input_0):
primals_1 = self.deconv1.weight
primals_3 = self.deconv2.weight
primals_4 = self.deconv3.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
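(Aside.) The convolutions stay in the vendor library via extern_kernels.convolution; only LeakyReLU becomes Triton code, and the kernel is apparently emitted twice only because xnumel (25088 vs 50176) is baked in as a compile-time constant. The per-element contract of both copies:

import torch
import torch.nn.functional as F

def leaky_relu_with_mask(x, negative_slope=0.01):
    mask = x > 0  # saved for backward (buf1/buf4)
    return mask, torch.where(mask, x, x * negative_slope)

x = torch.randn(4, 128, 7, 7)
mask, out = leaky_relu_with_mask(x)
torch.testing.assert_close(out, F.leaky_relu(x, 0.01))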
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
import torch.utils.data
class LayerNorm(nn.Module):
"""
Simple 1D LayerNorm.
"""
def __init__(self, features, center=True, scale=False, eps=1e-06):
super().__init__()
self.center = center
self.scale = scale
self.eps = eps
if self.scale:
self.scale_param = nn.Parameter(torch.ones(features))
else:
self.scale_param = None
if self.center:
self.center_param = nn.Parameter(torch.zeros(features))
else:
self.center_param = None
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
output = (x - mean) / (std + self.eps)
if self.scale:
output = output * self.scale_param
if self.center:
output = output + self.center_param
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-06
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 + tmp28
tl.store(out_ptr0 + x2, tmp29, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_std_sub_0[grid(256)](primals_1,
primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class LayerNormNew(nn.Module):
"""
Simple 1D LayerNorm.
"""
def __init__(self, features, center=True, scale=False, eps=1e-06):
super().__init__()
self.center = center
self.scale = scale
self.eps = eps
if self.scale:
self.scale_param = nn.Parameter(torch.ones(features))
else:
self.scale_param = None
if self.center:
self.center_param = nn.Parameter(torch.zeros(features))
else:
self.center_param = None
def forward(self, input_0):
primals_2 = self.center_param
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
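(Aside.) One subtlety the kernel gets right: torch.std defaults to the unbiased estimator, so the sum of squared deviations is divided by n - 1 = 3 (tmp22 = 3.0), not by 4, before the sqrt. In eager terms:

import torch

def simple_layer_norm_ref(x, center_param, eps=1e-06):
    mean = x.mean(-1, keepdim=True)
    std = x.std(-1, keepdim=True)  # unbiased: divides by n - 1
    return (x - mean) / (std + eps) + center_param

out = simple_layer_norm_ref(torch.rand(4, 4, 4, 4), torch.zeros(4))
print(out.shape)  # torch.Size([4, 4, 4, 4])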
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
import torch.nn as nn
class MaskedMSELoss(nn.Module):
def __init__(self):
super(MaskedMSELoss, self).__init__()
self.loss = nn.MSELoss(reduction='sum')
def forward(self, pred, target, mask):
"""
pred -> batch*seq_len
target -> batch*seq_len
mask -> batch*seq_len
"""
loss = self.loss(pred * mask, target) / torch.sum(mask)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mse_loss_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp3 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tmp8 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mse_loss_mul_sum_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class MaskedMSELossNew(nn.Module):
def __init__(self):
super(MaskedMSELossNew, self).__init__()
self.loss = nn.MSELoss(reduction='sum')
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
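(Aside.) Both reductions -- the sum of squared errors and the mask normalizer -- happen in a single pass over the data here. A small check, assuming the MaskedMSELoss class above is in scope:

import torch

def masked_mse_ref(pred, target, mask):
    num = ((pred * mask - target) ** 2).sum()  # first tl.sum in the kernel
    return num / mask.sum()                    # second tl.sum, then the divide

p, t, m = (torch.rand(4, 4, 4, 4) for _ in range(3))
torch.testing.assert_close(masked_mse_ref(p, t, m), MaskedMSELoss()(p, t, m))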
|
Kernelbook
|
Convert the following Python code into Triton code:
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
class SimpleMLP(nn.Module):
def __init__(self, n_inputs, n_outputs, dropout_probability):
super(SimpleMLP, self).__init__()
self.n_inputs = n_inputs
self.n_outputs = n_outputs
self.dropout_probability = dropout_probability
self.l1 = nn.Linear(self.n_inputs, self.n_inputs * 2)
self.l2 = nn.Linear(self.n_inputs * 2, self.n_inputs * 4)
self.l3 = nn.Linear(self.n_inputs * 4, self.n_inputs * 8)
self.l4 = nn.Linear(self.n_inputs * 8, self.n_outputs)
self.dropout = nn.Dropout(self.dropout_probability)
def forward(self, X):
X = F.relu(self.l1(X))
X = self.dropout(X)
X = F.relu(self.l2(X))
X = self.dropout(X)
X = F.relu(self.l3(X))
X = self.dropout(X)
X = self.l4(X)
return X
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_inputs': 4, 'n_outputs': 4, 'dropout_probability': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 8), (8, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16), (16, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (4, 32), (32, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1,
primals_2, buf9, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 16), (1, 8), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(1024)](buf3,
primals_5, buf8, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 16), (16, 1), 0),
reinterpret_tensor(primals_6, (16, 32), (1, 16), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(2048)](buf5,
primals_7, buf7, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 32),
(32, 1), 0), reinterpret_tensor(primals_8, (32, 4), (1, 32), 0),
alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(
buf3, (64, 16), (16, 1), 0), reinterpret_tensor(buf5, (64, 32), (32,
1), 0), primals_8, buf7, primals_6, buf8, primals_4, buf9
class SimpleMLPNew(nn.Module):
def __init__(self, n_inputs, n_outputs, dropout_probability):
super(SimpleMLPNew, self).__init__()
self.n_inputs = n_inputs
self.n_outputs = n_outputs
self.dropout_probability = dropout_probability
self.l1 = nn.Linear(self.n_inputs, self.n_inputs * 2)
self.l2 = nn.Linear(self.n_inputs * 2, self.n_inputs * 4)
self.l3 = nn.Linear(self.n_inputs * 4, self.n_inputs * 8)
self.l4 = nn.Linear(self.n_inputs * 8, self.n_outputs)
self.dropout = nn.Dropout(self.dropout_probability)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_8 = self.l4.weight
primals_9 = self.l4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
Kernelbook
|
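In the record above, every linear layer sees the (4, 4, 4, 4) input as a flat (64, 4) matrix: reinterpret_tensor re-views the same storage rather than copying, and only the bias add plus ReLU is fused into Triton kernels (the saved boolean masks feed the backward pass). Dropout never appears in the traced graph, presumably because the trace treated it as an identity. A plain-PyTorch sketch of the same bookkeeping — w1..w4 and b1..b4 are my stand-ins for l1..l4's weight and bias, not names from the record:
import torch
def simple_mlp_forward(x, w1, b1, w2, b2, w3, b3, w4, b4):
    flat = x.reshape(64, 4)                # view, like reinterpret_tensor
    h1 = torch.relu(flat @ w1.t() + b1)    # (64, 8)   kernel _0 fuses +bias, relu
    h2 = torch.relu(h1 @ w2.t() + b2)      # (64, 16)  kernel _1
    h3 = torch.relu(h2 @ w3.t() + b3)      # (64, 32)  kernel _2
    out = h3 @ w4.t() + b4                 # addmm, no activation
    return out.reshape(4, 4, 4, 4)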
Convert the following Python code into Triton code:
import torch
import torch.nn.functional as F
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
"""
q: 256,8,36,64
k: 256,8,36,64
v: 256,8,36,64
mask: 256,1,1,36
"""
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
"""
mask(256,1,1,36)
attn(256,8,36,36)
        Tensor broadcasting is used here: comparing the two tensors from the last dimension backwards, each pair of dimensions must be equal, be 1, or be absent.
        The middle two dimensions of the mask are 1, so it can broadcast against attn.
        The mask's single row is replicated to 36 rows, giving a 36x36 mask matrix; the batch holds 256 such 36*36 matrices, one per sample, and each sample's mask is further replicated across head=8.
        Each sample's mask is applied to its own attention matrix.
        The 36*36 attention matrix records how each element relates to all 36 elements, so transposing the mask's rows and columns makes no difference.
"""
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 16), (16, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_7, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_9
return buf14, buf7, primals_1, primals_8, reinterpret_tensor(primals_2,
(16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0
), buf11, primals_7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
"""
q: 256,8,36,64
k: 256,8,36,64
v: 256,8,36,64
mask: 256,1,1,36
"""
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
"""
mask(256,1,1,36)
attn(256,8,36,36)
        Tensor broadcasting is used here: comparing the two tensors from the last dimension backwards, each pair of dimensions must be equal, be 1, or be absent.
        The middle two dimensions of the mask are 1, so it can broadcast against attn.
        The mask's single row is replicated to 36 rows, giving a 36x36 mask matrix; the batch holds 256 such 36*36 matrices, one per sample, and each sample's mask is further replicated across head=8.
        Each sample's mask is applied to its own attention matrix.
        The 36*36 attention matrix records how each element relates to all 36 elements, so transposing the mask's rows and columns makes no difference.
"""
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttentionNew(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, input_0, input_1, input_2):
primals_4 = self.w_qs.weight
primals_5 = self.w_ks.weight
primals_6 = self.w_vs.weight
primals_7 = self.fc.weight
primals_8 = self.layer_norm.weight
primals_9 = self.layer_norm.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
Kernelbook
|
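Two kernels in the record above split softmax over the last dimension into the standard numerically stable pair: triton_poi_fused__softmax_2 subtracts the row max before exponentiating, and triton_poi_fused__softmax_3 normalizes by the row sum. The same two passes in eager PyTorch, as a sketch:
import torch
def two_pass_softmax(attn):
    # pass 1 (kernel _softmax_2): exp(x - rowmax) avoids overflow,
    # since exp of a large positive score would produce inf
    shifted = attn - attn.max(dim=-1, keepdim=True).values
    num = shifted.exp()
    # pass 2 (kernel _softmax_3): divide by the row sum
    return num / num.sum(dim=-1, keepdim=True)
The 0.5 constant in triton_poi_fused_clone_div_0 is 1 / temperature with temperature = d_k ** 0.5 = 2 for d_k = 4.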
Convert the following Python code into Triton code:
import torch
import torch.nn as tnn
class Net(tnn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = tnn.Conv2d(3, 6, 5)
self.pool = tnn.MaxPool2d(2, 2)
self.conv2 = tnn.Conv2d(6, 16, 5)
self.fc1 = tnn.Linear(16 * 5 * 5, 120)
self.fc2 = tnn.Linear(120, 84)
self.fc3 = tnn.Linear(84, 10)
def forward(self, x):
x = self.pool(self.conv1(x))
x = self.pool(self.conv2(x))
x = x.view(-1, 16 * 5 * 5)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as tnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (10, 84), (84, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(18816)](buf1, primals_2, 18816,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(6400)](buf5, primals_5, 6400,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (4, 400),
(400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400
), 0), alpha=1, beta=1, out=buf8)
del primals_7
buf9 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8,
(120, 84), (1, 120), 0), alpha=1, beta=1, out=buf9)
del primals_9
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(
primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf10)
del primals_11
return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf8, buf9,
primals_10, primals_8, primals_6)
class NetNew(tnn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = tnn.Conv2d(3, 6, 5)
self.pool = tnn.MaxPool2d(2, 2)
self.conv2 = tnn.Conv2d(6, 16, 5)
self.fc1 = tnn.Linear(16 * 5 * 5, 120)
self.fc2 = tnn.Linear(120, 84)
self.fc3 = tnn.Linear(84, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
Kernelbook
|
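The pooling kernels in the record above each write two outputs: the pooled maxima and an int8 code in {0, 1, 2, 3} marking which element of the 2x2 window won, which autograd later uses to route gradients. The eager counterpart — a sketch; note that PyTorch's return_indices gives flat positions within each plane rather than the compact window codes the kernels store, though both encode the same argmax choice:
import torch
import torch.nn.functional as F
x = torch.rand(4, 6, 28, 28, device="cuda")
values, indices = F.max_pool2d(x, kernel_size=2, stride=2,
                               return_indices=True)
# values  -> what out_ptr0 holds in the first pooling kernel
# indices -> same winners as the int8 codes in out_ptr1, different encoding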
Convert the following Python code into Triton code:
import torch
import torch.nn.functional as F
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
"""
q: 256,8,36,64
k: 256,8,36,64
v: 256,8,36,64
mask: 256,1,1,36
"""
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
"""
mask(256,1,1,36)
attn(256,8,36,36)
        Tensor broadcasting is used here: comparing the two tensors from the last dimension backwards, each pair of dimensions must be equal, be 1, or be absent.
        The middle two dimensions of the mask are 1, so it can broadcast against attn.
        The mask's single row is replicated to 36 rows, giving a 36x36 mask matrix; the batch holds 256 such 36*36 matrices, one per sample, and each sample's mask is further replicated across head=8.
        Each sample's mask is applied to its own attention matrix.
        The 36*36 attention matrix records how each element relates to all 36 elements, so transposing the mask's rows and columns makes no difference.
"""
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class EncoderLayer(nn.Module):
""" Compose with two layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, enc_input, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(enc_input, enc_input,
enc_input, mask=slf_attn_mask)
enc_output = self.pos_ffn(enc_output)
return enc_output, enc_slf_attn
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_6, primals_7, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0)
del buf15
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_7[grid(64)](buf16,
primals_9, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf17)
buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0)
del buf17
triton_poi_fused_add_8[grid(64)](buf18, primals_11, buf14, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
buf19 = buf13
del buf13
buf20 = buf12
del buf12
triton_poi_fused_native_layer_norm_9[grid(16)](buf18, buf19, buf20,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_10[grid(64)](buf18, buf19, buf20,
primals_12, primals_13, buf21, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf19
del buf20
del primals_13
return (buf21, buf7, primals_1, primals_6, primals_12, buf7,
reinterpret_tensor(buf10, (16, 16), (16, 1), 0), buf11,
reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(
buf16, (16, 4), (4, 1), 0), buf18, primals_10, buf22, primals_8,
primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0))
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
"""
q: 256,8,36,64
k: 256,8,36,64
v: 256,8,36,64
mask: 256,1,1,36
"""
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
"""
mask(256,1,1,36)
attn(256,8,36,36)
        Tensor broadcasting is used here: comparing the two tensors from the last dimension backwards, each pair of dimensions must be equal, be 1, or be absent.
        The middle two dimensions of the mask are 1, so it can broadcast against attn.
        The mask's single row is replicated to 36 rows, giving a 36x36 mask matrix; the batch holds 256 such 36*36 matrices, one per sample, and each sample's mask is further replicated across head=8.
        Each sample's mask is applied to its own attention matrix.
        The 36*36 attention matrix records how each element relates to all 36 elements, so transposing the mask's rows and columns makes no difference.
"""
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class EncoderLayerNew(nn.Module):
""" Compose with two layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayerNew, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v,
dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=
dropout)
def forward(self, input_0):
primals_2 = self.slf_attn.w_qs.weight
primals_3 = self.slf_attn.w_ks.weight
primals_4 = self.slf_attn.w_vs.weight
primals_5 = self.slf_attn.fc.weight
primals_6 = self.slf_attn.layer_norm.weight
primals_7 = self.slf_attn.layer_norm.bias
primals_8 = self.pos_ffn.w_1.weight
primals_9 = self.pos_ffn.w_1.bias
primals_10 = self.pos_ffn.w_2.weight
primals_11 = self.pos_ffn.w_2.bias
primals_12 = self.pos_ffn.layer_norm.weight
primals_13 = self.pos_ffn.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
|
Kernelbook
|
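Both layer norms in the record above are split the same way: a statistics kernel computes each row's mean and (biased) variance, and a second kernel applies the affine normalization, with the residual add folded in where possible. Per row of d_model = 4 elements, the eager equivalent is, as a sketch:
import torch
def layer_norm_two_step(x, weight, bias, eps=1e-06):
    mean = x.mean(dim=-1, keepdim=True)                # stats kernel (_5 / _9)
    var = x.var(dim=-1, unbiased=False, keepdim=True)  # biased: divides by N
    return (x - mean) * torch.rsqrt(var + eps) * weight + bias  # apply kernel (_6 / _10)
The buf19 = buf13 and buf20 = buf12 reassignments are inductor's memory planning at work: the second layer norm reuses the scratch buffers the first one no longer needs.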
Convert the following Python code into Triton code:
import torch
from torch import nn
class MishAutoFn(torch.autograd.Function):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
Experimental memory-efficient variant
"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
y = x.mul(torch.tanh(nn.Softplus()(x)))
return y
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
x_sigmoid = torch.sigmoid(x)
x_tanh_sp = nn.Softplus()(x).tanh()
return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp *
x_tanh_sp))
class Mish(nn.Module):
def __init__(self, inplace: 'bool'=False):
super(Mish, self).__init__()
self.inplace = inplace
def forward(self, x):
return MishAutoFn.apply(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tmp9 = libdevice.tanh(tmp8)
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_softplus_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MishAutoFn(torch.autograd.Function):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
Experimental memory-efficient variant
"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
y = x.mul(torch.tanh(nn.Softplus()(x)))
return y
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
x_sigmoid = torch.sigmoid(x)
x_tanh_sp = nn.Softplus()(x).tanh()
return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp *
x_tanh_sp))
class MishNew(nn.Module):
def __init__(self, inplace: 'bool'=False):
super(MishNew, self).__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Kernelbook
|
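The single fused kernel above inlines PyTorch's Softplus threshold trick: for inputs above 20 it passes x through unchanged, because log1p(exp(x)) is numerically equal to x there and exp(x) would soon overflow. The scalar logic the kernel vectorizes, as a sketch:
import math
def mish_scalar(x, threshold=20.0):
    # softplus with the same cutoff the kernel uses (tmp3 = 20.0)
    sp = x if x > threshold else math.log1p(math.exp(x))
    return x * math.tanh(sp)
Only the forward expression x * tanh(softplus(x)) appears in the generated code; the custom backward in MishAutoFn stays in eager PyTorch.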