Created: February 12, 2021 01:38
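The listing below is ResNet-50 in Relay after TVM's Relay VM compiler has made memory explicit: every intermediate buffer is introduced with memory.alloc_storage / memory.alloc_tensor, and each fused primitive function is launched through vm.invoke_tvm_op. For reference, here is a minimal sketch of how a comparable memory-planned module can be produced; the exact script, TVM revision, and printing hook behind this dump are not part of the gist, so the calls below are assumptions tied to a TVM build from around early 2021.

# Hedged sketch (not the original script behind this gist): build ResNet-50
# in Relay and compile it with the Relay VM, whose lowering pipeline inserts
# the memory.alloc_storage / memory.alloc_tensor / vm.invoke_tvm_op calls
# seen in the listing that follows.
import tvm
from tvm import relay
from tvm.relay import testing
from tvm.relay.backend import vm as relay_vm

# ResNet-50 workload: batch size 1, 3x224x224 float32 input, matching %data
# in the dump. Note the dump uses NHWC/HWIO layouts, which suggests an extra
# ConvertLayout step was applied; that step is not reproduced here.
mod, params = testing.resnet.get_workload(num_layers=50, batch_size=1)

# Compiling for the Relay VM runs fusion and memory planning, yielding a
# module of the same shape as the one printed below.
with tvm.transform.PassContext(opt_level=3):
    exe = relay_vm.compile(mod, target="llvm", params=params)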
| type Storage { | |
| } | |
| def @main(%data: Tensor[(1, 3, 224, 224), float32], %bn_data_gamma: Tensor[(3), float32], %bn_data_beta: Tensor[(3), float32], %bn_data_moving_mean: Tensor[(3), float32], %bn_data_moving_var: Tensor[(3), float32], %conv0_weight: Tensor[(64, 3, 7, 7), float32], %bn0_gamma: Tensor[(64), float32], %bn0_beta: Tensor[(64), float32], %bn0_moving_mean: Tensor[(64), float32], %bn0_moving_var: Tensor[(64), float32], %stage1_unit1_bn1_gamma: Tensor[(64), float32], %stage1_unit1_bn1_beta: Tensor[(64), float32], %stage1_unit1_bn1_moving_mean: Tensor[(64), float32], %stage1_unit1_bn1_moving_var: Tensor[(64), float32], %stage1_unit1_conv1_weight: Tensor[(64, 64, 1, 1), float32], %stage1_unit1_bn2_gamma: Tensor[(64), float32], %stage1_unit1_bn2_beta: Tensor[(64), float32], %stage1_unit1_bn2_moving_mean: Tensor[(64), float32], %stage1_unit1_bn2_moving_var: Tensor[(64), float32], %stage1_unit1_conv2_weight: Tensor[(64, 64, 3, 3), float32], %stage1_unit1_bn3_gamma: Tensor[(64), float32], %stage1_unit1_bn3_beta: Tensor[(64), float32], %stage1_unit1_bn3_moving_mean: Tensor[(64), float32], %stage1_unit1_bn3_moving_var: Tensor[(64), float32], %stage1_unit1_conv3_weight: Tensor[(256, 64, 1, 1), float32], %stage1_unit1_sc_weight: Tensor[(256, 64, 1, 1), float32], %stage1_unit2_bn1_gamma: Tensor[(256), float32], %stage1_unit2_bn1_beta: Tensor[(256), float32], %stage1_unit2_bn1_moving_mean: Tensor[(256), float32], %stage1_unit2_bn1_moving_var: Tensor[(256), float32], %stage1_unit2_conv1_weight: Tensor[(64, 256, 1, 1), float32], %stage1_unit2_bn2_gamma: Tensor[(64), float32], %stage1_unit2_bn2_beta: Tensor[(64), float32], %stage1_unit2_bn2_moving_mean: Tensor[(64), float32], %stage1_unit2_bn2_moving_var: Tensor[(64), float32], %stage1_unit2_conv2_weight: Tensor[(64, 64, 3, 3), float32], %stage1_unit2_bn3_gamma: Tensor[(64), float32], %stage1_unit2_bn3_beta: Tensor[(64), float32], %stage1_unit2_bn3_moving_mean: Tensor[(64), float32], %stage1_unit2_bn3_moving_var: Tensor[(64), float32], %stage1_unit2_conv3_weight: Tensor[(256, 64, 1, 1), float32], %stage1_unit3_bn1_gamma: Tensor[(256), float32], %stage1_unit3_bn1_beta: Tensor[(256), float32], %stage1_unit3_bn1_moving_mean: Tensor[(256), float32], %stage1_unit3_bn1_moving_var: Tensor[(256), float32], %stage1_unit3_conv1_weight: Tensor[(64, 256, 1, 1), float32], %stage1_unit3_bn2_gamma: Tensor[(64), float32], %stage1_unit3_bn2_beta: Tensor[(64), float32], %stage1_unit3_bn2_moving_mean: Tensor[(64), float32], %stage1_unit3_bn2_moving_var: Tensor[(64), float32], %stage1_unit3_conv2_weight: Tensor[(64, 64, 3, 3), float32], %stage1_unit3_bn3_gamma: Tensor[(64), float32], %stage1_unit3_bn3_beta: Tensor[(64), float32], %stage1_unit3_bn3_moving_mean: Tensor[(64), float32], %stage1_unit3_bn3_moving_var: Tensor[(64), float32], %stage1_unit3_conv3_weight: Tensor[(256, 64, 1, 1), float32], %stage2_unit1_bn1_gamma: Tensor[(256), float32], %stage2_unit1_bn1_beta: Tensor[(256), float32], %stage2_unit1_bn1_moving_mean: Tensor[(256), float32], %stage2_unit1_bn1_moving_var: Tensor[(256), float32], %stage2_unit1_conv1_weight: Tensor[(128, 256, 1, 1), float32], %stage2_unit1_bn2_gamma: Tensor[(128), float32], %stage2_unit1_bn2_beta: Tensor[(128), float32], %stage2_unit1_bn2_moving_mean: Tensor[(128), float32], %stage2_unit1_bn2_moving_var: Tensor[(128), float32], %stage2_unit1_conv2_weight: Tensor[(128, 128, 3, 3), float32], %stage2_unit1_bn3_gamma: Tensor[(128), float32], %stage2_unit1_bn3_beta: Tensor[(128), float32], %stage2_unit1_bn3_moving_mean: Tensor[(128), float32], 
%stage2_unit1_bn3_moving_var: Tensor[(128), float32], %stage2_unit1_conv3_weight: Tensor[(512, 128, 1, 1), float32], %stage2_unit1_sc_weight: Tensor[(512, 256, 1, 1), float32], %stage2_unit2_bn1_gamma: Tensor[(512), float32], %stage2_unit2_bn1_beta: Tensor[(512), float32], %stage2_unit2_bn1_moving_mean: Tensor[(512), float32], %stage2_unit2_bn1_moving_var: Tensor[(512), float32], %stage2_unit2_conv1_weight: Tensor[(128, 512, 1, 1), float32], %stage2_unit2_bn2_gamma: Tensor[(128), float32], %stage2_unit2_bn2_beta: Tensor[(128), float32], %stage2_unit2_bn2_moving_mean: Tensor[(128), float32], %stage2_unit2_bn2_moving_var: Tensor[(128), float32], %stage2_unit2_conv2_weight: Tensor[(128, 128, 3, 3), float32], %stage2_unit2_bn3_gamma: Tensor[(128), float32], %stage2_unit2_bn3_beta: Tensor[(128), float32], %stage2_unit2_bn3_moving_mean: Tensor[(128), float32], %stage2_unit2_bn3_moving_var: Tensor[(128), float32], %stage2_unit2_conv3_weight: Tensor[(512, 128, 1, 1), float32], %stage2_unit3_bn1_gamma: Tensor[(512), float32], %stage2_unit3_bn1_beta: Tensor[(512), float32], %stage2_unit3_bn1_moving_mean: Tensor[(512), float32], %stage2_unit3_bn1_moving_var: Tensor[(512), float32], %stage2_unit3_conv1_weight: Tensor[(128, 512, 1, 1), float32], %stage2_unit3_bn2_gamma: Tensor[(128), float32], %stage2_unit3_bn2_beta: Tensor[(128), float32], %stage2_unit3_bn2_moving_mean: Tensor[(128), float32], %stage2_unit3_bn2_moving_var: Tensor[(128), float32], %stage2_unit3_conv2_weight: Tensor[(128, 128, 3, 3), float32], %stage2_unit3_bn3_gamma: Tensor[(128), float32], %stage2_unit3_bn3_beta: Tensor[(128), float32], %stage2_unit3_bn3_moving_mean: Tensor[(128), float32], %stage2_unit3_bn3_moving_var: Tensor[(128), float32], %stage2_unit3_conv3_weight: Tensor[(512, 128, 1, 1), float32], %stage2_unit4_bn1_gamma: Tensor[(512), float32], %stage2_unit4_bn1_beta: Tensor[(512), float32], %stage2_unit4_bn1_moving_mean: Tensor[(512), float32], %stage2_unit4_bn1_moving_var: Tensor[(512), float32], %stage2_unit4_conv1_weight: Tensor[(128, 512, 1, 1), float32], %stage2_unit4_bn2_gamma: Tensor[(128), float32], %stage2_unit4_bn2_beta: Tensor[(128), float32], %stage2_unit4_bn2_moving_mean: Tensor[(128), float32], %stage2_unit4_bn2_moving_var: Tensor[(128), float32], %stage2_unit4_conv2_weight: Tensor[(128, 128, 3, 3), float32], %stage2_unit4_bn3_gamma: Tensor[(128), float32], %stage2_unit4_bn3_beta: Tensor[(128), float32], %stage2_unit4_bn3_moving_mean: Tensor[(128), float32], %stage2_unit4_bn3_moving_var: Tensor[(128), float32], %stage2_unit4_conv3_weight: Tensor[(512, 128, 1, 1), float32], %stage3_unit1_bn1_gamma: Tensor[(512), float32], %stage3_unit1_bn1_beta: Tensor[(512), float32], %stage3_unit1_bn1_moving_mean: Tensor[(512), float32], %stage3_unit1_bn1_moving_var: Tensor[(512), float32], %stage3_unit1_conv1_weight: Tensor[(256, 512, 1, 1), float32], %stage3_unit1_bn2_gamma: Tensor[(256), float32], %stage3_unit1_bn2_beta: Tensor[(256), float32], %stage3_unit1_bn2_moving_mean: Tensor[(256), float32], %stage3_unit1_bn2_moving_var: Tensor[(256), float32], %stage3_unit1_conv2_weight: Tensor[(256, 256, 3, 3), float32], %stage3_unit1_bn3_gamma: Tensor[(256), float32], %stage3_unit1_bn3_beta: Tensor[(256), float32], %stage3_unit1_bn3_moving_mean: Tensor[(256), float32], %stage3_unit1_bn3_moving_var: Tensor[(256), float32], %stage3_unit1_conv3_weight: Tensor[(1024, 256, 1, 1), float32], %stage3_unit1_sc_weight: Tensor[(1024, 512, 1, 1), float32], %stage3_unit2_bn1_gamma: Tensor[(1024), float32], %stage3_unit2_bn1_beta: 
Tensor[(1024), float32], %stage3_unit2_bn1_moving_mean: Tensor[(1024), float32], %stage3_unit2_bn1_moving_var: Tensor[(1024), float32], %stage3_unit2_conv1_weight: Tensor[(256, 1024, 1, 1), float32], %stage3_unit2_bn2_gamma: Tensor[(256), float32], %stage3_unit2_bn2_beta: Tensor[(256), float32], %stage3_unit2_bn2_moving_mean: Tensor[(256), float32], %stage3_unit2_bn2_moving_var: Tensor[(256), float32], %stage3_unit2_conv2_weight: Tensor[(256, 256, 3, 3), float32], %stage3_unit2_bn3_gamma: Tensor[(256), float32], %stage3_unit2_bn3_beta: Tensor[(256), float32], %stage3_unit2_bn3_moving_mean: Tensor[(256), float32], %stage3_unit2_bn3_moving_var: Tensor[(256), float32], %stage3_unit2_conv3_weight: Tensor[(1024, 256, 1, 1), float32], %stage3_unit3_bn1_gamma: Tensor[(1024), float32], %stage3_unit3_bn1_beta: Tensor[(1024), float32], %stage3_unit3_bn1_moving_mean: Tensor[(1024), float32], %stage3_unit3_bn1_moving_var: Tensor[(1024), float32], %stage3_unit3_conv1_weight: Tensor[(256, 1024, 1, 1), float32], %stage3_unit3_bn2_gamma: Tensor[(256), float32], %stage3_unit3_bn2_beta: Tensor[(256), float32], %stage3_unit3_bn2_moving_mean: Tensor[(256), float32], %stage3_unit3_bn2_moving_var: Tensor[(256), float32], %stage3_unit3_conv2_weight: Tensor[(256, 256, 3, 3), float32], %stage3_unit3_bn3_gamma: Tensor[(256), float32], %stage3_unit3_bn3_beta: Tensor[(256), float32], %stage3_unit3_bn3_moving_mean: Tensor[(256), float32], %stage3_unit3_bn3_moving_var: Tensor[(256), float32], %stage3_unit3_conv3_weight: Tensor[(1024, 256, 1, 1), float32], %stage3_unit4_bn1_gamma: Tensor[(1024), float32], %stage3_unit4_bn1_beta: Tensor[(1024), float32], %stage3_unit4_bn1_moving_mean: Tensor[(1024), float32], %stage3_unit4_bn1_moving_var: Tensor[(1024), float32], %stage3_unit4_conv1_weight: Tensor[(256, 1024, 1, 1), float32], %stage3_unit4_bn2_gamma: Tensor[(256), float32], %stage3_unit4_bn2_beta: Tensor[(256), float32], %stage3_unit4_bn2_moving_mean: Tensor[(256), float32], %stage3_unit4_bn2_moving_var: Tensor[(256), float32], %stage3_unit4_conv2_weight: Tensor[(256, 256, 3, 3), float32], %stage3_unit4_bn3_gamma: Tensor[(256), float32], %stage3_unit4_bn3_beta: Tensor[(256), float32], %stage3_unit4_bn3_moving_mean: Tensor[(256), float32], %stage3_unit4_bn3_moving_var: Tensor[(256), float32], %stage3_unit4_conv3_weight: Tensor[(1024, 256, 1, 1), float32], %stage3_unit5_bn1_gamma: Tensor[(1024), float32], %stage3_unit5_bn1_beta: Tensor[(1024), float32], %stage3_unit5_bn1_moving_mean: Tensor[(1024), float32], %stage3_unit5_bn1_moving_var: Tensor[(1024), float32], %stage3_unit5_conv1_weight: Tensor[(256, 1024, 1, 1), float32], %stage3_unit5_bn2_gamma: Tensor[(256), float32], %stage3_unit5_bn2_beta: Tensor[(256), float32], %stage3_unit5_bn2_moving_mean: Tensor[(256), float32], %stage3_unit5_bn2_moving_var: Tensor[(256), float32], %stage3_unit5_conv2_weight: Tensor[(256, 256, 3, 3), float32], %stage3_unit5_bn3_gamma: Tensor[(256), float32], %stage3_unit5_bn3_beta: Tensor[(256), float32], %stage3_unit5_bn3_moving_mean: Tensor[(256), float32], %stage3_unit5_bn3_moving_var: Tensor[(256), float32], %stage3_unit5_conv3_weight: Tensor[(1024, 256, 1, 1), float32], %stage3_unit6_bn1_gamma: Tensor[(1024), float32], %stage3_unit6_bn1_beta: Tensor[(1024), float32], %stage3_unit6_bn1_moving_mean: Tensor[(1024), float32], %stage3_unit6_bn1_moving_var: Tensor[(1024), float32], %stage3_unit6_conv1_weight: Tensor[(256, 1024, 1, 1), float32], %stage3_unit6_bn2_gamma: Tensor[(256), float32], %stage3_unit6_bn2_beta: Tensor[(256), float32], 
%stage3_unit6_bn2_moving_mean: Tensor[(256), float32], %stage3_unit6_bn2_moving_var: Tensor[(256), float32], %stage3_unit6_conv2_weight: Tensor[(256, 256, 3, 3), float32], %stage3_unit6_bn3_gamma: Tensor[(256), float32], %stage3_unit6_bn3_beta: Tensor[(256), float32], %stage3_unit6_bn3_moving_mean: Tensor[(256), float32], %stage3_unit6_bn3_moving_var: Tensor[(256), float32], %stage3_unit6_conv3_weight: Tensor[(1024, 256, 1, 1), float32], %stage4_unit1_bn1_gamma: Tensor[(1024), float32], %stage4_unit1_bn1_beta: Tensor[(1024), float32], %stage4_unit1_bn1_moving_mean: Tensor[(1024), float32], %stage4_unit1_bn1_moving_var: Tensor[(1024), float32], %stage4_unit1_conv1_weight: Tensor[(512, 1024, 1, 1), float32], %stage4_unit1_bn2_gamma: Tensor[(512), float32], %stage4_unit1_bn2_beta: Tensor[(512), float32], %stage4_unit1_bn2_moving_mean: Tensor[(512), float32], %stage4_unit1_bn2_moving_var: Tensor[(512), float32], %stage4_unit1_conv2_weight: Tensor[(512, 512, 3, 3), float32], %stage4_unit1_bn3_gamma: Tensor[(512), float32], %stage4_unit1_bn3_beta: Tensor[(512), float32], %stage4_unit1_bn3_moving_mean: Tensor[(512), float32], %stage4_unit1_bn3_moving_var: Tensor[(512), float32], %stage4_unit1_conv3_weight: Tensor[(2048, 512, 1, 1), float32], %stage4_unit1_sc_weight: Tensor[(2048, 1024, 1, 1), float32], %stage4_unit2_bn1_gamma: Tensor[(2048), float32], %stage4_unit2_bn1_beta: Tensor[(2048), float32], %stage4_unit2_bn1_moving_mean: Tensor[(2048), float32], %stage4_unit2_bn1_moving_var: Tensor[(2048), float32], %stage4_unit2_conv1_weight: Tensor[(512, 2048, 1, 1), float32], %stage4_unit2_bn2_gamma: Tensor[(512), float32], %stage4_unit2_bn2_beta: Tensor[(512), float32], %stage4_unit2_bn2_moving_mean: Tensor[(512), float32], %stage4_unit2_bn2_moving_var: Tensor[(512), float32], %stage4_unit2_conv2_weight: Tensor[(512, 512, 3, 3), float32], %stage4_unit2_bn3_gamma: Tensor[(512), float32], %stage4_unit2_bn3_beta: Tensor[(512), float32], %stage4_unit2_bn3_moving_mean: Tensor[(512), float32], %stage4_unit2_bn3_moving_var: Tensor[(512), float32], %stage4_unit2_conv3_weight: Tensor[(2048, 512, 1, 1), float32], %stage4_unit3_bn1_gamma: Tensor[(2048), float32], %stage4_unit3_bn1_beta: Tensor[(2048), float32], %stage4_unit3_bn1_moving_mean: Tensor[(2048), float32], %stage4_unit3_bn1_moving_var: Tensor[(2048), float32], %stage4_unit3_conv1_weight: Tensor[(512, 2048, 1, 1), float32], %stage4_unit3_bn2_gamma: Tensor[(512), float32], %stage4_unit3_bn2_beta: Tensor[(512), float32], %stage4_unit3_bn2_moving_mean: Tensor[(512), float32], %stage4_unit3_bn2_moving_var: Tensor[(512), float32], %stage4_unit3_conv2_weight: Tensor[(512, 512, 3, 3), float32], %stage4_unit3_bn3_gamma: Tensor[(512), float32], %stage4_unit3_bn3_beta: Tensor[(512), float32], %stage4_unit3_bn3_moving_mean: Tensor[(512), float32], %stage4_unit3_bn3_moving_var: Tensor[(512), float32], %stage4_unit3_conv3_weight: Tensor[(2048, 512, 1, 1), float32], %bn1_gamma: Tensor[(2048), float32], %bn1_beta: Tensor[(2048), float32], %bn1_moving_mean: Tensor[(2048), float32], %bn1_moving_var: Tensor[(2048), float32], %fc1_weight: Tensor[(1000, 2048), float32], %fc1_bias: Tensor[(1000), float32]) -> Tensor[(1, 1000), float32] { | |
| let %storage_0: Storage[] = memory.alloc_storage(602112 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][0]) /* ty=Storage[] */; | |
| let %tensor_0: Tensor[(1, 224, 224, 3), float32] = memory.alloc_tensor(%storage_0, 0 /* ty=int64 */, meta[relay.Constant][0] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][0]) /* ty=Tensor[(1, 224, 224, 3), float32] */; | |
| %10 = fn (%p0: Tensor[(3), float32], %p1: Tensor[(1, 3, 224, 224), float32], %p2: Tensor[(3), float32], %p3: Tensor[(3), float32], Primitive=1) -> Tensor[(1, 224, 224, 3), float32] { | |
| %0 = add(%p0, 2e-05f /* ty=float32 */) /* ty=Tensor[(3), float32] */; | |
| %1 = sqrt(%0) /* ty=Tensor[(3), float32] */; | |
| %2 = divide(1f /* ty=float32 */, %1) /* ty=Tensor[(3), float32] */; | |
| %3 = expand_dims(%2, axis=1, num_newaxis=2) /* ty=Tensor[(3, 1, 1), float32] */; | |
| %4 = multiply(%p1, %3) /* ty=Tensor[(1, 3, 224, 224), float32] */; | |
| %5 = negative(%p2) /* ty=Tensor[(3), float32] */; | |
| %6 = multiply(%5, %2) /* ty=Tensor[(3), float32] */; | |
| %7 = add(%6, %p3) /* ty=Tensor[(3), float32] */; | |
| %8 = expand_dims(%7, axis=1, num_newaxis=2) /* ty=Tensor[(3, 1, 1), float32] */; | |
| %9 = add(%4, %8) /* ty=Tensor[(1, 3, 224, 224), float32] */; | |
| layout_transform(%9, src_layout="NCHW", dst_layout="NHWC") /* ty=Tensor[(1, 224, 224, 3), float32] */ | |
| }; | |
| %11 = (%bn_data_moving_var, %data, %bn_data_moving_mean, %bn_data_beta); | |
| %12 = (%tensor_0,); | |
| let %x: () = vm.invoke_tvm_op(%10, %11, %12) /* ty=() */; | |
| let %x1: Tensor[(1, 224, 224, 3), float32] = %tensor_0; | |
| let %storage_01: Storage[] = memory.alloc_storage(256 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][1]) /* ty=Storage[] */; | |
| let %tensor_01: Tensor[(64), float32] = memory.alloc_tensor(%storage_01, 0 /* ty=int64 */, meta[relay.Constant][1] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][1]) /* ty=Tensor[(64), float32] */; | |
| %16 = fn (%p01: Tensor[(64), float32], %p11: Tensor[(64), float32], Primitive=1) -> Tensor[(64), float32] { | |
| %13 = add(%p01, 2e-05f /* ty=float32 */) /* ty=Tensor[(64), float32] */; | |
| %14 = sqrt(%13) /* ty=Tensor[(64), float32] */; | |
| %15 = divide(1f /* ty=float32 */, %14) /* ty=Tensor[(64), float32] */; | |
| multiply(%15, %p11) /* ty=Tensor[(64), float32] */ | |
| }; | |
| %17 = (%bn0_moving_var, %bn0_gamma); | |
| %18 = (%tensor_01,); | |
| let %x2: () = vm.invoke_tvm_op(%16, %17, %18) /* ty=() */; | |
| let %x3: Tensor[(64), float32] = %tensor_01; | |
| let %storage_02: Storage[] = memory.alloc_storage(37632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][2]) /* ty=Storage[] */; | |
| let %tensor_02: Tensor[(7, 7, 3, 64), float32] = memory.alloc_tensor(%storage_02, 0 /* ty=int64 */, meta[relay.Constant][2] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][2]) /* ty=Tensor[(7, 7, 3, 64), float32] */; | |
| %20 = fn (%p02: Tensor[(64, 3, 7, 7), float32], %p12: Tensor[(64), float32], Primitive=1) -> Tensor[(7, 7, 3, 64), float32] { | |
| %19 = layout_transform(%p02, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(7, 7, 3, 64), float32] */; | |
| multiply(%19, %p12) /* ty=Tensor[(7, 7, 3, 64), float32] */ | |
| }; | |
| %21 = (%conv0_weight, %x3); | |
| %22 = (%tensor_02,); | |
| let %x4: () = vm.invoke_tvm_op(%20, %21, %22) /* ty=() */; | |
| let %x5: Tensor[(7, 7, 3, 64), float32] = %tensor_02; | |
| let %storage_03: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][3]) /* ty=Storage[] */; | |
| let %tensor_03: Tensor[(1, 112, 112, 64), float32] = memory.alloc_tensor(%storage_03, 0 /* ty=int64 */, meta[relay.Constant][3] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][3]) /* ty=Tensor[(1, 112, 112, 64), float32] */; | |
| %29 = fn (%p03: Tensor[(1, 224, 224, 3), float32], %p13: Tensor[(7, 7, 3, 64), float32], %p21: Tensor[(64), float32], %p31: Tensor[(64), float32], %p4: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 112, 112, 64), float32] { | |
| %23 = nn.conv2d(%p03, %p13, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 112, 112, 64), float32] */; | |
| %24 = negative(%p21) /* ty=Tensor[(64), float32] */; | |
| %25 = multiply(%24, %p31) /* ty=Tensor[(64), float32] */; | |
| %26 = add(%25, %p4) /* ty=Tensor[(64), float32] */; | |
| %27 = expand_dims(%26, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 64), float32] */; | |
| %28 = add(%23, %27) /* ty=Tensor[(1, 112, 112, 64), float32] */; | |
| nn.relu(%28) /* ty=Tensor[(1, 112, 112, 64), float32] */ | |
| }; | |
| %30 = (%x1, %x5, %bn0_moving_mean, %x3, %bn0_beta); | |
| %31 = (%tensor_03,); | |
| let %x6: () = vm.invoke_tvm_op(%29, %30, %31) /* ty=() */; | |
| let %x7: Tensor[(1, 112, 112, 64), float32] = %tensor_03; | |
| let %storage_04: Storage[] = memory.alloc_storage(256 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][4]) /* ty=Storage[] */; | |
| let %tensor_04: Tensor[(64), float32] = memory.alloc_tensor(%storage_04, 0 /* ty=int64 */, meta[relay.Constant][4] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][4]) /* ty=Tensor[(64), float32] */; | |
| %35 = fn (%p04: Tensor[(64), float32], %p14: Tensor[(64), float32], Primitive=1) -> Tensor[(64), float32] { | |
| %32 = add(%p04, 2e-05f /* ty=float32 */) /* ty=Tensor[(64), float32] */; | |
| %33 = sqrt(%32) /* ty=Tensor[(64), float32] */; | |
| %34 = divide(1f /* ty=float32 */, %33) /* ty=Tensor[(64), float32] */; | |
| multiply(%34, %p14) /* ty=Tensor[(64), float32] */ | |
| }; | |
| %36 = (%stage1_unit1_bn1_moving_var, %stage1_unit1_bn1_gamma); | |
| %37 = (%tensor_04,); | |
| let %x8: () = vm.invoke_tvm_op(%35, %36, %37) /* ty=() */; | |
| let %x9: Tensor[(64), float32] = %tensor_04; | |
| let %storage_05: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][5]) /* ty=Storage[] */; | |
| let %tensor_05: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_05, 0 /* ty=int64 */, meta[relay.Constant][5] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][5]) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %46 = fn (%p05: Tensor[(1, 112, 112, 64), float32], %p15: Tensor[(64), float32], %p22: Tensor[(64), float32], %p32: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] { | |
| %38 = nn.max_pool2d(%p05, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1], layout="NHWC") /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %39 = expand_dims(%p15, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 64), float32] */; | |
| %40 = multiply(%38, %39) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %41 = negative(%p22) /* ty=Tensor[(64), float32] */; | |
| %42 = multiply(%41, %p15) /* ty=Tensor[(64), float32] */; | |
| %43 = add(%42, %p32) /* ty=Tensor[(64), float32] */; | |
| %44 = expand_dims(%43, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 64), float32] */; | |
| %45 = add(%40, %44) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| nn.relu(%45) /* ty=Tensor[(1, 56, 56, 64), float32] */ | |
| }; | |
| %47 = (%x7, %x9, %stage1_unit1_bn1_moving_mean, %stage1_unit1_bn1_beta); | |
| %48 = (%tensor_05,); | |
| let %x10: () = vm.invoke_tvm_op(%46, %47, %48) /* ty=() */; | |
| let %x11: Tensor[(1, 56, 56, 64), float32] = %tensor_05; | |
| let %storage_06: Storage[] = memory.alloc_storage(256 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][6]) /* ty=Storage[] */; | |
| let %tensor_06: Tensor[(64), float32] = memory.alloc_tensor(%storage_06, 0 /* ty=int64 */, meta[relay.Constant][6] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][6]) /* ty=Tensor[(64), float32] */; | |
| %52 = fn (%p06: Tensor[(64), float32], %p16: Tensor[(64), float32], Primitive=1) -> Tensor[(64), float32] { | |
| %49 = add(%p06, 2e-05f /* ty=float32 */) /* ty=Tensor[(64), float32] */; | |
| %50 = sqrt(%49) /* ty=Tensor[(64), float32] */; | |
| %51 = divide(1f /* ty=float32 */, %50) /* ty=Tensor[(64), float32] */; | |
| multiply(%51, %p16) /* ty=Tensor[(64), float32] */ | |
| }; | |
| %53 = (%stage1_unit1_bn2_moving_var, %stage1_unit1_bn2_gamma); | |
| %54 = (%tensor_06,); | |
| let %x12: () = vm.invoke_tvm_op(%52, %53, %54) /* ty=() */; | |
| let %x13: Tensor[(64), float32] = %tensor_06; | |
| let %storage_07: Storage[] = memory.alloc_storage(16384 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][7]) /* ty=Storage[] */; | |
| let %tensor_07: Tensor[(1, 1, 64, 64), float32] = memory.alloc_tensor(%storage_07, 0 /* ty=int64 */, meta[relay.Constant][7] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][7]) /* ty=Tensor[(1, 1, 64, 64), float32] */; | |
| %56 = fn (%p07: Tensor[(64, 64, 1, 1), float32], %p17: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 1, 64, 64), float32] { | |
| %55 = layout_transform(%p07, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 64, 64), float32] */; | |
| multiply(%55, %p17) /* ty=Tensor[(1, 1, 64, 64), float32] */ | |
| }; | |
| %57 = (%stage1_unit1_conv1_weight, %x13); | |
| %58 = (%tensor_07,); | |
| let %x14: () = vm.invoke_tvm_op(%56, %57, %58) /* ty=() */; | |
| let %x15: Tensor[(1, 1, 64, 64), float32] = %tensor_07; | |
| let %storage_08: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][8]) /* ty=Storage[] */; | |
| let %tensor_08: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_08, 0 /* ty=int64 */, meta[relay.Constant][8] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][8]) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %65 = fn (%p08: Tensor[(1, 56, 56, 64), float32], %p18: Tensor[(1, 1, 64, 64), float32], %p23: Tensor[(64), float32], %p33: Tensor[(64), float32], %p41: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] { | |
| %59 = nn.conv2d(%p08, %p18, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %60 = negative(%p23) /* ty=Tensor[(64), float32] */; | |
| %61 = multiply(%60, %p33) /* ty=Tensor[(64), float32] */; | |
| %62 = add(%61, %p41) /* ty=Tensor[(64), float32] */; | |
| %63 = expand_dims(%62, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 64), float32] */; | |
| %64 = add(%59, %63) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| nn.relu(%64) /* ty=Tensor[(1, 56, 56, 64), float32] */ | |
| }; | |
| %66 = (%x11, %x15, %stage1_unit1_bn2_moving_mean, %x13, %stage1_unit1_bn2_beta); | |
| %67 = (%tensor_08,); | |
| let %x16: () = vm.invoke_tvm_op(%65, %66, %67) /* ty=() */; | |
| let %x17: Tensor[(1, 56, 56, 64), float32] = %tensor_08; | |
| let %storage_09: Storage[] = memory.alloc_storage(256 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][9]) /* ty=Storage[] */; | |
| let %tensor_09: Tensor[(64), float32] = memory.alloc_tensor(%storage_09, 0 /* ty=int64 */, meta[relay.Constant][9] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][9]) /* ty=Tensor[(64), float32] */; | |
| %71 = fn (%p09: Tensor[(64), float32], %p19: Tensor[(64), float32], Primitive=1) -> Tensor[(64), float32] { | |
| %68 = add(%p09, 2e-05f /* ty=float32 */) /* ty=Tensor[(64), float32] */; | |
| %69 = sqrt(%68) /* ty=Tensor[(64), float32] */; | |
| %70 = divide(1f /* ty=float32 */, %69) /* ty=Tensor[(64), float32] */; | |
| multiply(%70, %p19) /* ty=Tensor[(64), float32] */ | |
| }; | |
| %72 = (%stage1_unit1_bn3_moving_var, %stage1_unit1_bn3_gamma); | |
| %73 = (%tensor_09,); | |
| let %x18: () = vm.invoke_tvm_op(%71, %72, %73) /* ty=() */; | |
| let %x19: Tensor[(64), float32] = %tensor_09; | |
| let %storage_010: Storage[] = memory.alloc_storage(147456 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][10]) /* ty=Storage[] */; | |
| let %tensor_010: Tensor[(3, 3, 64, 64), float32] = memory.alloc_tensor(%storage_010, 0 /* ty=int64 */, meta[relay.Constant][10] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][10]) /* ty=Tensor[(3, 3, 64, 64), float32] */; | |
| %75 = fn (%p010: Tensor[(64, 64, 3, 3), float32], %p110: Tensor[(64), float32], Primitive=1) -> Tensor[(3, 3, 64, 64), float32] { | |
| %74 = layout_transform(%p010, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 64, 64), float32] */; | |
| multiply(%74, %p110) /* ty=Tensor[(3, 3, 64, 64), float32] */ | |
| }; | |
| %76 = (%stage1_unit1_conv2_weight, %x19); | |
| %77 = (%tensor_010,); | |
| let %x20: () = vm.invoke_tvm_op(%75, %76, %77) /* ty=() */; | |
| let %x21: Tensor[(3, 3, 64, 64), float32] = %tensor_010; | |
| let %storage_011: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][11]) /* ty=Storage[] */; | |
| let %tensor_011: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_011, 0 /* ty=int64 */, meta[relay.Constant][11] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][11]) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %84 = fn (%p011: Tensor[(1, 56, 56, 64), float32], %p111: Tensor[(3, 3, 64, 64), float32], %p24: Tensor[(64), float32], %p34: Tensor[(64), float32], %p42: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] { | |
| %78 = nn.conv2d(%p011, %p111, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %79 = negative(%p24) /* ty=Tensor[(64), float32] */; | |
| %80 = multiply(%79, %p34) /* ty=Tensor[(64), float32] */; | |
| %81 = add(%80, %p42) /* ty=Tensor[(64), float32] */; | |
| %82 = expand_dims(%81, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 64), float32] */; | |
| %83 = add(%78, %82) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| nn.relu(%83) /* ty=Tensor[(1, 56, 56, 64), float32] */ | |
| }; | |
| %85 = (%x17, %x21, %stage1_unit1_bn3_moving_mean, %x19, %stage1_unit1_bn3_beta); | |
| %86 = (%tensor_011,); | |
| let %x22: () = vm.invoke_tvm_op(%84, %85, %86) /* ty=() */; | |
| let %x23: Tensor[(1, 56, 56, 64), float32] = %tensor_011; | |
| let %storage_012: Storage[] = memory.alloc_storage(65536 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][12]) /* ty=Storage[] */; | |
| let %tensor_012: Tensor[(1, 1, 64, 256), float32] = memory.alloc_tensor(%storage_012, 0 /* ty=int64 */, meta[relay.Constant][12] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][12]) /* ty=Tensor[(1, 1, 64, 256), float32] */; | |
| %87 = fn (%p012: Tensor[(256, 64, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 64, 256), float32] { | |
| layout_transform(%p012, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 64, 256), float32] */ | |
| }; | |
| %88 = (%stage1_unit1_conv3_weight,); | |
| %89 = (%tensor_012,); | |
| let %x24: () = vm.invoke_tvm_op(%87, %88, %89) /* ty=() */; | |
| let %x25: Tensor[(1, 1, 64, 256), float32] = %tensor_012; | |
| let %storage_013: Storage[] = memory.alloc_storage(65536 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][13]) /* ty=Storage[] */; | |
| let %tensor_013: Tensor[(1, 1, 64, 256), float32] = memory.alloc_tensor(%storage_013, 0 /* ty=int64 */, meta[relay.Constant][13] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][13]) /* ty=Tensor[(1, 1, 64, 256), float32] */; | |
| %90 = fn (%p013: Tensor[(256, 64, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 64, 256), float32] { | |
| layout_transform(%p013, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 64, 256), float32] */ | |
| }; | |
| %91 = (%stage1_unit1_sc_weight,); | |
| %92 = (%tensor_013,); | |
| let %x26: () = vm.invoke_tvm_op(%90, %91, %92) /* ty=() */; | |
| let %x27: Tensor[(1, 1, 64, 256), float32] = %tensor_013; | |
| let %storage_014: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][14]) /* ty=Storage[] */; | |
| let %tensor_014: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_014, 0 /* ty=int64 */, meta[relay.Constant][14] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][14]) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %93 = fn (%p014: Tensor[(1, 56, 56, 64), float32], %p112: Tensor[(1, 1, 64, 256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] { | |
| nn.conv2d(%p014, %p112, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 256), float32] */ | |
| }; | |
| %94 = (%x11, %x27); | |
| %95 = (%tensor_014,); | |
| let %x28: () = vm.invoke_tvm_op(%93, %94, %95) /* ty=() */; | |
| let %x29: Tensor[(1, 56, 56, 256), float32] = %tensor_014; | |
| let %storage_015: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][15]) /* ty=Storage[] */; | |
| let %tensor_015: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_015, 0 /* ty=int64 */, meta[relay.Constant][15] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][15]) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %97 = fn (%p015: Tensor[(1, 56, 56, 64), float32], %p113: Tensor[(1, 1, 64, 256), float32], %p25: Tensor[(1, 56, 56, 256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] { | |
| %96 = nn.conv2d(%p015, %p113, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| add(%96, %p25) /* ty=Tensor[(1, 56, 56, 256), float32] */ | |
| }; | |
| %98 = (%x23, %x25, %x29); | |
| %99 = (%tensor_015,); | |
| let %x30: () = vm.invoke_tvm_op(%97, %98, %99) /* ty=() */; | |
| let %x31: Tensor[(1, 56, 56, 256), float32] = %tensor_015; | |
| let %storage_016: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][16]) /* ty=Storage[] */; | |
| let %tensor_016: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_016, 0 /* ty=int64 */, meta[relay.Constant][16] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][16]) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %111 = fn (%p016: Tensor[(256), float32], %p114: Tensor[(256), float32], %p26: Tensor[(1, 56, 56, 256), float32], %p35: Tensor[(256), float32], %p43: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] { | |
| %100 = add(%p016, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %101 = sqrt(%100) /* ty=Tensor[(256), float32] */; | |
| %102 = divide(1f /* ty=float32 */, %101) /* ty=Tensor[(256), float32] */; | |
| %103 = multiply(%102, %p114) /* ty=Tensor[(256), float32] */; | |
| %104 = expand_dims(%103, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %105 = multiply(%p26, %104) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %106 = negative(%p35) /* ty=Tensor[(256), float32] */; | |
| %107 = multiply(%106, %103) /* ty=Tensor[(256), float32] */; | |
| %108 = add(%107, %p43) /* ty=Tensor[(256), float32] */; | |
| %109 = expand_dims(%108, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %110 = add(%105, %109) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| nn.relu(%110) /* ty=Tensor[(1, 56, 56, 256), float32] */ | |
| }; | |
| %112 = (%stage1_unit2_bn1_moving_var, %stage1_unit2_bn1_gamma, %x31, %stage1_unit2_bn1_moving_mean, %stage1_unit2_bn1_beta); | |
| %113 = (%tensor_016,); | |
| let %x32: () = vm.invoke_tvm_op(%111, %112, %113) /* ty=() */; | |
| let %x33: Tensor[(1, 56, 56, 256), float32] = %tensor_016; | |
| let %storage_017: Storage[] = memory.alloc_storage(256 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][17]) /* ty=Storage[] */; | |
| let %tensor_017: Tensor[(64), float32] = memory.alloc_tensor(%storage_017, 0 /* ty=int64 */, meta[relay.Constant][17] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][17]) /* ty=Tensor[(64), float32] */; | |
| %117 = fn (%p017: Tensor[(64), float32], %p115: Tensor[(64), float32], Primitive=1) -> Tensor[(64), float32] { | |
| %114 = add(%p017, 2e-05f /* ty=float32 */) /* ty=Tensor[(64), float32] */; | |
| %115 = sqrt(%114) /* ty=Tensor[(64), float32] */; | |
| %116 = divide(1f /* ty=float32 */, %115) /* ty=Tensor[(64), float32] */; | |
| multiply(%116, %p115) /* ty=Tensor[(64), float32] */ | |
| }; | |
| %118 = (%stage1_unit2_bn2_moving_var, %stage1_unit2_bn2_gamma); | |
| %119 = (%tensor_017,); | |
| let %x34: () = vm.invoke_tvm_op(%117, %118, %119) /* ty=() */; | |
| let %x35: Tensor[(64), float32] = %tensor_017; | |
| let %storage_018: Storage[] = memory.alloc_storage(65536 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][18]) /* ty=Storage[] */; | |
| let %tensor_018: Tensor[(1, 1, 256, 64), float32] = memory.alloc_tensor(%storage_018, 0 /* ty=int64 */, meta[relay.Constant][18] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][18]) /* ty=Tensor[(1, 1, 256, 64), float32] */; | |
| %121 = fn (%p018: Tensor[(64, 256, 1, 1), float32], %p116: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 1, 256, 64), float32] { | |
| %120 = layout_transform(%p018, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 256, 64), float32] */; | |
| multiply(%120, %p116) /* ty=Tensor[(1, 1, 256, 64), float32] */ | |
| }; | |
| %122 = (%stage1_unit2_conv1_weight, %x35); | |
| %123 = (%tensor_018,); | |
| let %x36: () = vm.invoke_tvm_op(%121, %122, %123) /* ty=() */; | |
| let %x37: Tensor[(1, 1, 256, 64), float32] = %tensor_018; | |
| let %storage_019: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][19]) /* ty=Storage[] */; | |
| let %tensor_019: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_019, 0 /* ty=int64 */, meta[relay.Constant][19] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][19]) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %130 = fn (%p019: Tensor[(1, 56, 56, 256), float32], %p117: Tensor[(1, 1, 256, 64), float32], %p27: Tensor[(64), float32], %p36: Tensor[(64), float32], %p44: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] { | |
| %124 = nn.conv2d(%p019, %p117, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %125 = negative(%p27) /* ty=Tensor[(64), float32] */; | |
| %126 = multiply(%125, %p36) /* ty=Tensor[(64), float32] */; | |
| %127 = add(%126, %p44) /* ty=Tensor[(64), float32] */; | |
| %128 = expand_dims(%127, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 64), float32] */; | |
| %129 = add(%124, %128) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| nn.relu(%129) /* ty=Tensor[(1, 56, 56, 64), float32] */ | |
| }; | |
| %131 = (%x33, %x37, %stage1_unit2_bn2_moving_mean, %x35, %stage1_unit2_bn2_beta); | |
| %132 = (%tensor_019,); | |
| let %x38: () = vm.invoke_tvm_op(%130, %131, %132) /* ty=() */; | |
| let %x39: Tensor[(1, 56, 56, 64), float32] = %tensor_019; | |
| let %storage_020: Storage[] = memory.alloc_storage(256 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][20]) /* ty=Storage[] */; | |
| let %tensor_020: Tensor[(64), float32] = memory.alloc_tensor(%storage_020, 0 /* ty=int64 */, meta[relay.Constant][20] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][20]) /* ty=Tensor[(64), float32] */; | |
| %136 = fn (%p020: Tensor[(64), float32], %p118: Tensor[(64), float32], Primitive=1) -> Tensor[(64), float32] { | |
| %133 = add(%p020, 2e-05f /* ty=float32 */) /* ty=Tensor[(64), float32] */; | |
| %134 = sqrt(%133) /* ty=Tensor[(64), float32] */; | |
| %135 = divide(1f /* ty=float32 */, %134) /* ty=Tensor[(64), float32] */; | |
| multiply(%135, %p118) /* ty=Tensor[(64), float32] */ | |
| }; | |
| %137 = (%stage1_unit2_bn3_moving_var, %stage1_unit2_bn3_gamma); | |
| %138 = (%tensor_020,); | |
| let %x40: () = vm.invoke_tvm_op(%136, %137, %138) /* ty=() */; | |
| let %x41: Tensor[(64), float32] = %tensor_020; | |
| let %storage_021: Storage[] = memory.alloc_storage(147456 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][21]) /* ty=Storage[] */; | |
| let %tensor_021: Tensor[(3, 3, 64, 64), float32] = memory.alloc_tensor(%storage_021, 0 /* ty=int64 */, meta[relay.Constant][21] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][21]) /* ty=Tensor[(3, 3, 64, 64), float32] */; | |
| %140 = fn (%p021: Tensor[(64, 64, 3, 3), float32], %p119: Tensor[(64), float32], Primitive=1) -> Tensor[(3, 3, 64, 64), float32] { | |
| %139 = layout_transform(%p021, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 64, 64), float32] */; | |
| multiply(%139, %p119) /* ty=Tensor[(3, 3, 64, 64), float32] */ | |
| }; | |
| %141 = (%stage1_unit2_conv2_weight, %x41); | |
| %142 = (%tensor_021,); | |
| let %x42: () = vm.invoke_tvm_op(%140, %141, %142) /* ty=() */; | |
| let %x43: Tensor[(3, 3, 64, 64), float32] = %tensor_021; | |
| let %storage_022: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][22]) /* ty=Storage[] */; | |
| let %tensor_022: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_022, 0 /* ty=int64 */, meta[relay.Constant][22] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][22]) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %149 = fn (%p022: Tensor[(1, 56, 56, 64), float32], %p120: Tensor[(3, 3, 64, 64), float32], %p28: Tensor[(64), float32], %p37: Tensor[(64), float32], %p45: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] { | |
| %143 = nn.conv2d(%p022, %p120, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %144 = negative(%p28) /* ty=Tensor[(64), float32] */; | |
| %145 = multiply(%144, %p37) /* ty=Tensor[(64), float32] */; | |
| %146 = add(%145, %p45) /* ty=Tensor[(64), float32] */; | |
| %147 = expand_dims(%146, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 64), float32] */; | |
| %148 = add(%143, %147) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| nn.relu(%148) /* ty=Tensor[(1, 56, 56, 64), float32] */ | |
| }; | |
| %150 = (%x39, %x43, %stage1_unit2_bn3_moving_mean, %x41, %stage1_unit2_bn3_beta); | |
| %151 = (%tensor_022,); | |
| let %x44: () = vm.invoke_tvm_op(%149, %150, %151) /* ty=() */; | |
| let %x45: Tensor[(1, 56, 56, 64), float32] = %tensor_022; | |
| let %storage_023: Storage[] = memory.alloc_storage(65536 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][23]) /* ty=Storage[] */; | |
| let %tensor_023: Tensor[(1, 1, 64, 256), float32] = memory.alloc_tensor(%storage_023, 0 /* ty=int64 */, meta[relay.Constant][23] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][23]) /* ty=Tensor[(1, 1, 64, 256), float32] */; | |
| %152 = fn (%p023: Tensor[(256, 64, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 64, 256), float32] { | |
| layout_transform(%p023, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 64, 256), float32] */ | |
| }; | |
| %153 = (%stage1_unit2_conv3_weight,); | |
| %154 = (%tensor_023,); | |
| let %x46: () = vm.invoke_tvm_op(%152, %153, %154) /* ty=() */; | |
| let %x47: Tensor[(1, 1, 64, 256), float32] = %tensor_023; | |
| let %storage_024: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][24]) /* ty=Storage[] */; | |
| let %tensor_024: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_024, 0 /* ty=int64 */, meta[relay.Constant][24] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][24]) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %156 = fn (%p024: Tensor[(1, 56, 56, 64), float32], %p121: Tensor[(1, 1, 64, 256), float32], %p29: Tensor[(1, 56, 56, 256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] { | |
| %155 = nn.conv2d(%p024, %p121, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| add(%155, %p29) /* ty=Tensor[(1, 56, 56, 256), float32] */ | |
| }; | |
| %157 = (%x45, %x47, %x31); | |
| %158 = (%tensor_024,); | |
| let %x48: () = vm.invoke_tvm_op(%156, %157, %158) /* ty=() */; | |
| let %x49: Tensor[(1, 56, 56, 256), float32] = %tensor_024; | |
| let %storage_025: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][25]) /* ty=Storage[] */; | |
| let %tensor_025: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_025, 0 /* ty=int64 */, meta[relay.Constant][25] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][25]) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %170 = fn (%p025: Tensor[(256), float32], %p122: Tensor[(256), float32], %p210: Tensor[(1, 56, 56, 256), float32], %p38: Tensor[(256), float32], %p46: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] { | |
| %159 = add(%p025, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %160 = sqrt(%159) /* ty=Tensor[(256), float32] */; | |
| %161 = divide(1f /* ty=float32 */, %160) /* ty=Tensor[(256), float32] */; | |
| %162 = multiply(%161, %p122) /* ty=Tensor[(256), float32] */; | |
| %163 = expand_dims(%162, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %164 = multiply(%p210, %163) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %165 = negative(%p38) /* ty=Tensor[(256), float32] */; | |
| %166 = multiply(%165, %162) /* ty=Tensor[(256), float32] */; | |
| %167 = add(%166, %p46) /* ty=Tensor[(256), float32] */; | |
| %168 = expand_dims(%167, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %169 = add(%164, %168) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| nn.relu(%169) /* ty=Tensor[(1, 56, 56, 256), float32] */ | |
| }; | |
| %171 = (%stage1_unit3_bn1_moving_var, %stage1_unit3_bn1_gamma, %x49, %stage1_unit3_bn1_moving_mean, %stage1_unit3_bn1_beta); | |
| %172 = (%tensor_025,); | |
| let %x50: () = vm.invoke_tvm_op(%170, %171, %172) /* ty=() */; | |
| let %x51: Tensor[(1, 56, 56, 256), float32] = %tensor_025; | |
| let %storage_026: Storage[] = memory.alloc_storage(256 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][26]) /* ty=Storage[] */; | |
| let %tensor_026: Tensor[(64), float32] = memory.alloc_tensor(%storage_026, 0 /* ty=int64 */, meta[relay.Constant][26] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][26]) /* ty=Tensor[(64), float32] */; | |
| %176 = fn (%p026: Tensor[(64), float32], %p123: Tensor[(64), float32], Primitive=1) -> Tensor[(64), float32] { | |
| %173 = add(%p026, 2e-05f /* ty=float32 */) /* ty=Tensor[(64), float32] */; | |
| %174 = sqrt(%173) /* ty=Tensor[(64), float32] */; | |
| %175 = divide(1f /* ty=float32 */, %174) /* ty=Tensor[(64), float32] */; | |
| multiply(%175, %p123) /* ty=Tensor[(64), float32] */ | |
| }; | |
| %177 = (%stage1_unit3_bn2_moving_var, %stage1_unit3_bn2_gamma); | |
| %178 = (%tensor_026,); | |
| let %x52: () = vm.invoke_tvm_op(%176, %177, %178) /* ty=() */; | |
| let %x53: Tensor[(64), float32] = %tensor_026; | |
| let %storage_027: Storage[] = memory.alloc_storage(65536 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][27]) /* ty=Storage[] */; | |
| let %tensor_027: Tensor[(1, 1, 256, 64), float32] = memory.alloc_tensor(%storage_027, 0 /* ty=int64 */, meta[relay.Constant][27] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][27]) /* ty=Tensor[(1, 1, 256, 64), float32] */; | |
| %180 = fn (%p027: Tensor[(64, 256, 1, 1), float32], %p124: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 1, 256, 64), float32] { | |
| %179 = layout_transform(%p027, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 256, 64), float32] */; | |
| multiply(%179, %p124) /* ty=Tensor[(1, 1, 256, 64), float32] */ | |
| }; | |
| %181 = (%stage1_unit3_conv1_weight, %x53); | |
| %182 = (%tensor_027,); | |
| let %x54: () = vm.invoke_tvm_op(%180, %181, %182) /* ty=() */; | |
| let %x55: Tensor[(1, 1, 256, 64), float32] = %tensor_027; | |
| let %storage_028: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][28]) /* ty=Storage[] */; | |
| let %tensor_028: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_028, 0 /* ty=int64 */, meta[relay.Constant][28] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][28]) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %189 = fn (%p028: Tensor[(1, 56, 56, 256), float32], %p125: Tensor[(1, 1, 256, 64), float32], %p211: Tensor[(64), float32], %p39: Tensor[(64), float32], %p47: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] { | |
| %183 = nn.conv2d(%p028, %p125, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %184 = negative(%p211) /* ty=Tensor[(64), float32] */; | |
| %185 = multiply(%184, %p39) /* ty=Tensor[(64), float32] */; | |
| %186 = add(%185, %p47) /* ty=Tensor[(64), float32] */; | |
| %187 = expand_dims(%186, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 64), float32] */; | |
| %188 = add(%183, %187) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| nn.relu(%188) /* ty=Tensor[(1, 56, 56, 64), float32] */ | |
| }; | |
| %190 = (%x51, %x55, %stage1_unit3_bn2_moving_mean, %x53, %stage1_unit3_bn2_beta); | |
| %191 = (%tensor_028,); | |
| let %x56: () = vm.invoke_tvm_op(%189, %190, %191) /* ty=() */; | |
| let %x57: Tensor[(1, 56, 56, 64), float32] = %tensor_028; | |
| let %storage_029: Storage[] = memory.alloc_storage(256 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][29]) /* ty=Storage[] */; | |
| let %tensor_029: Tensor[(64), float32] = memory.alloc_tensor(%storage_029, 0 /* ty=int64 */, meta[relay.Constant][29] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][29]) /* ty=Tensor[(64), float32] */; | |
| %195 = fn (%p029: Tensor[(64), float32], %p126: Tensor[(64), float32], Primitive=1) -> Tensor[(64), float32] { | |
| %192 = add(%p029, 2e-05f /* ty=float32 */) /* ty=Tensor[(64), float32] */; | |
| %193 = sqrt(%192) /* ty=Tensor[(64), float32] */; | |
| %194 = divide(1f /* ty=float32 */, %193) /* ty=Tensor[(64), float32] */; | |
| multiply(%194, %p126) /* ty=Tensor[(64), float32] */ | |
| }; | |
| %196 = (%stage1_unit3_bn3_moving_var, %stage1_unit3_bn3_gamma); | |
| %197 = (%tensor_029,); | |
| let %x58: () = vm.invoke_tvm_op(%195, %196, %197) /* ty=() */; | |
| let %x59: Tensor[(64), float32] = %tensor_029; | |
| let %storage_030: Storage[] = memory.alloc_storage(147456 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][30]) /* ty=Storage[] */; | |
| let %tensor_030: Tensor[(3, 3, 64, 64), float32] = memory.alloc_tensor(%storage_030, 0 /* ty=int64 */, meta[relay.Constant][30] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][30]) /* ty=Tensor[(3, 3, 64, 64), float32] */; | |
| %199 = fn (%p030: Tensor[(64, 64, 3, 3), float32], %p127: Tensor[(64), float32], Primitive=1) -> Tensor[(3, 3, 64, 64), float32] { | |
| %198 = layout_transform(%p030, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 64, 64), float32] */; | |
| multiply(%198, %p127) /* ty=Tensor[(3, 3, 64, 64), float32] */ | |
| }; | |
| %200 = (%stage1_unit3_conv2_weight, %x59); | |
| %201 = (%tensor_030,); | |
| let %x60: () = vm.invoke_tvm_op(%199, %200, %201) /* ty=() */; | |
| let %x61: Tensor[(3, 3, 64, 64), float32] = %tensor_030; | |
| let %storage_031: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][31]) /* ty=Storage[] */; | |
| let %tensor_031: Tensor[(1, 56, 56, 64), float32] = memory.alloc_tensor(%storage_031, 0 /* ty=int64 */, meta[relay.Constant][31] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][31]) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %208 = fn (%p031: Tensor[(1, 56, 56, 64), float32], %p128: Tensor[(3, 3, 64, 64), float32], %p212: Tensor[(64), float32], %p310: Tensor[(64), float32], %p48: Tensor[(64), float32], Primitive=1) -> Tensor[(1, 56, 56, 64), float32] { | |
| %202 = nn.conv2d(%p031, %p128, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| %203 = negative(%p212) /* ty=Tensor[(64), float32] */; | |
| %204 = multiply(%203, %p310) /* ty=Tensor[(64), float32] */; | |
| %205 = add(%204, %p48) /* ty=Tensor[(64), float32] */; | |
| %206 = expand_dims(%205, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 64), float32] */; | |
| %207 = add(%202, %206) /* ty=Tensor[(1, 56, 56, 64), float32] */; | |
| nn.relu(%207) /* ty=Tensor[(1, 56, 56, 64), float32] */ | |
| }; | |
| %209 = (%x57, %x61, %stage1_unit3_bn3_moving_mean, %x59, %stage1_unit3_bn3_beta); | |
| %210 = (%tensor_031,); | |
| let %x62: () = vm.invoke_tvm_op(%208, %209, %210) /* ty=() */; | |
| let %x63: Tensor[(1, 56, 56, 64), float32] = %tensor_031; | |
| let %storage_032: Storage[] = memory.alloc_storage(65536 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][32]) /* ty=Storage[] */; | |
| let %tensor_032: Tensor[(1, 1, 64, 256), float32] = memory.alloc_tensor(%storage_032, 0 /* ty=int64 */, meta[relay.Constant][32] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][32]) /* ty=Tensor[(1, 1, 64, 256), float32] */; | |
| %211 = fn (%p032: Tensor[(256, 64, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 64, 256), float32] { | |
| layout_transform(%p032, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 64, 256), float32] */ | |
| }; | |
| %212 = (%stage1_unit3_conv3_weight,); | |
| %213 = (%tensor_032,); | |
| let %x64: () = vm.invoke_tvm_op(%211, %212, %213) /* ty=() */; | |
| let %x65: Tensor[(1, 1, 64, 256), float32] = %tensor_032; | |
| let %storage_033: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][33]) /* ty=Storage[] */; | |
| let %tensor_033: Tensor[(256), float32] = memory.alloc_tensor(%storage_033, 0 /* ty=int64 */, meta[relay.Constant][33] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][33]) /* ty=Tensor[(256), float32] */; | |
| %217 = fn (%p033: Tensor[(256), float32], %p129: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %214 = add(%p033, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %215 = sqrt(%214) /* ty=Tensor[(256), float32] */; | |
| %216 = divide(1f /* ty=float32 */, %215) /* ty=Tensor[(256), float32] */; | |
| multiply(%216, %p129) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %218 = (%stage2_unit1_bn1_moving_var, %stage2_unit1_bn1_gamma); | |
| %219 = (%tensor_033,); | |
| let %x66: () = vm.invoke_tvm_op(%217, %218, %219) /* ty=() */; | |
| let %x67: Tensor[(256), float32] = %tensor_033; | |
| let %storage_034: Storage[] = memory.alloc_storage(3211264 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][34]) /* ty=Storage[] */; | |
| let %tensor_034: Tensor[(1, 56, 56, 256), float32] = memory.alloc_tensor(%storage_034, 0 /* ty=int64 */, meta[relay.Constant][34] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][34]) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %229 = fn (%p034: Tensor[(1, 56, 56, 64), float32], %p130: Tensor[(1, 1, 64, 256), float32], %p213: Tensor[(1, 56, 56, 256), float32], %p311: Tensor[(256), float32], %p49: Tensor[(256), float32], %p5: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 56, 56, 256), float32] { | |
| %220 = nn.conv2d(%p034, %p130, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %221 = add(%220, %p213) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %222 = expand_dims(%p311, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %223 = multiply(%221, %222) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| %224 = negative(%p49) /* ty=Tensor[(256), float32] */; | |
| %225 = multiply(%224, %p311) /* ty=Tensor[(256), float32] */; | |
| %226 = add(%225, %p5) /* ty=Tensor[(256), float32] */; | |
| %227 = expand_dims(%226, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %228 = add(%223, %227) /* ty=Tensor[(1, 56, 56, 256), float32] */; | |
| nn.relu(%228) /* ty=Tensor[(1, 56, 56, 256), float32] */ | |
| }; | |
| %230 = (%x63, %x65, %x49, %x67, %stage2_unit1_bn1_moving_mean, %stage2_unit1_bn1_beta); | |
| %231 = (%tensor_034,); | |
| let %x68: () = vm.invoke_tvm_op(%229, %230, %231) /* ty=() */; | |
| let %x69: Tensor[(1, 56, 56, 256), float32] = %tensor_034; | |
| let %storage_035: Storage[] = memory.alloc_storage(512 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][35]) /* ty=Storage[] */; | |
| let %tensor_035: Tensor[(128), float32] = memory.alloc_tensor(%storage_035, 0 /* ty=int64 */, meta[relay.Constant][35] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][35]) /* ty=Tensor[(128), float32] */; | |
| %235 = fn (%p035: Tensor[(128), float32], %p131: Tensor[(128), float32], Primitive=1) -> Tensor[(128), float32] { | |
| %232 = add(%p035, 2e-05f /* ty=float32 */) /* ty=Tensor[(128), float32] */; | |
| %233 = sqrt(%232) /* ty=Tensor[(128), float32] */; | |
| %234 = divide(1f /* ty=float32 */, %233) /* ty=Tensor[(128), float32] */; | |
| multiply(%234, %p131) /* ty=Tensor[(128), float32] */ | |
| }; | |
| %236 = (%stage2_unit1_bn2_moving_var, %stage2_unit1_bn2_gamma); | |
| %237 = (%tensor_035,); | |
| let %x70: () = vm.invoke_tvm_op(%235, %236, %237) /* ty=() */; | |
| let %x71: Tensor[(128), float32] = %tensor_035; | |
| let %storage_036: Storage[] = memory.alloc_storage(131072 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][36]) /* ty=Storage[] */; | |
| let %tensor_036: Tensor[(1, 1, 256, 128), float32] = memory.alloc_tensor(%storage_036, 0 /* ty=int64 */, meta[relay.Constant][36] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][36]) /* ty=Tensor[(1, 1, 256, 128), float32] */; | |
| %239 = fn (%p036: Tensor[(128, 256, 1, 1), float32], %p132: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 1, 256, 128), float32] { | |
| %238 = layout_transform(%p036, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 256, 128), float32] */; | |
| multiply(%238, %p132) /* ty=Tensor[(1, 1, 256, 128), float32] */ | |
| }; | |
| %240 = (%stage2_unit1_conv1_weight, %x71); | |
| %241 = (%tensor_036,); | |
| let %x72: () = vm.invoke_tvm_op(%239, %240, %241) /* ty=() */; | |
| let %x73: Tensor[(1, 1, 256, 128), float32] = %tensor_036; | |
| let %storage_037: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][37]) /* ty=Storage[] */; | |
| let %tensor_037: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_037, 0 /* ty=int64 */, meta[relay.Constant][37] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][37]) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %248 = fn (%p037: Tensor[(1, 56, 56, 256), float32], %p133: Tensor[(1, 1, 256, 128), float32], %p214: Tensor[(128), float32], %p312: Tensor[(128), float32], %p410: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] { | |
| %242 = nn.conv2d(%p037, %p133, strides=[2, 2], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %243 = negative(%p214) /* ty=Tensor[(128), float32] */; | |
| %244 = multiply(%243, %p312) /* ty=Tensor[(128), float32] */; | |
| %245 = add(%244, %p410) /* ty=Tensor[(128), float32] */; | |
| %246 = expand_dims(%245, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 128), float32] */; | |
| %247 = add(%242, %246) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| nn.relu(%247) /* ty=Tensor[(1, 28, 28, 128), float32] */ | |
| }; | |
| %249 = (%x69, %x73, %stage2_unit1_bn2_moving_mean, %x71, %stage2_unit1_bn2_beta); | |
| %250 = (%tensor_037,); | |
| let %x74: () = vm.invoke_tvm_op(%248, %249, %250) /* ty=() */; | |
| let %x75: Tensor[(1, 28, 28, 128), float32] = %tensor_037; | |
| let %storage_038: Storage[] = memory.alloc_storage(512 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][38]) /* ty=Storage[] */; | |
| let %tensor_038: Tensor[(128), float32] = memory.alloc_tensor(%storage_038, 0 /* ty=int64 */, meta[relay.Constant][38] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][38]) /* ty=Tensor[(128), float32] */; | |
| %254 = fn (%p038: Tensor[(128), float32], %p134: Tensor[(128), float32], Primitive=1) -> Tensor[(128), float32] { | |
| %251 = add(%p038, 2e-05f /* ty=float32 */) /* ty=Tensor[(128), float32] */; | |
| %252 = sqrt(%251) /* ty=Tensor[(128), float32] */; | |
| %253 = divide(1f /* ty=float32 */, %252) /* ty=Tensor[(128), float32] */; | |
| multiply(%253, %p134) /* ty=Tensor[(128), float32] */ | |
| }; | |
| %255 = (%stage2_unit1_bn3_moving_var, %stage2_unit1_bn3_gamma); | |
| %256 = (%tensor_038,); | |
| let %x76: () = vm.invoke_tvm_op(%254, %255, %256) /* ty=() */; | |
| let %x77: Tensor[(128), float32] = %tensor_038; | |
| let %storage_039: Storage[] = memory.alloc_storage(589824 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][39]) /* ty=Storage[] */; | |
| let %tensor_039: Tensor[(3, 3, 128, 128), float32] = memory.alloc_tensor(%storage_039, 0 /* ty=int64 */, meta[relay.Constant][39] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][39]) /* ty=Tensor[(3, 3, 128, 128), float32] */; | |
| %258 = fn (%p039: Tensor[(128, 128, 3, 3), float32], %p135: Tensor[(128), float32], Primitive=1) -> Tensor[(3, 3, 128, 128), float32] { | |
| %257 = layout_transform(%p039, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 128, 128), float32] */; | |
| multiply(%257, %p135) /* ty=Tensor[(3, 3, 128, 128), float32] */ | |
| }; | |
| %259 = (%stage2_unit1_conv2_weight, %x77); | |
| %260 = (%tensor_039,); | |
| let %x78: () = vm.invoke_tvm_op(%258, %259, %260) /* ty=() */; | |
| let %x79: Tensor[(3, 3, 128, 128), float32] = %tensor_039; | |
| let %storage_040: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][40]) /* ty=Storage[] */; | |
| let %tensor_040: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_040, 0 /* ty=int64 */, meta[relay.Constant][40] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][40]) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %267 = fn (%p040: Tensor[(1, 28, 28, 128), float32], %p136: Tensor[(3, 3, 128, 128), float32], %p215: Tensor[(128), float32], %p313: Tensor[(128), float32], %p411: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] { | |
| %261 = nn.conv2d(%p040, %p136, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %262 = negative(%p215) /* ty=Tensor[(128), float32] */; | |
| %263 = multiply(%262, %p313) /* ty=Tensor[(128), float32] */; | |
| %264 = add(%263, %p411) /* ty=Tensor[(128), float32] */; | |
| %265 = expand_dims(%264, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 128), float32] */; | |
| %266 = add(%261, %265) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| nn.relu(%266) /* ty=Tensor[(1, 28, 28, 128), float32] */ | |
| }; | |
| %268 = (%x75, %x79, %stage2_unit1_bn3_moving_mean, %x77, %stage2_unit1_bn3_beta); | |
| %269 = (%tensor_040,); | |
| let %x80: () = vm.invoke_tvm_op(%267, %268, %269) /* ty=() */; | |
| let %x81: Tensor[(1, 28, 28, 128), float32] = %tensor_040; | |
| let %storage_041: Storage[] = memory.alloc_storage(262144 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][41]) /* ty=Storage[] */; | |
| let %tensor_041: Tensor[(1, 1, 128, 512), float32] = memory.alloc_tensor(%storage_041, 0 /* ty=int64 */, meta[relay.Constant][41] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][41]) /* ty=Tensor[(1, 1, 128, 512), float32] */; | |
| %270 = fn (%p041: Tensor[(512, 128, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 128, 512), float32] { | |
| layout_transform(%p041, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 128, 512), float32] */ | |
| }; | |
| %271 = (%stage2_unit1_conv3_weight,); | |
| %272 = (%tensor_041,); | |
| let %x82: () = vm.invoke_tvm_op(%270, %271, %272) /* ty=() */; | |
| let %x83: Tensor[(1, 1, 128, 512), float32] = %tensor_041; | |
| let %storage_042: Storage[] = memory.alloc_storage(524288 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][42]) /* ty=Storage[] */; | |
| let %tensor_042: Tensor[(1, 1, 256, 512), float32] = memory.alloc_tensor(%storage_042, 0 /* ty=int64 */, meta[relay.Constant][42] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][42]) /* ty=Tensor[(1, 1, 256, 512), float32] */; | |
| %273 = fn (%p042: Tensor[(512, 256, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 256, 512), float32] { | |
| layout_transform(%p042, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 256, 512), float32] */ | |
| }; | |
| %274 = (%stage2_unit1_sc_weight,); | |
| %275 = (%tensor_042,); | |
| let %x84: () = vm.invoke_tvm_op(%273, %274, %275) /* ty=() */; | |
| let %x85: Tensor[(1, 1, 256, 512), float32] = %tensor_042; | |
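| /* Projection shortcut for stage2_unit1: %x85 is %stage2_unit1_sc_weight in HWIO layout, consumed by the strided 1x1 conv over %x69 below. */ | |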
| let %storage_043: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][43]) /* ty=Storage[] */; | |
| let %tensor_043: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_043, 0 /* ty=int64 */, meta[relay.Constant][43] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][43]) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %276 = fn (%p043: Tensor[(1, 56, 56, 256), float32], %p137: Tensor[(1, 1, 256, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] { | |
| nn.conv2d(%p043, %p137, strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 512), float32] */ | |
| }; | |
| %277 = (%x69, %x85); | |
| %278 = (%tensor_043,); | |
| let %x86: () = vm.invoke_tvm_op(%276, %277, %278) /* ty=() */; | |
| let %x87: Tensor[(1, 28, 28, 512), float32] = %tensor_043; | |
| let %storage_044: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][44]) /* ty=Storage[] */; | |
| let %tensor_044: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_044, 0 /* ty=int64 */, meta[relay.Constant][44] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][44]) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %280 = fn (%p044: Tensor[(1, 28, 28, 128), float32], %p138: Tensor[(1, 1, 128, 512), float32], %p216: Tensor[(1, 28, 28, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] { | |
| %279 = nn.conv2d(%p044, %p138, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| add(%279, %p216) /* ty=Tensor[(1, 28, 28, 512), float32] */ | |
| }; | |
| %281 = (%x81, %x83, %x87); | |
| %282 = (%tensor_044,); | |
| let %x88: () = vm.invoke_tvm_op(%280, %281, %282) /* ty=() */; | |
| let %x89: Tensor[(1, 28, 28, 512), float32] = %tensor_044; | |
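| /* %x89: stage2_unit1 output = 1x1 conv3 of %x81 plus the projection shortcut %x87. stage2_unit2 (identity shortcut) starts next. */ | |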
| let %storage_045: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][45]) /* ty=Storage[] */; | |
| let %tensor_045: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_045, 0 /* ty=int64 */, meta[relay.Constant][45] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][45]) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %294 = fn (%p045: Tensor[(512), float32], %p139: Tensor[(512), float32], %p217: Tensor[(1, 28, 28, 512), float32], %p314: Tensor[(512), float32], %p412: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] { | |
| %283 = add(%p045, 2e-05f /* ty=float32 */) /* ty=Tensor[(512), float32] */; | |
| %284 = sqrt(%283) /* ty=Tensor[(512), float32] */; | |
| %285 = divide(1f /* ty=float32 */, %284) /* ty=Tensor[(512), float32] */; | |
| %286 = multiply(%285, %p139) /* ty=Tensor[(512), float32] */; | |
| %287 = expand_dims(%286, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %288 = multiply(%p217, %287) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %289 = negative(%p314) /* ty=Tensor[(512), float32] */; | |
| %290 = multiply(%289, %286) /* ty=Tensor[(512), float32] */; | |
| %291 = add(%290, %p412) /* ty=Tensor[(512), float32] */; | |
| %292 = expand_dims(%291, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %293 = add(%288, %292) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| nn.relu(%293) /* ty=Tensor[(1, 28, 28, 512), float32] */ | |
| }; | |
| %295 = (%stage2_unit2_bn1_moving_var, %stage2_unit2_bn1_gamma, %x89, %stage2_unit2_bn1_moving_mean, %stage2_unit2_bn1_beta); | |
| %296 = (%tensor_045,); | |
| let %x90: () = vm.invoke_tvm_op(%294, %295, %296) /* ty=() */; | |
| let %x91: Tensor[(1, 28, 28, 512), float32] = %tensor_045; | |
| let %storage_046: Storage[] = memory.alloc_storage(512 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][46]) /* ty=Storage[] */; | |
| let %tensor_046: Tensor[(128), float32] = memory.alloc_tensor(%storage_046, 0 /* ty=int64 */, meta[relay.Constant][46] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][46]) /* ty=Tensor[(128), float32] */; | |
| %300 = fn (%p046: Tensor[(128), float32], %p140: Tensor[(128), float32], Primitive=1) -> Tensor[(128), float32] { | |
| %297 = add(%p046, 2e-05f /* ty=float32 */) /* ty=Tensor[(128), float32] */; | |
| %298 = sqrt(%297) /* ty=Tensor[(128), float32] */; | |
| %299 = divide(1f /* ty=float32 */, %298) /* ty=Tensor[(128), float32] */; | |
| multiply(%299, %p140) /* ty=Tensor[(128), float32] */ | |
| }; | |
| %301 = (%stage2_unit2_bn2_moving_var, %stage2_unit2_bn2_gamma); | |
| %302 = (%tensor_046,); | |
| let %x92: () = vm.invoke_tvm_op(%300, %301, %302) /* ty=() */; | |
| let %x93: Tensor[(128), float32] = %tensor_046; | |
| let %storage_047: Storage[] = memory.alloc_storage(262144 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][47]) /* ty=Storage[] */; | |
| let %tensor_047: Tensor[(1, 1, 512, 128), float32] = memory.alloc_tensor(%storage_047, 0 /* ty=int64 */, meta[relay.Constant][47] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][47]) /* ty=Tensor[(1, 1, 512, 128), float32] */; | |
| %304 = fn (%p047: Tensor[(128, 512, 1, 1), float32], %p141: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 1, 512, 128), float32] { | |
| %303 = layout_transform(%p047, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 512, 128), float32] */; | |
| multiply(%303, %p141) /* ty=Tensor[(1, 1, 512, 128), float32] */ | |
| }; | |
| %305 = (%stage2_unit2_conv1_weight, %x93); | |
| %306 = (%tensor_047,); | |
| let %x94: () = vm.invoke_tvm_op(%304, %305, %306) /* ty=() */; | |
| let %x95: Tensor[(1, 1, 512, 128), float32] = %tensor_047; | |
| let %storage_048: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][48]) /* ty=Storage[] */; | |
| let %tensor_048: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_048, 0 /* ty=int64 */, meta[relay.Constant][48] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][48]) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %313 = fn (%p048: Tensor[(1, 28, 28, 512), float32], %p142: Tensor[(1, 1, 512, 128), float32], %p218: Tensor[(128), float32], %p315: Tensor[(128), float32], %p413: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] { | |
| %307 = nn.conv2d(%p048, %p142, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %308 = negative(%p218) /* ty=Tensor[(128), float32] */; | |
| %309 = multiply(%308, %p315) /* ty=Tensor[(128), float32] */; | |
| %310 = add(%309, %p413) /* ty=Tensor[(128), float32] */; | |
| %311 = expand_dims(%310, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 128), float32] */; | |
| %312 = add(%307, %311) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| nn.relu(%312) /* ty=Tensor[(1, 28, 28, 128), float32] */ | |
| }; | |
| %314 = (%x91, %x95, %stage2_unit2_bn2_moving_mean, %x93, %stage2_unit2_bn2_beta); | |
| %315 = (%tensor_048,); | |
| let %x96: () = vm.invoke_tvm_op(%313, %314, %315) /* ty=() */; | |
| let %x97: Tensor[(1, 28, 28, 128), float32] = %tensor_048; | |
| let %storage_049: Storage[] = memory.alloc_storage(512 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][49]) /* ty=Storage[] */; | |
| let %tensor_049: Tensor[(128), float32] = memory.alloc_tensor(%storage_049, 0 /* ty=int64 */, meta[relay.Constant][49] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][49]) /* ty=Tensor[(128), float32] */; | |
| %319 = fn (%p049: Tensor[(128), float32], %p143: Tensor[(128), float32], Primitive=1) -> Tensor[(128), float32] { | |
| %316 = add(%p049, 2e-05f /* ty=float32 */) /* ty=Tensor[(128), float32] */; | |
| %317 = sqrt(%316) /* ty=Tensor[(128), float32] */; | |
| %318 = divide(1f /* ty=float32 */, %317) /* ty=Tensor[(128), float32] */; | |
| multiply(%318, %p143) /* ty=Tensor[(128), float32] */ | |
| }; | |
| %320 = (%stage2_unit2_bn3_moving_var, %stage2_unit2_bn3_gamma); | |
| %321 = (%tensor_049,); | |
| let %x98: () = vm.invoke_tvm_op(%319, %320, %321) /* ty=() */; | |
| let %x99: Tensor[(128), float32] = %tensor_049; | |
| let %storage_050: Storage[] = memory.alloc_storage(589824 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][50]) /* ty=Storage[] */; | |
| let %tensor_050: Tensor[(3, 3, 128, 128), float32] = memory.alloc_tensor(%storage_050, 0 /* ty=int64 */, meta[relay.Constant][50] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][50]) /* ty=Tensor[(3, 3, 128, 128), float32] */; | |
| %323 = fn (%p050: Tensor[(128, 128, 3, 3), float32], %p144: Tensor[(128), float32], Primitive=1) -> Tensor[(3, 3, 128, 128), float32] { | |
| %322 = layout_transform(%p050, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 128, 128), float32] */; | |
| multiply(%322, %p144) /* ty=Tensor[(3, 3, 128, 128), float32] */ | |
| }; | |
| %324 = (%stage2_unit2_conv2_weight, %x99); | |
| %325 = (%tensor_050,); | |
| let %x100: () = vm.invoke_tvm_op(%323, %324, %325) /* ty=() */; | |
| let %x101: Tensor[(3, 3, 128, 128), float32] = %tensor_050; | |
| let %storage_051: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][51]) /* ty=Storage[] */; | |
| let %tensor_051: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_051, 0 /* ty=int64 */, meta[relay.Constant][51] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][51]) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %332 = fn (%p051: Tensor[(1, 28, 28, 128), float32], %p145: Tensor[(3, 3, 128, 128), float32], %p219: Tensor[(128), float32], %p316: Tensor[(128), float32], %p414: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] { | |
| %326 = nn.conv2d(%p051, %p145, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %327 = negative(%p219) /* ty=Tensor[(128), float32] */; | |
| %328 = multiply(%327, %p316) /* ty=Tensor[(128), float32] */; | |
| %329 = add(%328, %p414) /* ty=Tensor[(128), float32] */; | |
| %330 = expand_dims(%329, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 128), float32] */; | |
| %331 = add(%326, %330) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| nn.relu(%331) /* ty=Tensor[(1, 28, 28, 128), float32] */ | |
| }; | |
| %333 = (%x97, %x101, %stage2_unit2_bn3_moving_mean, %x99, %stage2_unit2_bn3_beta); | |
| %334 = (%tensor_051,); | |
| let %x102: () = vm.invoke_tvm_op(%332, %333, %334) /* ty=() */; | |
| let %x103: Tensor[(1, 28, 28, 128), float32] = %tensor_051; | |
| let %storage_052: Storage[] = memory.alloc_storage(262144 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][52]) /* ty=Storage[] */; | |
| let %tensor_052: Tensor[(1, 1, 128, 512), float32] = memory.alloc_tensor(%storage_052, 0 /* ty=int64 */, meta[relay.Constant][52] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][52]) /* ty=Tensor[(1, 1, 128, 512), float32] */; | |
| %335 = fn (%p052: Tensor[(512, 128, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 128, 512), float32] { | |
| layout_transform(%p052, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 128, 512), float32] */ | |
| }; | |
| %336 = (%stage2_unit2_conv3_weight,); | |
| %337 = (%tensor_052,); | |
| let %x104: () = vm.invoke_tvm_op(%335, %336, %337) /* ty=() */; | |
| let %x105: Tensor[(1, 1, 128, 512), float32] = %tensor_052; | |
| let %storage_053: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][53]) /* ty=Storage[] */; | |
| let %tensor_053: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_053, 0 /* ty=int64 */, meta[relay.Constant][53] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][53]) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %339 = fn (%p053: Tensor[(1, 28, 28, 128), float32], %p146: Tensor[(1, 1, 128, 512), float32], %p220: Tensor[(1, 28, 28, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] { | |
| %338 = nn.conv2d(%p053, %p146, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| add(%338, %p220) /* ty=Tensor[(1, 28, 28, 512), float32] */ | |
| }; | |
| %340 = (%x103, %x105, %x89); | |
| %341 = (%tensor_053,); | |
| let %x106: () = vm.invoke_tvm_op(%339, %340, %341) /* ty=() */; | |
| let %x107: Tensor[(1, 28, 28, 512), float32] = %tensor_053; | |
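| /* %x107: stage2_unit2 output = conv3 of %x103 plus the identity shortcut %x89. stage2_unit3 starts next. */ | |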
| let %storage_054: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][54]) /* ty=Storage[] */; | |
| let %tensor_054: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_054, 0 /* ty=int64 */, meta[relay.Constant][54] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][54]) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %353 = fn (%p054: Tensor[(512), float32], %p147: Tensor[(512), float32], %p221: Tensor[(1, 28, 28, 512), float32], %p317: Tensor[(512), float32], %p415: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] { | |
| %342 = add(%p054, 2e-05f /* ty=float32 */) /* ty=Tensor[(512), float32] */; | |
| %343 = sqrt(%342) /* ty=Tensor[(512), float32] */; | |
| %344 = divide(1f /* ty=float32 */, %343) /* ty=Tensor[(512), float32] */; | |
| %345 = multiply(%344, %p147) /* ty=Tensor[(512), float32] */; | |
| %346 = expand_dims(%345, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %347 = multiply(%p221, %346) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %348 = negative(%p317) /* ty=Tensor[(512), float32] */; | |
| %349 = multiply(%348, %345) /* ty=Tensor[(512), float32] */; | |
| %350 = add(%349, %p415) /* ty=Tensor[(512), float32] */; | |
| %351 = expand_dims(%350, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %352 = add(%347, %351) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| nn.relu(%352) /* ty=Tensor[(1, 28, 28, 512), float32] */ | |
| }; | |
| %354 = (%stage2_unit3_bn1_moving_var, %stage2_unit3_bn1_gamma, %x107, %stage2_unit3_bn1_moving_mean, %stage2_unit3_bn1_beta); | |
| %355 = (%tensor_054,); | |
| let %x108: () = vm.invoke_tvm_op(%353, %354, %355) /* ty=() */; | |
| let %x109: Tensor[(1, 28, 28, 512), float32] = %tensor_054; | |
| let %storage_055: Storage[] = memory.alloc_storage(512 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][55]) /* ty=Storage[] */; | |
| let %tensor_055: Tensor[(128), float32] = memory.alloc_tensor(%storage_055, 0 /* ty=int64 */, meta[relay.Constant][55] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][55]) /* ty=Tensor[(128), float32] */; | |
| %359 = fn (%p055: Tensor[(128), float32], %p148: Tensor[(128), float32], Primitive=1) -> Tensor[(128), float32] { | |
| %356 = add(%p055, 2e-05f /* ty=float32 */) /* ty=Tensor[(128), float32] */; | |
| %357 = sqrt(%356) /* ty=Tensor[(128), float32] */; | |
| %358 = divide(1f /* ty=float32 */, %357) /* ty=Tensor[(128), float32] */; | |
| multiply(%358, %p148) /* ty=Tensor[(128), float32] */ | |
| }; | |
| %360 = (%stage2_unit3_bn2_moving_var, %stage2_unit3_bn2_gamma); | |
| %361 = (%tensor_055,); | |
| let %x110: () = vm.invoke_tvm_op(%359, %360, %361) /* ty=() */; | |
| let %x111: Tensor[(128), float32] = %tensor_055; | |
| let %storage_056: Storage[] = memory.alloc_storage(262144 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][56]) /* ty=Storage[] */; | |
| let %tensor_056: Tensor[(1, 1, 512, 128), float32] = memory.alloc_tensor(%storage_056, 0 /* ty=int64 */, meta[relay.Constant][56] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][56]) /* ty=Tensor[(1, 1, 512, 128), float32] */; | |
| %363 = fn (%p056: Tensor[(128, 512, 1, 1), float32], %p149: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 1, 512, 128), float32] { | |
| %362 = layout_transform(%p056, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 512, 128), float32] */; | |
| multiply(%362, %p149) /* ty=Tensor[(1, 1, 512, 128), float32] */ | |
| }; | |
| %364 = (%stage2_unit3_conv1_weight, %x111); | |
| %365 = (%tensor_056,); | |
| let %x112: () = vm.invoke_tvm_op(%363, %364, %365) /* ty=() */; | |
| let %x113: Tensor[(1, 1, 512, 128), float32] = %tensor_056; | |
| let %storage_057: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][57]) /* ty=Storage[] */; | |
| let %tensor_057: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_057, 0 /* ty=int64 */, meta[relay.Constant][57] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][57]) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %372 = fn (%p057: Tensor[(1, 28, 28, 512), float32], %p150: Tensor[(1, 1, 512, 128), float32], %p222: Tensor[(128), float32], %p318: Tensor[(128), float32], %p416: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] { | |
| %366 = nn.conv2d(%p057, %p150, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %367 = negative(%p222) /* ty=Tensor[(128), float32] */; | |
| %368 = multiply(%367, %p318) /* ty=Tensor[(128), float32] */; | |
| %369 = add(%368, %p416) /* ty=Tensor[(128), float32] */; | |
| %370 = expand_dims(%369, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 128), float32] */; | |
| %371 = add(%366, %370) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| nn.relu(%371) /* ty=Tensor[(1, 28, 28, 128), float32] */ | |
| }; | |
| %373 = (%x109, %x113, %stage2_unit3_bn2_moving_mean, %x111, %stage2_unit3_bn2_beta); | |
| %374 = (%tensor_057,); | |
| let %x114: () = vm.invoke_tvm_op(%372, %373, %374) /* ty=() */; | |
| let %x115: Tensor[(1, 28, 28, 128), float32] = %tensor_057; | |
| let %storage_058: Storage[] = memory.alloc_storage(512 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][58]) /* ty=Storage[] */; | |
| let %tensor_058: Tensor[(128), float32] = memory.alloc_tensor(%storage_058, 0 /* ty=int64 */, meta[relay.Constant][58] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][58]) /* ty=Tensor[(128), float32] */; | |
| %378 = fn (%p058: Tensor[(128), float32], %p151: Tensor[(128), float32], Primitive=1) -> Tensor[(128), float32] { | |
| %375 = add(%p058, 2e-05f /* ty=float32 */) /* ty=Tensor[(128), float32] */; | |
| %376 = sqrt(%375) /* ty=Tensor[(128), float32] */; | |
| %377 = divide(1f /* ty=float32 */, %376) /* ty=Tensor[(128), float32] */; | |
| multiply(%377, %p151) /* ty=Tensor[(128), float32] */ | |
| }; | |
| %379 = (%stage2_unit3_bn3_moving_var, %stage2_unit3_bn3_gamma); | |
| %380 = (%tensor_058,); | |
| let %x116: () = vm.invoke_tvm_op(%378, %379, %380) /* ty=() */; | |
| let %x117: Tensor[(128), float32] = %tensor_058; | |
| let %storage_059: Storage[] = memory.alloc_storage(589824 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][59]) /* ty=Storage[] */; | |
| let %tensor_059: Tensor[(3, 3, 128, 128), float32] = memory.alloc_tensor(%storage_059, 0 /* ty=int64 */, meta[relay.Constant][59] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][59]) /* ty=Tensor[(3, 3, 128, 128), float32] */; | |
| %382 = fn (%p059: Tensor[(128, 128, 3, 3), float32], %p152: Tensor[(128), float32], Primitive=1) -> Tensor[(3, 3, 128, 128), float32] { | |
| %381 = layout_transform(%p059, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 128, 128), float32] */; | |
| multiply(%381, %p152) /* ty=Tensor[(3, 3, 128, 128), float32] */ | |
| }; | |
| %383 = (%stage2_unit3_conv2_weight, %x117); | |
| %384 = (%tensor_059,); | |
| let %x118: () = vm.invoke_tvm_op(%382, %383, %384) /* ty=() */; | |
| let %x119: Tensor[(3, 3, 128, 128), float32] = %tensor_059; | |
| let %storage_060: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][60]) /* ty=Storage[] */; | |
| let %tensor_060: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_060, 0 /* ty=int64 */, meta[relay.Constant][60] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][60]) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %391 = fn (%p060: Tensor[(1, 28, 28, 128), float32], %p153: Tensor[(3, 3, 128, 128), float32], %p223: Tensor[(128), float32], %p319: Tensor[(128), float32], %p417: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] { | |
| %385 = nn.conv2d(%p060, %p153, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %386 = negative(%p223) /* ty=Tensor[(128), float32] */; | |
| %387 = multiply(%386, %p319) /* ty=Tensor[(128), float32] */; | |
| %388 = add(%387, %p417) /* ty=Tensor[(128), float32] */; | |
| %389 = expand_dims(%388, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 128), float32] */; | |
| %390 = add(%385, %389) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| nn.relu(%390) /* ty=Tensor[(1, 28, 28, 128), float32] */ | |
| }; | |
| %392 = (%x115, %x119, %stage2_unit3_bn3_moving_mean, %x117, %stage2_unit3_bn3_beta); | |
| %393 = (%tensor_060,); | |
| let %x120: () = vm.invoke_tvm_op(%391, %392, %393) /* ty=() */; | |
| let %x121: Tensor[(1, 28, 28, 128), float32] = %tensor_060; | |
| let %storage_061: Storage[] = memory.alloc_storage(262144 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][61]) /* ty=Storage[] */; | |
| let %tensor_061: Tensor[(1, 1, 128, 512), float32] = memory.alloc_tensor(%storage_061, 0 /* ty=int64 */, meta[relay.Constant][61] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][61]) /* ty=Tensor[(1, 1, 128, 512), float32] */; | |
| %394 = fn (%p061: Tensor[(512, 128, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 128, 512), float32] { | |
| layout_transform(%p061, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 128, 512), float32] */ | |
| }; | |
| %395 = (%stage2_unit3_conv3_weight,); | |
| %396 = (%tensor_061,); | |
| let %x122: () = vm.invoke_tvm_op(%394, %395, %396) /* ty=() */; | |
| let %x123: Tensor[(1, 1, 128, 512), float32] = %tensor_061; | |
| let %storage_062: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][62]) /* ty=Storage[] */; | |
| let %tensor_062: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_062, 0 /* ty=int64 */, meta[relay.Constant][62] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][62]) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %398 = fn (%p062: Tensor[(1, 28, 28, 128), float32], %p154: Tensor[(1, 1, 128, 512), float32], %p224: Tensor[(1, 28, 28, 512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] { | |
| %397 = nn.conv2d(%p062, %p154, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| add(%397, %p224) /* ty=Tensor[(1, 28, 28, 512), float32] */ | |
| }; | |
| %399 = (%x121, %x123, %x107); | |
| %400 = (%tensor_062,); | |
| let %x124: () = vm.invoke_tvm_op(%398, %399, %400) /* ty=() */; | |
| let %x125: Tensor[(1, 28, 28, 512), float32] = %tensor_062; | |
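| /* %x125: stage2_unit3 output = conv3 of %x121 plus the identity shortcut %x107. stage2_unit4 starts next. */ | |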
| let %storage_063: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][63]) /* ty=Storage[] */; | |
| let %tensor_063: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_063, 0 /* ty=int64 */, meta[relay.Constant][63] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][63]) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %412 = fn (%p063: Tensor[(512), float32], %p155: Tensor[(512), float32], %p225: Tensor[(1, 28, 28, 512), float32], %p320: Tensor[(512), float32], %p418: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] { | |
| %401 = add(%p063, 2e-05f /* ty=float32 */) /* ty=Tensor[(512), float32] */; | |
| %402 = sqrt(%401) /* ty=Tensor[(512), float32] */; | |
| %403 = divide(1f /* ty=float32 */, %402) /* ty=Tensor[(512), float32] */; | |
| %404 = multiply(%403, %p155) /* ty=Tensor[(512), float32] */; | |
| %405 = expand_dims(%404, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %406 = multiply(%p225, %405) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %407 = negative(%p320) /* ty=Tensor[(512), float32] */; | |
| %408 = multiply(%407, %404) /* ty=Tensor[(512), float32] */; | |
| %409 = add(%408, %p418) /* ty=Tensor[(512), float32] */; | |
| %410 = expand_dims(%409, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %411 = add(%406, %410) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| nn.relu(%411) /* ty=Tensor[(1, 28, 28, 512), float32] */ | |
| }; | |
| %413 = (%stage2_unit4_bn1_moving_var, %stage2_unit4_bn1_gamma, %x125, %stage2_unit4_bn1_moving_mean, %stage2_unit4_bn1_beta); | |
| %414 = (%tensor_063,); | |
| let %x126: () = vm.invoke_tvm_op(%412, %413, %414) /* ty=() */; | |
| let %x127: Tensor[(1, 28, 28, 512), float32] = %tensor_063; | |
| let %storage_064: Storage[] = memory.alloc_storage(512 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][64]) /* ty=Storage[] */; | |
| let %tensor_064: Tensor[(128), float32] = memory.alloc_tensor(%storage_064, 0 /* ty=int64 */, meta[relay.Constant][64] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][64]) /* ty=Tensor[(128), float32] */; | |
| %418 = fn (%p064: Tensor[(128), float32], %p156: Tensor[(128), float32], Primitive=1) -> Tensor[(128), float32] { | |
| %415 = add(%p064, 2e-05f /* ty=float32 */) /* ty=Tensor[(128), float32] */; | |
| %416 = sqrt(%415) /* ty=Tensor[(128), float32] */; | |
| %417 = divide(1f /* ty=float32 */, %416) /* ty=Tensor[(128), float32] */; | |
| multiply(%417, %p156) /* ty=Tensor[(128), float32] */ | |
| }; | |
| %419 = (%stage2_unit4_bn2_moving_var, %stage2_unit4_bn2_gamma); | |
| %420 = (%tensor_064,); | |
| let %x128: () = vm.invoke_tvm_op(%418, %419, %420) /* ty=() */; | |
| let %x129: Tensor[(128), float32] = %tensor_064; | |
| let %storage_065: Storage[] = memory.alloc_storage(262144 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][65]) /* ty=Storage[] */; | |
| let %tensor_065: Tensor[(1, 1, 512, 128), float32] = memory.alloc_tensor(%storage_065, 0 /* ty=int64 */, meta[relay.Constant][65] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][65]) /* ty=Tensor[(1, 1, 512, 128), float32] */; | |
| %422 = fn (%p065: Tensor[(128, 512, 1, 1), float32], %p157: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 1, 512, 128), float32] { | |
| %421 = layout_transform(%p065, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 512, 128), float32] */; | |
| multiply(%421, %p157) /* ty=Tensor[(1, 1, 512, 128), float32] */ | |
| }; | |
| %423 = (%stage2_unit4_conv1_weight, %x129); | |
| %424 = (%tensor_065,); | |
| let %x130: () = vm.invoke_tvm_op(%422, %423, %424) /* ty=() */; | |
| let %x131: Tensor[(1, 1, 512, 128), float32] = %tensor_065; | |
| let %storage_066: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][66]) /* ty=Storage[] */; | |
| let %tensor_066: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_066, 0 /* ty=int64 */, meta[relay.Constant][66] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][66]) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %431 = fn (%p066: Tensor[(1, 28, 28, 512), float32], %p158: Tensor[(1, 1, 512, 128), float32], %p226: Tensor[(128), float32], %p321: Tensor[(128), float32], %p419: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] { | |
| %425 = nn.conv2d(%p066, %p158, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %426 = negative(%p226) /* ty=Tensor[(128), float32] */; | |
| %427 = multiply(%426, %p321) /* ty=Tensor[(128), float32] */; | |
| %428 = add(%427, %p419) /* ty=Tensor[(128), float32] */; | |
| %429 = expand_dims(%428, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 128), float32] */; | |
| %430 = add(%425, %429) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| nn.relu(%430) /* ty=Tensor[(1, 28, 28, 128), float32] */ | |
| }; | |
| %432 = (%x127, %x131, %stage2_unit4_bn2_moving_mean, %x129, %stage2_unit4_bn2_beta); | |
| %433 = (%tensor_066,); | |
| let %x132: () = vm.invoke_tvm_op(%431, %432, %433) /* ty=() */; | |
| let %x133: Tensor[(1, 28, 28, 128), float32] = %tensor_066; | |
| let %storage_067: Storage[] = memory.alloc_storage(512 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][67]) /* ty=Storage[] */; | |
| let %tensor_067: Tensor[(128), float32] = memory.alloc_tensor(%storage_067, 0 /* ty=int64 */, meta[relay.Constant][67] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][67]) /* ty=Tensor[(128), float32] */; | |
| %437 = fn (%p067: Tensor[(128), float32], %p159: Tensor[(128), float32], Primitive=1) -> Tensor[(128), float32] { | |
| %434 = add(%p067, 2e-05f /* ty=float32 */) /* ty=Tensor[(128), float32] */; | |
| %435 = sqrt(%434) /* ty=Tensor[(128), float32] */; | |
| %436 = divide(1f /* ty=float32 */, %435) /* ty=Tensor[(128), float32] */; | |
| multiply(%436, %p159) /* ty=Tensor[(128), float32] */ | |
| }; | |
| %438 = (%stage2_unit4_bn3_moving_var, %stage2_unit4_bn3_gamma); | |
| %439 = (%tensor_067,); | |
| let %x134: () = vm.invoke_tvm_op(%437, %438, %439) /* ty=() */; | |
| let %x135: Tensor[(128), float32] = %tensor_067; | |
| let %storage_068: Storage[] = memory.alloc_storage(589824 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][68]) /* ty=Storage[] */; | |
| let %tensor_068: Tensor[(3, 3, 128, 128), float32] = memory.alloc_tensor(%storage_068, 0 /* ty=int64 */, meta[relay.Constant][68] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][68]) /* ty=Tensor[(3, 3, 128, 128), float32] */; | |
| %441 = fn (%p068: Tensor[(128, 128, 3, 3), float32], %p160: Tensor[(128), float32], Primitive=1) -> Tensor[(3, 3, 128, 128), float32] { | |
| %440 = layout_transform(%p068, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 128, 128), float32] */; | |
| multiply(%440, %p160) /* ty=Tensor[(3, 3, 128, 128), float32] */ | |
| }; | |
| %442 = (%stage2_unit4_conv2_weight, %x135); | |
| %443 = (%tensor_068,); | |
| let %x136: () = vm.invoke_tvm_op(%441, %442, %443) /* ty=() */; | |
| let %x137: Tensor[(3, 3, 128, 128), float32] = %tensor_068; | |
| let %storage_069: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][69]) /* ty=Storage[] */; | |
| let %tensor_069: Tensor[(1, 28, 28, 128), float32] = memory.alloc_tensor(%storage_069, 0 /* ty=int64 */, meta[relay.Constant][69] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][69]) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %450 = fn (%p069: Tensor[(1, 28, 28, 128), float32], %p161: Tensor[(3, 3, 128, 128), float32], %p227: Tensor[(128), float32], %p322: Tensor[(128), float32], %p420: Tensor[(128), float32], Primitive=1) -> Tensor[(1, 28, 28, 128), float32] { | |
| %444 = nn.conv2d(%p069, %p161, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| %445 = negative(%p227) /* ty=Tensor[(128), float32] */; | |
| %446 = multiply(%445, %p322) /* ty=Tensor[(128), float32] */; | |
| %447 = add(%446, %p420) /* ty=Tensor[(128), float32] */; | |
| %448 = expand_dims(%447, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 128), float32] */; | |
| %449 = add(%444, %448) /* ty=Tensor[(1, 28, 28, 128), float32] */; | |
| nn.relu(%449) /* ty=Tensor[(1, 28, 28, 128), float32] */ | |
| }; | |
| %451 = (%x133, %x137, %stage2_unit4_bn3_moving_mean, %x135, %stage2_unit4_bn3_beta); | |
| %452 = (%tensor_069,); | |
| let %x138: () = vm.invoke_tvm_op(%450, %451, %452) /* ty=() */; | |
| let %x139: Tensor[(1, 28, 28, 128), float32] = %tensor_069; | |
| let %storage_070: Storage[] = memory.alloc_storage(262144 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][70]) /* ty=Storage[] */; | |
| let %tensor_070: Tensor[(1, 1, 128, 512), float32] = memory.alloc_tensor(%storage_070, 0 /* ty=int64 */, meta[relay.Constant][70] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][70]) /* ty=Tensor[(1, 1, 128, 512), float32] */; | |
| %453 = fn (%p070: Tensor[(512, 128, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 128, 512), float32] { | |
| layout_transform(%p070, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 128, 512), float32] */ | |
| }; | |
| %454 = (%stage2_unit4_conv3_weight,); | |
| %455 = (%tensor_070,); | |
| let %x140: () = vm.invoke_tvm_op(%453, %454, %455) /* ty=() */; | |
| let %x141: Tensor[(1, 1, 128, 512), float32] = %tensor_070; | |
| let %storage_071: Storage[] = memory.alloc_storage(2048 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][71]) /* ty=Storage[] */; | |
| let %tensor_071: Tensor[(512), float32] = memory.alloc_tensor(%storage_071, 0 /* ty=int64 */, meta[relay.Constant][71] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][71]) /* ty=Tensor[(512), float32] */; | |
| %459 = fn (%p071: Tensor[(512), float32], %p162: Tensor[(512), float32], Primitive=1) -> Tensor[(512), float32] { | |
| %456 = add(%p071, 2e-05f /* ty=float32 */) /* ty=Tensor[(512), float32] */; | |
| %457 = sqrt(%456) /* ty=Tensor[(512), float32] */; | |
| %458 = divide(1f /* ty=float32 */, %457) /* ty=Tensor[(512), float32] */; | |
| multiply(%458, %p162) /* ty=Tensor[(512), float32] */ | |
| }; | |
| %460 = (%stage3_unit1_bn1_moving_var, %stage3_unit1_bn1_gamma); | |
| %461 = (%tensor_071,); | |
| let %x142: () = vm.invoke_tvm_op(%459, %460, %461) /* ty=() */; | |
| let %x143: Tensor[(512), float32] = %tensor_071; | |
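| /* Fused below: stage2_unit4 conv3 (weights %x141) + residual %x125, followed by stage3_unit1_bn1 (scale %x143) and ReLU, producing %x145. */ | |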
| let %storage_072: Storage[] = memory.alloc_storage(1605632 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][72]) /* ty=Storage[] */; | |
| let %tensor_072: Tensor[(1, 28, 28, 512), float32] = memory.alloc_tensor(%storage_072, 0 /* ty=int64 */, meta[relay.Constant][72] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][72]) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %471 = fn (%p072: Tensor[(1, 28, 28, 128), float32], %p163: Tensor[(1, 1, 128, 512), float32], %p228: Tensor[(1, 28, 28, 512), float32], %p323: Tensor[(512), float32], %p421: Tensor[(512), float32], %p51: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 28, 28, 512), float32] { | |
| %462 = nn.conv2d(%p072, %p163, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %463 = add(%462, %p228) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %464 = expand_dims(%p323, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %465 = multiply(%463, %464) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| %466 = negative(%p421) /* ty=Tensor[(512), float32] */; | |
| %467 = multiply(%466, %p323) /* ty=Tensor[(512), float32] */; | |
| %468 = add(%467, %p51) /* ty=Tensor[(512), float32] */; | |
| %469 = expand_dims(%468, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %470 = add(%465, %469) /* ty=Tensor[(1, 28, 28, 512), float32] */; | |
| nn.relu(%470) /* ty=Tensor[(1, 28, 28, 512), float32] */ | |
| }; | |
| %472 = (%x139, %x141, %x125, %x143, %stage3_unit1_bn1_moving_mean, %stage3_unit1_bn1_beta); | |
| %473 = (%tensor_072,); | |
| let %x144: () = vm.invoke_tvm_op(%471, %472, %473) /* ty=() */; | |
| let %x145: Tensor[(1, 28, 28, 512), float32] = %tensor_072; | |
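| /* stage3_unit1: strided bottleneck (feature map 28x28 -> 14x14, output 1024 channels) with a 1x1 projection shortcut (%stage3_unit1_sc_weight). */ | |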
| let %storage_073: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][73]) /* ty=Storage[] */; | |
| let %tensor_073: Tensor[(256), float32] = memory.alloc_tensor(%storage_073, 0 /* ty=int64 */, meta[relay.Constant][73] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][73]) /* ty=Tensor[(256), float32] */; | |
| %477 = fn (%p073: Tensor[(256), float32], %p164: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %474 = add(%p073, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %475 = sqrt(%474) /* ty=Tensor[(256), float32] */; | |
| %476 = divide(1f /* ty=float32 */, %475) /* ty=Tensor[(256), float32] */; | |
| multiply(%476, %p164) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %478 = (%stage3_unit1_bn2_moving_var, %stage3_unit1_bn2_gamma); | |
| %479 = (%tensor_073,); | |
| let %x146: () = vm.invoke_tvm_op(%477, %478, %479) /* ty=() */; | |
| let %x147: Tensor[(256), float32] = %tensor_073; | |
| let %storage_074: Storage[] = memory.alloc_storage(524288 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][74]) /* ty=Storage[] */; | |
| let %tensor_074: Tensor[(1, 1, 512, 256), float32] = memory.alloc_tensor(%storage_074, 0 /* ty=int64 */, meta[relay.Constant][74] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][74]) /* ty=Tensor[(1, 1, 512, 256), float32] */; | |
| %481 = fn (%p074: Tensor[(256, 512, 1, 1), float32], %p165: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 1, 512, 256), float32] { | |
| %480 = layout_transform(%p074, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 512, 256), float32] */; | |
| multiply(%480, %p165) /* ty=Tensor[(1, 1, 512, 256), float32] */ | |
| }; | |
| %482 = (%stage3_unit1_conv1_weight, %x147); | |
| %483 = (%tensor_074,); | |
| let %x148: () = vm.invoke_tvm_op(%481, %482, %483) /* ty=() */; | |
| let %x149: Tensor[(1, 1, 512, 256), float32] = %tensor_074; | |
| let %storage_075: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][75]) /* ty=Storage[] */; | |
| let %tensor_075: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_075, 0 /* ty=int64 */, meta[relay.Constant][75] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][75]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %490 = fn (%p075: Tensor[(1, 28, 28, 512), float32], %p166: Tensor[(1, 1, 512, 256), float32], %p229: Tensor[(256), float32], %p324: Tensor[(256), float32], %p422: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %484 = nn.conv2d(%p075, %p166, strides=[2, 2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %485 = negative(%p229) /* ty=Tensor[(256), float32] */; | |
| %486 = multiply(%485, %p324) /* ty=Tensor[(256), float32] */; | |
| %487 = add(%486, %p422) /* ty=Tensor[(256), float32] */; | |
| %488 = expand_dims(%487, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %489 = add(%484, %488) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%489) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %491 = (%x145, %x149, %stage3_unit1_bn2_moving_mean, %x147, %stage3_unit1_bn2_beta); | |
| %492 = (%tensor_075,); | |
| let %x150: () = vm.invoke_tvm_op(%490, %491, %492) /* ty=() */; | |
| let %x151: Tensor[(1, 14, 14, 256), float32] = %tensor_075; | |
| let %storage_076: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][76]) /* ty=Storage[] */; | |
| let %tensor_076: Tensor[(256), float32] = memory.alloc_tensor(%storage_076, 0 /* ty=int64 */, meta[relay.Constant][76] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][76]) /* ty=Tensor[(256), float32] */; | |
| %496 = fn (%p076: Tensor[(256), float32], %p167: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %493 = add(%p076, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %494 = sqrt(%493) /* ty=Tensor[(256), float32] */; | |
| %495 = divide(1f /* ty=float32 */, %494) /* ty=Tensor[(256), float32] */; | |
| multiply(%495, %p167) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %497 = (%stage3_unit1_bn3_moving_var, %stage3_unit1_bn3_gamma); | |
| %498 = (%tensor_076,); | |
| let %x152: () = vm.invoke_tvm_op(%496, %497, %498) /* ty=() */; | |
| let %x153: Tensor[(256), float32] = %tensor_076; | |
| let %storage_077: Storage[] = memory.alloc_storage(2359296 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][77]) /* ty=Storage[] */; | |
| let %tensor_077: Tensor[(3, 3, 256, 256), float32] = memory.alloc_tensor(%storage_077, 0 /* ty=int64 */, meta[relay.Constant][77] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][77]) /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| %500 = fn (%p077: Tensor[(256, 256, 3, 3), float32], %p168: Tensor[(256), float32], Primitive=1) -> Tensor[(3, 3, 256, 256), float32] { | |
| %499 = layout_transform(%p077, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| multiply(%499, %p168) /* ty=Tensor[(3, 3, 256, 256), float32] */ | |
| }; | |
| %501 = (%stage3_unit1_conv2_weight, %x153); | |
| %502 = (%tensor_077,); | |
| let %x154: () = vm.invoke_tvm_op(%500, %501, %502) /* ty=() */; | |
| let %x155: Tensor[(3, 3, 256, 256), float32] = %tensor_077; | |
| let %storage_078: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][78]) /* ty=Storage[] */; | |
| let %tensor_078: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_078, 0 /* ty=int64 */, meta[relay.Constant][78] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][78]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %509 = fn (%p078: Tensor[(1, 14, 14, 256), float32], %p169: Tensor[(3, 3, 256, 256), float32], %p230: Tensor[(256), float32], %p325: Tensor[(256), float32], %p423: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %503 = nn.conv2d(%p078, %p169, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %504 = negative(%p230) /* ty=Tensor[(256), float32] */; | |
| %505 = multiply(%504, %p325) /* ty=Tensor[(256), float32] */; | |
| %506 = add(%505, %p423) /* ty=Tensor[(256), float32] */; | |
| %507 = expand_dims(%506, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %508 = add(%503, %507) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%508) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %510 = (%x151, %x155, %stage3_unit1_bn3_moving_mean, %x153, %stage3_unit1_bn3_beta); | |
| %511 = (%tensor_078,); | |
| let %x156: () = vm.invoke_tvm_op(%509, %510, %511) /* ty=() */; | |
| let %x157: Tensor[(1, 14, 14, 256), float32] = %tensor_078; | |
| let %storage_079: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][79]) /* ty=Storage[] */; | |
| let %tensor_079: Tensor[(1, 1, 256, 1024), float32] = memory.alloc_tensor(%storage_079, 0 /* ty=int64 */, meta[relay.Constant][79] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][79]) /* ty=Tensor[(1, 1, 256, 1024), float32] */; | |
| %512 = fn (%p079: Tensor[(1024, 256, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 256, 1024), float32] { | |
| layout_transform(%p079, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 256, 1024), float32] */ | |
| }; | |
| %513 = (%stage3_unit1_conv3_weight,); | |
| %514 = (%tensor_079,); | |
| let %x158: () = vm.invoke_tvm_op(%512, %513, %514) /* ty=() */; | |
| let %x159: Tensor[(1, 1, 256, 1024), float32] = %tensor_079; | |
| let %storage_080: Storage[] = memory.alloc_storage(2097152 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][80]) /* ty=Storage[] */; | |
| let %tensor_080: Tensor[(1, 1, 512, 1024), float32] = memory.alloc_tensor(%storage_080, 0 /* ty=int64 */, meta[relay.Constant][80] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][80]) /* ty=Tensor[(1, 1, 512, 1024), float32] */; | |
| %515 = fn (%p080: Tensor[(1024, 512, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 512, 1024), float32] { | |
| layout_transform(%p080, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 512, 1024), float32] */ | |
| }; | |
| %516 = (%stage3_unit1_sc_weight,); | |
| %517 = (%tensor_080,); | |
| let %x160: () = vm.invoke_tvm_op(%515, %516, %517) /* ty=() */; | |
| let %x161: Tensor[(1, 1, 512, 1024), float32] = %tensor_080; | |
| let %storage_081: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][81]) /* ty=Storage[] */; | |
| let %tensor_081: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_081, 0 /* ty=int64 */, meta[relay.Constant][81] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][81]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %518 = fn (%p081: Tensor[(1, 28, 28, 512), float32], %p170: Tensor[(1, 1, 512, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| nn.conv2d(%p081, %p170, strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %519 = (%x145, %x161); | |
| %520 = (%tensor_081,); | |
| let %x162: () = vm.invoke_tvm_op(%518, %519, %520) /* ty=() */; | |
| let %x163: Tensor[(1, 14, 14, 1024), float32] = %tensor_081; | |
| let %storage_082: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][82]) /* ty=Storage[] */; | |
| let %tensor_082: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_082, 0 /* ty=int64 */, meta[relay.Constant][82] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][82]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %522 = fn (%p082: Tensor[(1, 14, 14, 256), float32], %p171: Tensor[(1, 1, 256, 1024), float32], %p231: Tensor[(1, 14, 14, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %521 = nn.conv2d(%p082, %p171, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| add(%521, %p231) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %523 = (%x157, %x159, %x163); | |
| %524 = (%tensor_082,); | |
| let %x164: () = vm.invoke_tvm_op(%522, %523, %524) /* ty=() */; | |
| let %x165: Tensor[(1, 14, 14, 1024), float32] = %tensor_082; | |
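| /* %x165: stage3_unit1 output = conv3 of %x157 plus the projection shortcut %x163. stage3_unit2 starts next. */ | |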
| let %storage_083: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][83]) /* ty=Storage[] */; | |
| let %tensor_083: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_083, 0 /* ty=int64 */, meta[relay.Constant][83] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][83]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %536 = fn (%p083: Tensor[(1024), float32], %p172: Tensor[(1024), float32], %p232: Tensor[(1, 14, 14, 1024), float32], %p326: Tensor[(1024), float32], %p424: Tensor[(1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %525 = add(%p083, 2e-05f /* ty=float32 */) /* ty=Tensor[(1024), float32] */; | |
| %526 = sqrt(%525) /* ty=Tensor[(1024), float32] */; | |
| %527 = divide(1f /* ty=float32 */, %526) /* ty=Tensor[(1024), float32] */; | |
| %528 = multiply(%527, %p172) /* ty=Tensor[(1024), float32] */; | |
| %529 = expand_dims(%528, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %530 = multiply(%p232, %529) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %531 = negative(%p326) /* ty=Tensor[(1024), float32] */; | |
| %532 = multiply(%531, %528) /* ty=Tensor[(1024), float32] */; | |
| %533 = add(%532, %p424) /* ty=Tensor[(1024), float32] */; | |
| %534 = expand_dims(%533, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %535 = add(%530, %534) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| nn.relu(%535) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %537 = (%stage3_unit2_bn1_moving_var, %stage3_unit2_bn1_gamma, %x165, %stage3_unit2_bn1_moving_mean, %stage3_unit2_bn1_beta); | |
| %538 = (%tensor_083,); | |
| let %x166: () = vm.invoke_tvm_op(%536, %537, %538) /* ty=() */; | |
| let %x167: Tensor[(1, 14, 14, 1024), float32] = %tensor_083; | |
| let %storage_084: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][84]) /* ty=Storage[] */; | |
| let %tensor_084: Tensor[(256), float32] = memory.alloc_tensor(%storage_084, 0 /* ty=int64 */, meta[relay.Constant][84] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][84]) /* ty=Tensor[(256), float32] */; | |
| %542 = fn (%p084: Tensor[(256), float32], %p173: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %539 = add(%p084, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %540 = sqrt(%539) /* ty=Tensor[(256), float32] */; | |
| %541 = divide(1f /* ty=float32 */, %540) /* ty=Tensor[(256), float32] */; | |
| multiply(%541, %p173) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %543 = (%stage3_unit2_bn2_moving_var, %stage3_unit2_bn2_gamma); | |
| %544 = (%tensor_084,); | |
| let %x168: () = vm.invoke_tvm_op(%542, %543, %544) /* ty=() */; | |
| let %x169: Tensor[(256), float32] = %tensor_084; | |
| let %storage_085: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][85]) /* ty=Storage[] */; | |
| let %tensor_085: Tensor[(1, 1, 1024, 256), float32] = memory.alloc_tensor(%storage_085, 0 /* ty=int64 */, meta[relay.Constant][85] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][85]) /* ty=Tensor[(1, 1, 1024, 256), float32] */; | |
| %546 = fn (%p085: Tensor[(256, 1024, 1, 1), float32], %p174: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 1, 1024, 256), float32] { | |
| %545 = layout_transform(%p085, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 1024, 256), float32] */; | |
| multiply(%545, %p174) /* ty=Tensor[(1, 1, 1024, 256), float32] */ | |
| }; | |
| %547 = (%stage3_unit2_conv1_weight, %x169); | |
| %548 = (%tensor_085,); | |
| let %x170: () = vm.invoke_tvm_op(%546, %547, %548) /* ty=() */; | |
| let %x171: Tensor[(1, 1, 1024, 256), float32] = %tensor_085; | |
| let %storage_086: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][86]) /* ty=Storage[] */; | |
| let %tensor_086: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_086, 0 /* ty=int64 */, meta[relay.Constant][86] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][86]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %555 = fn (%p086: Tensor[(1, 14, 14, 1024), float32], %p175: Tensor[(1, 1, 1024, 256), float32], %p233: Tensor[(256), float32], %p327: Tensor[(256), float32], %p425: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %549 = nn.conv2d(%p086, %p175, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %550 = negative(%p233) /* ty=Tensor[(256), float32] */; | |
| %551 = multiply(%550, %p327) /* ty=Tensor[(256), float32] */; | |
| %552 = add(%551, %p425) /* ty=Tensor[(256), float32] */; | |
| %553 = expand_dims(%552, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %554 = add(%549, %553) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%554) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %556 = (%x167, %x171, %stage3_unit2_bn2_moving_mean, %x169, %stage3_unit2_bn2_beta); | |
| %557 = (%tensor_086,); | |
| let %x172: () = vm.invoke_tvm_op(%555, %556, %557) /* ty=() */; | |
| let %x173: Tensor[(1, 14, 14, 256), float32] = %tensor_086; | |
| let %storage_087: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][87]) /* ty=Storage[] */; | |
| let %tensor_087: Tensor[(256), float32] = memory.alloc_tensor(%storage_087, 0 /* ty=int64 */, meta[relay.Constant][87] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][87]) /* ty=Tensor[(256), float32] */; | |
| %561 = fn (%p087: Tensor[(256), float32], %p176: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %558 = add(%p087, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %559 = sqrt(%558) /* ty=Tensor[(256), float32] */; | |
| %560 = divide(1f /* ty=float32 */, %559) /* ty=Tensor[(256), float32] */; | |
| multiply(%560, %p176) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %562 = (%stage3_unit2_bn3_moving_var, %stage3_unit2_bn3_gamma); | |
| %563 = (%tensor_087,); | |
| let %x174: () = vm.invoke_tvm_op(%561, %562, %563) /* ty=() */; | |
| let %x175: Tensor[(256), float32] = %tensor_087; | |
| let %storage_088: Storage[] = memory.alloc_storage(2359296 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][88]) /* ty=Storage[] */; | |
| let %tensor_088: Tensor[(3, 3, 256, 256), float32] = memory.alloc_tensor(%storage_088, 0 /* ty=int64 */, meta[relay.Constant][88] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][88]) /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| %565 = fn (%p088: Tensor[(256, 256, 3, 3), float32], %p177: Tensor[(256), float32], Primitive=1) -> Tensor[(3, 3, 256, 256), float32] { | |
| %564 = layout_transform(%p088, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| multiply(%564, %p177) /* ty=Tensor[(3, 3, 256, 256), float32] */ | |
| }; | |
| %566 = (%stage3_unit2_conv2_weight, %x175); | |
| %567 = (%tensor_088,); | |
| let %x176: () = vm.invoke_tvm_op(%565, %566, %567) /* ty=() */; | |
| let %x177: Tensor[(3, 3, 256, 256), float32] = %tensor_088; | |
| let %storage_089: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][89]) /* ty=Storage[] */; | |
| let %tensor_089: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_089, 0 /* ty=int64 */, meta[relay.Constant][89] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][89]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %574 = fn (%p089: Tensor[(1, 14, 14, 256), float32], %p178: Tensor[(3, 3, 256, 256), float32], %p234: Tensor[(256), float32], %p328: Tensor[(256), float32], %p426: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %568 = nn.conv2d(%p089, %p178, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %569 = negative(%p234) /* ty=Tensor[(256), float32] */; | |
| %570 = multiply(%569, %p328) /* ty=Tensor[(256), float32] */; | |
| %571 = add(%570, %p426) /* ty=Tensor[(256), float32] */; | |
| %572 = expand_dims(%571, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %573 = add(%568, %572) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%573) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %575 = (%x173, %x177, %stage3_unit2_bn3_moving_mean, %x175, %stage3_unit2_bn3_beta); | |
| %576 = (%tensor_089,); | |
| let %x178: () = vm.invoke_tvm_op(%574, %575, %576) /* ty=() */; | |
| let %x179: Tensor[(1, 14, 14, 256), float32] = %tensor_089; | |
| let %storage_090: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][90]) /* ty=Storage[] */; | |
| let %tensor_090: Tensor[(1, 1, 256, 1024), float32] = memory.alloc_tensor(%storage_090, 0 /* ty=int64 */, meta[relay.Constant][90] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][90]) /* ty=Tensor[(1, 1, 256, 1024), float32] */; | |
| %577 = fn (%p090: Tensor[(1024, 256, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 256, 1024), float32] { | |
| layout_transform(%p090, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 256, 1024), float32] */ | |
| }; | |
| %578 = (%stage3_unit2_conv3_weight,); | |
| %579 = (%tensor_090,); | |
| let %x180: () = vm.invoke_tvm_op(%577, %578, %579) /* ty=() */; | |
| let %x181: Tensor[(1, 1, 256, 1024), float32] = %tensor_090; | |
| let %storage_091: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][91]) /* ty=Storage[] */; | |
| let %tensor_091: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_091, 0 /* ty=int64 */, meta[relay.Constant][91] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][91]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %581 = fn (%p091: Tensor[(1, 14, 14, 256), float32], %p179: Tensor[(1, 1, 256, 1024), float32], %p235: Tensor[(1, 14, 14, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %580 = nn.conv2d(%p091, %p179, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| add(%580, %p235) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %582 = (%x179, %x181, %x165); | |
| %583 = (%tensor_091,); | |
| let %x182: () = vm.invoke_tvm_op(%581, %582, %583) /* ty=() */; | |
| let %x183: Tensor[(1, 14, 14, 1024), float32] = %tensor_091; | |
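| // %x183 closes stage3_unit2: the last 1x1 conv expands back to 1024 channels and the residual from %x165 is | |
| // added in the same fused kernel. stage3_unit3, stage3_unit4 and stage3_unit5 below repeat this sequence of | |
| // fused kernels with their own weights, each taking the previous unit's 1x14x14x1024 output as its residual | |
| // input; stage3_unit6 does the same except for its closing conv, which is fused into the first stage-4 kernel. | |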
| let %storage_092: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][92]) /* ty=Storage[] */; | |
| let %tensor_092: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_092, 0 /* ty=int64 */, meta[relay.Constant][92] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][92]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %595 = fn (%p092: Tensor[(1024), float32], %p180: Tensor[(1024), float32], %p236: Tensor[(1, 14, 14, 1024), float32], %p329: Tensor[(1024), float32], %p427: Tensor[(1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %584 = add(%p092, 2e-05f /* ty=float32 */) /* ty=Tensor[(1024), float32] */; | |
| %585 = sqrt(%584) /* ty=Tensor[(1024), float32] */; | |
| %586 = divide(1f /* ty=float32 */, %585) /* ty=Tensor[(1024), float32] */; | |
| %587 = multiply(%586, %p180) /* ty=Tensor[(1024), float32] */; | |
| %588 = expand_dims(%587, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %589 = multiply(%p236, %588) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %590 = negative(%p329) /* ty=Tensor[(1024), float32] */; | |
| %591 = multiply(%590, %587) /* ty=Tensor[(1024), float32] */; | |
| %592 = add(%591, %p427) /* ty=Tensor[(1024), float32] */; | |
| %593 = expand_dims(%592, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %594 = add(%589, %593) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| nn.relu(%594) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %596 = (%stage3_unit3_bn1_moving_var, %stage3_unit3_bn1_gamma, %x183, %stage3_unit3_bn1_moving_mean, %stage3_unit3_bn1_beta); | |
| %597 = (%tensor_092,); | |
| let %x184: () = vm.invoke_tvm_op(%595, %596, %597) /* ty=() */; | |
| let %x185: Tensor[(1, 14, 14, 1024), float32] = %tensor_092; | |
| let %storage_093: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][93]) /* ty=Storage[] */; | |
| let %tensor_093: Tensor[(256), float32] = memory.alloc_tensor(%storage_093, 0 /* ty=int64 */, meta[relay.Constant][93] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][93]) /* ty=Tensor[(256), float32] */; | |
| %601 = fn (%p093: Tensor[(256), float32], %p181: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %598 = add(%p093, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %599 = sqrt(%598) /* ty=Tensor[(256), float32] */; | |
| %600 = divide(1f /* ty=float32 */, %599) /* ty=Tensor[(256), float32] */; | |
| multiply(%600, %p181) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %602 = (%stage3_unit3_bn2_moving_var, %stage3_unit3_bn2_gamma); | |
| %603 = (%tensor_093,); | |
| let %x186: () = vm.invoke_tvm_op(%601, %602, %603) /* ty=() */; | |
| let %x187: Tensor[(256), float32] = %tensor_093; | |
| let %storage_094: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][94]) /* ty=Storage[] */; | |
| let %tensor_094: Tensor[(1, 1, 1024, 256), float32] = memory.alloc_tensor(%storage_094, 0 /* ty=int64 */, meta[relay.Constant][94] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][94]) /* ty=Tensor[(1, 1, 1024, 256), float32] */; | |
| %605 = fn (%p094: Tensor[(256, 1024, 1, 1), float32], %p182: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 1, 1024, 256), float32] { | |
| %604 = layout_transform(%p094, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 1024, 256), float32] */; | |
| multiply(%604, %p182) /* ty=Tensor[(1, 1, 1024, 256), float32] */ | |
| }; | |
| %606 = (%stage3_unit3_conv1_weight, %x187); | |
| %607 = (%tensor_094,); | |
| let %x188: () = vm.invoke_tvm_op(%605, %606, %607) /* ty=() */; | |
| let %x189: Tensor[(1, 1, 1024, 256), float32] = %tensor_094; | |
| let %storage_095: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][95]) /* ty=Storage[] */; | |
| let %tensor_095: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_095, 0 /* ty=int64 */, meta[relay.Constant][95] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][95]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %614 = fn (%p095: Tensor[(1, 14, 14, 1024), float32], %p183: Tensor[(1, 1, 1024, 256), float32], %p237: Tensor[(256), float32], %p330: Tensor[(256), float32], %p428: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %608 = nn.conv2d(%p095, %p183, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %609 = negative(%p237) /* ty=Tensor[(256), float32] */; | |
| %610 = multiply(%609, %p330) /* ty=Tensor[(256), float32] */; | |
| %611 = add(%610, %p428) /* ty=Tensor[(256), float32] */; | |
| %612 = expand_dims(%611, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %613 = add(%608, %612) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%613) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %615 = (%x185, %x189, %stage3_unit3_bn2_moving_mean, %x187, %stage3_unit3_bn2_beta); | |
| %616 = (%tensor_095,); | |
| let %x190: () = vm.invoke_tvm_op(%614, %615, %616) /* ty=() */; | |
| let %x191: Tensor[(1, 14, 14, 256), float32] = %tensor_095; | |
| let %storage_096: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][96]) /* ty=Storage[] */; | |
| let %tensor_096: Tensor[(256), float32] = memory.alloc_tensor(%storage_096, 0 /* ty=int64 */, meta[relay.Constant][96] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][96]) /* ty=Tensor[(256), float32] */; | |
| %620 = fn (%p096: Tensor[(256), float32], %p184: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %617 = add(%p096, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %618 = sqrt(%617) /* ty=Tensor[(256), float32] */; | |
| %619 = divide(1f /* ty=float32 */, %618) /* ty=Tensor[(256), float32] */; | |
| multiply(%619, %p184) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %621 = (%stage3_unit3_bn3_moving_var, %stage3_unit3_bn3_gamma); | |
| %622 = (%tensor_096,); | |
| let %x192: () = vm.invoke_tvm_op(%620, %621, %622) /* ty=() */; | |
| let %x193: Tensor[(256), float32] = %tensor_096; | |
| let %storage_097: Storage[] = memory.alloc_storage(2359296 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][97]) /* ty=Storage[] */; | |
| let %tensor_097: Tensor[(3, 3, 256, 256), float32] = memory.alloc_tensor(%storage_097, 0 /* ty=int64 */, meta[relay.Constant][97] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][97]) /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| %624 = fn (%p097: Tensor[(256, 256, 3, 3), float32], %p185: Tensor[(256), float32], Primitive=1) -> Tensor[(3, 3, 256, 256), float32] { | |
| %623 = layout_transform(%p097, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| multiply(%623, %p185) /* ty=Tensor[(3, 3, 256, 256), float32] */ | |
| }; | |
| %625 = (%stage3_unit3_conv2_weight, %x193); | |
| %626 = (%tensor_097,); | |
| let %x194: () = vm.invoke_tvm_op(%624, %625, %626) /* ty=() */; | |
| let %x195: Tensor[(3, 3, 256, 256), float32] = %tensor_097; | |
| let %storage_098: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][98]) /* ty=Storage[] */; | |
| let %tensor_098: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_098, 0 /* ty=int64 */, meta[relay.Constant][98] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][98]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %633 = fn (%p098: Tensor[(1, 14, 14, 256), float32], %p186: Tensor[(3, 3, 256, 256), float32], %p238: Tensor[(256), float32], %p331: Tensor[(256), float32], %p429: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %627 = nn.conv2d(%p098, %p186, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %628 = negative(%p238) /* ty=Tensor[(256), float32] */; | |
| %629 = multiply(%628, %p331) /* ty=Tensor[(256), float32] */; | |
| %630 = add(%629, %p429) /* ty=Tensor[(256), float32] */; | |
| %631 = expand_dims(%630, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %632 = add(%627, %631) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%632) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %634 = (%x191, %x195, %stage3_unit3_bn3_moving_mean, %x193, %stage3_unit3_bn3_beta); | |
| %635 = (%tensor_098,); | |
| let %x196: () = vm.invoke_tvm_op(%633, %634, %635) /* ty=() */; | |
| let %x197: Tensor[(1, 14, 14, 256), float32] = %tensor_098; | |
| let %storage_099: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][99]) /* ty=Storage[] */; | |
| let %tensor_099: Tensor[(1, 1, 256, 1024), float32] = memory.alloc_tensor(%storage_099, 0 /* ty=int64 */, meta[relay.Constant][99] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][99]) /* ty=Tensor[(1, 1, 256, 1024), float32] */; | |
| %636 = fn (%p099: Tensor[(1024, 256, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 256, 1024), float32] { | |
| layout_transform(%p099, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 256, 1024), float32] */ | |
| }; | |
| %637 = (%stage3_unit3_conv3_weight,); | |
| %638 = (%tensor_099,); | |
| let %x198: () = vm.invoke_tvm_op(%636, %637, %638) /* ty=() */; | |
| let %x199: Tensor[(1, 1, 256, 1024), float32] = %tensor_099; | |
| let %storage_0100: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][100]) /* ty=Storage[] */; | |
| let %tensor_0100: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_0100, 0 /* ty=int64 */, meta[relay.Constant][100] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][100]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %640 = fn (%p0100: Tensor[(1, 14, 14, 256), float32], %p187: Tensor[(1, 1, 256, 1024), float32], %p239: Tensor[(1, 14, 14, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %639 = nn.conv2d(%p0100, %p187, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| add(%639, %p239) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %641 = (%x197, %x199, %x183); | |
| %642 = (%tensor_0100,); | |
| let %x200: () = vm.invoke_tvm_op(%640, %641, %642) /* ty=() */; | |
| let %x201: Tensor[(1, 14, 14, 1024), float32] = %tensor_0100; | |
| let %storage_0101: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][101]) /* ty=Storage[] */; | |
| let %tensor_0101: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_0101, 0 /* ty=int64 */, meta[relay.Constant][101] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][101]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %654 = fn (%p0101: Tensor[(1024), float32], %p188: Tensor[(1024), float32], %p240: Tensor[(1, 14, 14, 1024), float32], %p332: Tensor[(1024), float32], %p430: Tensor[(1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %643 = add(%p0101, 2e-05f /* ty=float32 */) /* ty=Tensor[(1024), float32] */; | |
| %644 = sqrt(%643) /* ty=Tensor[(1024), float32] */; | |
| %645 = divide(1f /* ty=float32 */, %644) /* ty=Tensor[(1024), float32] */; | |
| %646 = multiply(%645, %p188) /* ty=Tensor[(1024), float32] */; | |
| %647 = expand_dims(%646, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %648 = multiply(%p240, %647) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %649 = negative(%p332) /* ty=Tensor[(1024), float32] */; | |
| %650 = multiply(%649, %646) /* ty=Tensor[(1024), float32] */; | |
| %651 = add(%650, %p430) /* ty=Tensor[(1024), float32] */; | |
| %652 = expand_dims(%651, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %653 = add(%648, %652) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| nn.relu(%653) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %655 = (%stage3_unit4_bn1_moving_var, %stage3_unit4_bn1_gamma, %x201, %stage3_unit4_bn1_moving_mean, %stage3_unit4_bn1_beta); | |
| %656 = (%tensor_0101,); | |
| let %x202: () = vm.invoke_tvm_op(%654, %655, %656) /* ty=() */; | |
| let %x203: Tensor[(1, 14, 14, 1024), float32] = %tensor_0101; | |
| let %storage_0102: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][102]) /* ty=Storage[] */; | |
| let %tensor_0102: Tensor[(256), float32] = memory.alloc_tensor(%storage_0102, 0 /* ty=int64 */, meta[relay.Constant][102] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][102]) /* ty=Tensor[(256), float32] */; | |
| %660 = fn (%p0102: Tensor[(256), float32], %p189: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %657 = add(%p0102, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %658 = sqrt(%657) /* ty=Tensor[(256), float32] */; | |
| %659 = divide(1f /* ty=float32 */, %658) /* ty=Tensor[(256), float32] */; | |
| multiply(%659, %p189) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %661 = (%stage3_unit4_bn2_moving_var, %stage3_unit4_bn2_gamma); | |
| %662 = (%tensor_0102,); | |
| let %x204: () = vm.invoke_tvm_op(%660, %661, %662) /* ty=() */; | |
| let %x205: Tensor[(256), float32] = %tensor_0102; | |
| let %storage_0103: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][103]) /* ty=Storage[] */; | |
| let %tensor_0103: Tensor[(1, 1, 1024, 256), float32] = memory.alloc_tensor(%storage_0103, 0 /* ty=int64 */, meta[relay.Constant][103] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][103]) /* ty=Tensor[(1, 1, 1024, 256), float32] */; | |
| %664 = fn (%p0103: Tensor[(256, 1024, 1, 1), float32], %p190: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 1, 1024, 256), float32] { | |
| %663 = layout_transform(%p0103, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 1024, 256), float32] */; | |
| multiply(%663, %p190) /* ty=Tensor[(1, 1, 1024, 256), float32] */ | |
| }; | |
| %665 = (%stage3_unit4_conv1_weight, %x205); | |
| %666 = (%tensor_0103,); | |
| let %x206: () = vm.invoke_tvm_op(%664, %665, %666) /* ty=() */; | |
| let %x207: Tensor[(1, 1, 1024, 256), float32] = %tensor_0103; | |
| let %storage_0104: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][104]) /* ty=Storage[] */; | |
| let %tensor_0104: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_0104, 0 /* ty=int64 */, meta[relay.Constant][104] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][104]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %673 = fn (%p0104: Tensor[(1, 14, 14, 1024), float32], %p191: Tensor[(1, 1, 1024, 256), float32], %p241: Tensor[(256), float32], %p333: Tensor[(256), float32], %p431: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %667 = nn.conv2d(%p0104, %p191, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %668 = negative(%p241) /* ty=Tensor[(256), float32] */; | |
| %669 = multiply(%668, %p333) /* ty=Tensor[(256), float32] */; | |
| %670 = add(%669, %p431) /* ty=Tensor[(256), float32] */; | |
| %671 = expand_dims(%670, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %672 = add(%667, %671) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%672) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %674 = (%x203, %x207, %stage3_unit4_bn2_moving_mean, %x205, %stage3_unit4_bn2_beta); | |
| %675 = (%tensor_0104,); | |
| let %x208: () = vm.invoke_tvm_op(%673, %674, %675) /* ty=() */; | |
| let %x209: Tensor[(1, 14, 14, 256), float32] = %tensor_0104; | |
| let %storage_0105: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][105]) /* ty=Storage[] */; | |
| let %tensor_0105: Tensor[(256), float32] = memory.alloc_tensor(%storage_0105, 0 /* ty=int64 */, meta[relay.Constant][105] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][105]) /* ty=Tensor[(256), float32] */; | |
| %679 = fn (%p0105: Tensor[(256), float32], %p192: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %676 = add(%p0105, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %677 = sqrt(%676) /* ty=Tensor[(256), float32] */; | |
| %678 = divide(1f /* ty=float32 */, %677) /* ty=Tensor[(256), float32] */; | |
| multiply(%678, %p192) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %680 = (%stage3_unit4_bn3_moving_var, %stage3_unit4_bn3_gamma); | |
| %681 = (%tensor_0105,); | |
| let %x210: () = vm.invoke_tvm_op(%679, %680, %681) /* ty=() */; | |
| let %x211: Tensor[(256), float32] = %tensor_0105; | |
| let %storage_0106: Storage[] = memory.alloc_storage(2359296 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][106]) /* ty=Storage[] */; | |
| let %tensor_0106: Tensor[(3, 3, 256, 256), float32] = memory.alloc_tensor(%storage_0106, 0 /* ty=int64 */, meta[relay.Constant][106] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][106]) /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| %683 = fn (%p0106: Tensor[(256, 256, 3, 3), float32], %p193: Tensor[(256), float32], Primitive=1) -> Tensor[(3, 3, 256, 256), float32] { | |
| %682 = layout_transform(%p0106, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| multiply(%682, %p193) /* ty=Tensor[(3, 3, 256, 256), float32] */ | |
| }; | |
| %684 = (%stage3_unit4_conv2_weight, %x211); | |
| %685 = (%tensor_0106,); | |
| let %x212: () = vm.invoke_tvm_op(%683, %684, %685) /* ty=() */; | |
| let %x213: Tensor[(3, 3, 256, 256), float32] = %tensor_0106; | |
| let %storage_0107: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][107]) /* ty=Storage[] */; | |
| let %tensor_0107: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_0107, 0 /* ty=int64 */, meta[relay.Constant][107] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][107]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %692 = fn (%p0107: Tensor[(1, 14, 14, 256), float32], %p194: Tensor[(3, 3, 256, 256), float32], %p242: Tensor[(256), float32], %p334: Tensor[(256), float32], %p432: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %686 = nn.conv2d(%p0107, %p194, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %687 = negative(%p242) /* ty=Tensor[(256), float32] */; | |
| %688 = multiply(%687, %p334) /* ty=Tensor[(256), float32] */; | |
| %689 = add(%688, %p432) /* ty=Tensor[(256), float32] */; | |
| %690 = expand_dims(%689, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %691 = add(%686, %690) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%691) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %693 = (%x209, %x213, %stage3_unit4_bn3_moving_mean, %x211, %stage3_unit4_bn3_beta); | |
| %694 = (%tensor_0107,); | |
| let %x214: () = vm.invoke_tvm_op(%692, %693, %694) /* ty=() */; | |
| let %x215: Tensor[(1, 14, 14, 256), float32] = %tensor_0107; | |
| let %storage_0108: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][108]) /* ty=Storage[] */; | |
| let %tensor_0108: Tensor[(1, 1, 256, 1024), float32] = memory.alloc_tensor(%storage_0108, 0 /* ty=int64 */, meta[relay.Constant][108] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][108]) /* ty=Tensor[(1, 1, 256, 1024), float32] */; | |
| %695 = fn (%p0108: Tensor[(1024, 256, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 256, 1024), float32] { | |
| layout_transform(%p0108, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 256, 1024), float32] */ | |
| }; | |
| %696 = (%stage3_unit4_conv3_weight,); | |
| %697 = (%tensor_0108,); | |
| let %x216: () = vm.invoke_tvm_op(%695, %696, %697) /* ty=() */; | |
| let %x217: Tensor[(1, 1, 256, 1024), float32] = %tensor_0108; | |
| let %storage_0109: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][109]) /* ty=Storage[] */; | |
| let %tensor_0109: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_0109, 0 /* ty=int64 */, meta[relay.Constant][109] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][109]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %699 = fn (%p0109: Tensor[(1, 14, 14, 256), float32], %p195: Tensor[(1, 1, 256, 1024), float32], %p243: Tensor[(1, 14, 14, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %698 = nn.conv2d(%p0109, %p195, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| add(%698, %p243) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %700 = (%x215, %x217, %x201); | |
| %701 = (%tensor_0109,); | |
| let %x218: () = vm.invoke_tvm_op(%699, %700, %701) /* ty=() */; | |
| let %x219: Tensor[(1, 14, 14, 1024), float32] = %tensor_0109; | |
| let %storage_0110: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][110]) /* ty=Storage[] */; | |
| let %tensor_0110: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_0110, 0 /* ty=int64 */, meta[relay.Constant][110] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][110]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %713 = fn (%p0110: Tensor[(1024), float32], %p196: Tensor[(1024), float32], %p244: Tensor[(1, 14, 14, 1024), float32], %p335: Tensor[(1024), float32], %p433: Tensor[(1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %702 = add(%p0110, 2e-05f /* ty=float32 */) /* ty=Tensor[(1024), float32] */; | |
| %703 = sqrt(%702) /* ty=Tensor[(1024), float32] */; | |
| %704 = divide(1f /* ty=float32 */, %703) /* ty=Tensor[(1024), float32] */; | |
| %705 = multiply(%704, %p196) /* ty=Tensor[(1024), float32] */; | |
| %706 = expand_dims(%705, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %707 = multiply(%p244, %706) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %708 = negative(%p335) /* ty=Tensor[(1024), float32] */; | |
| %709 = multiply(%708, %705) /* ty=Tensor[(1024), float32] */; | |
| %710 = add(%709, %p433) /* ty=Tensor[(1024), float32] */; | |
| %711 = expand_dims(%710, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %712 = add(%707, %711) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| nn.relu(%712) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %714 = (%stage3_unit5_bn1_moving_var, %stage3_unit5_bn1_gamma, %x219, %stage3_unit5_bn1_moving_mean, %stage3_unit5_bn1_beta); | |
| %715 = (%tensor_0110,); | |
| let %x220: () = vm.invoke_tvm_op(%713, %714, %715) /* ty=() */; | |
| let %x221: Tensor[(1, 14, 14, 1024), float32] = %tensor_0110; | |
| let %storage_0111: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][111]) /* ty=Storage[] */; | |
| let %tensor_0111: Tensor[(256), float32] = memory.alloc_tensor(%storage_0111, 0 /* ty=int64 */, meta[relay.Constant][111] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][111]) /* ty=Tensor[(256), float32] */; | |
| %719 = fn (%p0111: Tensor[(256), float32], %p197: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %716 = add(%p0111, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %717 = sqrt(%716) /* ty=Tensor[(256), float32] */; | |
| %718 = divide(1f /* ty=float32 */, %717) /* ty=Tensor[(256), float32] */; | |
| multiply(%718, %p197) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %720 = (%stage3_unit5_bn2_moving_var, %stage3_unit5_bn2_gamma); | |
| %721 = (%tensor_0111,); | |
| let %x222: () = vm.invoke_tvm_op(%719, %720, %721) /* ty=() */; | |
| let %x223: Tensor[(256), float32] = %tensor_0111; | |
| let %storage_0112: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][112]) /* ty=Storage[] */; | |
| let %tensor_0112: Tensor[(1, 1, 1024, 256), float32] = memory.alloc_tensor(%storage_0112, 0 /* ty=int64 */, meta[relay.Constant][112] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][112]) /* ty=Tensor[(1, 1, 1024, 256), float32] */; | |
| %723 = fn (%p0112: Tensor[(256, 1024, 1, 1), float32], %p198: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 1, 1024, 256), float32] { | |
| %722 = layout_transform(%p0112, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 1024, 256), float32] */; | |
| multiply(%722, %p198) /* ty=Tensor[(1, 1, 1024, 256), float32] */ | |
| }; | |
| %724 = (%stage3_unit5_conv1_weight, %x223); | |
| %725 = (%tensor_0112,); | |
| let %x224: () = vm.invoke_tvm_op(%723, %724, %725) /* ty=() */; | |
| let %x225: Tensor[(1, 1, 1024, 256), float32] = %tensor_0112; | |
| let %storage_0113: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][113]) /* ty=Storage[] */; | |
| let %tensor_0113: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_0113, 0 /* ty=int64 */, meta[relay.Constant][113] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][113]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %732 = fn (%p0113: Tensor[(1, 14, 14, 1024), float32], %p199: Tensor[(1, 1, 1024, 256), float32], %p245: Tensor[(256), float32], %p336: Tensor[(256), float32], %p434: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %726 = nn.conv2d(%p0113, %p199, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %727 = negative(%p245) /* ty=Tensor[(256), float32] */; | |
| %728 = multiply(%727, %p336) /* ty=Tensor[(256), float32] */; | |
| %729 = add(%728, %p434) /* ty=Tensor[(256), float32] */; | |
| %730 = expand_dims(%729, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %731 = add(%726, %730) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%731) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %733 = (%x221, %x225, %stage3_unit5_bn2_moving_mean, %x223, %stage3_unit5_bn2_beta); | |
| %734 = (%tensor_0113,); | |
| let %x226: () = vm.invoke_tvm_op(%732, %733, %734) /* ty=() */; | |
| let %x227: Tensor[(1, 14, 14, 256), float32] = %tensor_0113; | |
| let %storage_0114: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][114]) /* ty=Storage[] */; | |
| let %tensor_0114: Tensor[(256), float32] = memory.alloc_tensor(%storage_0114, 0 /* ty=int64 */, meta[relay.Constant][114] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][114]) /* ty=Tensor[(256), float32] */; | |
| %738 = fn (%p0114: Tensor[(256), float32], %p1100: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %735 = add(%p0114, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %736 = sqrt(%735) /* ty=Tensor[(256), float32] */; | |
| %737 = divide(1f /* ty=float32 */, %736) /* ty=Tensor[(256), float32] */; | |
| multiply(%737, %p1100) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %739 = (%stage3_unit5_bn3_moving_var, %stage3_unit5_bn3_gamma); | |
| %740 = (%tensor_0114,); | |
| let %x228: () = vm.invoke_tvm_op(%738, %739, %740) /* ty=() */; | |
| let %x229: Tensor[(256), float32] = %tensor_0114; | |
| let %storage_0115: Storage[] = memory.alloc_storage(2359296 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][115]) /* ty=Storage[] */; | |
| let %tensor_0115: Tensor[(3, 3, 256, 256), float32] = memory.alloc_tensor(%storage_0115, 0 /* ty=int64 */, meta[relay.Constant][115] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][115]) /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| %742 = fn (%p0115: Tensor[(256, 256, 3, 3), float32], %p1101: Tensor[(256), float32], Primitive=1) -> Tensor[(3, 3, 256, 256), float32] { | |
| %741 = layout_transform(%p0115, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| multiply(%741, %p1101) /* ty=Tensor[(3, 3, 256, 256), float32] */ | |
| }; | |
| %743 = (%stage3_unit5_conv2_weight, %x229); | |
| %744 = (%tensor_0115,); | |
| let %x230: () = vm.invoke_tvm_op(%742, %743, %744) /* ty=() */; | |
| let %x231: Tensor[(3, 3, 256, 256), float32] = %tensor_0115; | |
| let %storage_0116: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][116]) /* ty=Storage[] */; | |
| let %tensor_0116: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_0116, 0 /* ty=int64 */, meta[relay.Constant][116] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][116]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %751 = fn (%p0116: Tensor[(1, 14, 14, 256), float32], %p1102: Tensor[(3, 3, 256, 256), float32], %p246: Tensor[(256), float32], %p337: Tensor[(256), float32], %p435: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %745 = nn.conv2d(%p0116, %p1102, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %746 = negative(%p246) /* ty=Tensor[(256), float32] */; | |
| %747 = multiply(%746, %p337) /* ty=Tensor[(256), float32] */; | |
| %748 = add(%747, %p435) /* ty=Tensor[(256), float32] */; | |
| %749 = expand_dims(%748, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %750 = add(%745, %749) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%750) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %752 = (%x227, %x231, %stage3_unit5_bn3_moving_mean, %x229, %stage3_unit5_bn3_beta); | |
| %753 = (%tensor_0116,); | |
| let %x232: () = vm.invoke_tvm_op(%751, %752, %753) /* ty=() */; | |
| let %x233: Tensor[(1, 14, 14, 256), float32] = %tensor_0116; | |
| let %storage_0117: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][117]) /* ty=Storage[] */; | |
| let %tensor_0117: Tensor[(1, 1, 256, 1024), float32] = memory.alloc_tensor(%storage_0117, 0 /* ty=int64 */, meta[relay.Constant][117] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][117]) /* ty=Tensor[(1, 1, 256, 1024), float32] */; | |
| %754 = fn (%p0117: Tensor[(1024, 256, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 256, 1024), float32] { | |
| layout_transform(%p0117, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 256, 1024), float32] */ | |
| }; | |
| %755 = (%stage3_unit5_conv3_weight,); | |
| %756 = (%tensor_0117,); | |
| let %x234: () = vm.invoke_tvm_op(%754, %755, %756) /* ty=() */; | |
| let %x235: Tensor[(1, 1, 256, 1024), float32] = %tensor_0117; | |
| let %storage_0118: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][118]) /* ty=Storage[] */; | |
| let %tensor_0118: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_0118, 0 /* ty=int64 */, meta[relay.Constant][118] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][118]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %758 = fn (%p0118: Tensor[(1, 14, 14, 256), float32], %p1103: Tensor[(1, 1, 256, 1024), float32], %p247: Tensor[(1, 14, 14, 1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %757 = nn.conv2d(%p0118, %p1103, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| add(%757, %p247) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %759 = (%x233, %x235, %x219); | |
| %760 = (%tensor_0118,); | |
| let %x236: () = vm.invoke_tvm_op(%758, %759, %760) /* ty=() */; | |
| let %x237: Tensor[(1, 14, 14, 1024), float32] = %tensor_0118; | |
| let %storage_0119: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][119]) /* ty=Storage[] */; | |
| let %tensor_0119: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_0119, 0 /* ty=int64 */, meta[relay.Constant][119] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][119]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %772 = fn (%p0119: Tensor[(1024), float32], %p1104: Tensor[(1024), float32], %p248: Tensor[(1, 14, 14, 1024), float32], %p338: Tensor[(1024), float32], %p436: Tensor[(1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %761 = add(%p0119, 2e-05f /* ty=float32 */) /* ty=Tensor[(1024), float32] */; | |
| %762 = sqrt(%761) /* ty=Tensor[(1024), float32] */; | |
| %763 = divide(1f /* ty=float32 */, %762) /* ty=Tensor[(1024), float32] */; | |
| %764 = multiply(%763, %p1104) /* ty=Tensor[(1024), float32] */; | |
| %765 = expand_dims(%764, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %766 = multiply(%p248, %765) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %767 = negative(%p338) /* ty=Tensor[(1024), float32] */; | |
| %768 = multiply(%767, %764) /* ty=Tensor[(1024), float32] */; | |
| %769 = add(%768, %p436) /* ty=Tensor[(1024), float32] */; | |
| %770 = expand_dims(%769, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %771 = add(%766, %770) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| nn.relu(%771) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %773 = (%stage3_unit6_bn1_moving_var, %stage3_unit6_bn1_gamma, %x237, %stage3_unit6_bn1_moving_mean, %stage3_unit6_bn1_beta); | |
| %774 = (%tensor_0119,); | |
| let %x238: () = vm.invoke_tvm_op(%772, %773, %774) /* ty=() */; | |
| let %x239: Tensor[(1, 14, 14, 1024), float32] = %tensor_0119; | |
| let %storage_0120: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][120]) /* ty=Storage[] */; | |
| let %tensor_0120: Tensor[(256), float32] = memory.alloc_tensor(%storage_0120, 0 /* ty=int64 */, meta[relay.Constant][120] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][120]) /* ty=Tensor[(256), float32] */; | |
| %778 = fn (%p0120: Tensor[(256), float32], %p1105: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %775 = add(%p0120, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %776 = sqrt(%775) /* ty=Tensor[(256), float32] */; | |
| %777 = divide(1f /* ty=float32 */, %776) /* ty=Tensor[(256), float32] */; | |
| multiply(%777, %p1105) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %779 = (%stage3_unit6_bn2_moving_var, %stage3_unit6_bn2_gamma); | |
| %780 = (%tensor_0120,); | |
| let %x240: () = vm.invoke_tvm_op(%778, %779, %780) /* ty=() */; | |
| let %x241: Tensor[(256), float32] = %tensor_0120; | |
| let %storage_0121: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][121]) /* ty=Storage[] */; | |
| let %tensor_0121: Tensor[(1, 1, 1024, 256), float32] = memory.alloc_tensor(%storage_0121, 0 /* ty=int64 */, meta[relay.Constant][121] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][121]) /* ty=Tensor[(1, 1, 1024, 256), float32] */; | |
| %782 = fn (%p0121: Tensor[(256, 1024, 1, 1), float32], %p1106: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 1, 1024, 256), float32] { | |
| %781 = layout_transform(%p0121, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 1024, 256), float32] */; | |
| multiply(%781, %p1106) /* ty=Tensor[(1, 1, 1024, 256), float32] */ | |
| }; | |
| %783 = (%stage3_unit6_conv1_weight, %x241); | |
| %784 = (%tensor_0121,); | |
| let %x242: () = vm.invoke_tvm_op(%782, %783, %784) /* ty=() */; | |
| let %x243: Tensor[(1, 1, 1024, 256), float32] = %tensor_0121; | |
| let %storage_0122: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][122]) /* ty=Storage[] */; | |
| let %tensor_0122: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_0122, 0 /* ty=int64 */, meta[relay.Constant][122] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][122]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %791 = fn (%p0122: Tensor[(1, 14, 14, 1024), float32], %p1107: Tensor[(1, 1, 1024, 256), float32], %p249: Tensor[(256), float32], %p339: Tensor[(256), float32], %p437: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %785 = nn.conv2d(%p0122, %p1107, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %786 = negative(%p249) /* ty=Tensor[(256), float32] */; | |
| %787 = multiply(%786, %p339) /* ty=Tensor[(256), float32] */; | |
| %788 = add(%787, %p437) /* ty=Tensor[(256), float32] */; | |
| %789 = expand_dims(%788, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %790 = add(%785, %789) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%790) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %792 = (%x239, %x243, %stage3_unit6_bn2_moving_mean, %x241, %stage3_unit6_bn2_beta); | |
| %793 = (%tensor_0122,); | |
| let %x244: () = vm.invoke_tvm_op(%791, %792, %793) /* ty=() */; | |
| let %x245: Tensor[(1, 14, 14, 256), float32] = %tensor_0122; | |
| let %storage_0123: Storage[] = memory.alloc_storage(1024 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][123]) /* ty=Storage[] */; | |
| let %tensor_0123: Tensor[(256), float32] = memory.alloc_tensor(%storage_0123, 0 /* ty=int64 */, meta[relay.Constant][123] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][123]) /* ty=Tensor[(256), float32] */; | |
| %797 = fn (%p0123: Tensor[(256), float32], %p1108: Tensor[(256), float32], Primitive=1) -> Tensor[(256), float32] { | |
| %794 = add(%p0123, 2e-05f /* ty=float32 */) /* ty=Tensor[(256), float32] */; | |
| %795 = sqrt(%794) /* ty=Tensor[(256), float32] */; | |
| %796 = divide(1f /* ty=float32 */, %795) /* ty=Tensor[(256), float32] */; | |
| multiply(%796, %p1108) /* ty=Tensor[(256), float32] */ | |
| }; | |
| %798 = (%stage3_unit6_bn3_moving_var, %stage3_unit6_bn3_gamma); | |
| %799 = (%tensor_0123,); | |
| let %x246: () = vm.invoke_tvm_op(%797, %798, %799) /* ty=() */; | |
| let %x247: Tensor[(256), float32] = %tensor_0123; | |
| let %storage_0124: Storage[] = memory.alloc_storage(2359296 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][124]) /* ty=Storage[] */; | |
| let %tensor_0124: Tensor[(3, 3, 256, 256), float32] = memory.alloc_tensor(%storage_0124, 0 /* ty=int64 */, meta[relay.Constant][124] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][124]) /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| %801 = fn (%p0124: Tensor[(256, 256, 3, 3), float32], %p1109: Tensor[(256), float32], Primitive=1) -> Tensor[(3, 3, 256, 256), float32] { | |
| %800 = layout_transform(%p0124, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 256, 256), float32] */; | |
| multiply(%800, %p1109) /* ty=Tensor[(3, 3, 256, 256), float32] */ | |
| }; | |
| %802 = (%stage3_unit6_conv2_weight, %x247); | |
| %803 = (%tensor_0124,); | |
| let %x248: () = vm.invoke_tvm_op(%801, %802, %803) /* ty=() */; | |
| let %x249: Tensor[(3, 3, 256, 256), float32] = %tensor_0124; | |
| let %storage_0125: Storage[] = memory.alloc_storage(200704 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][125]) /* ty=Storage[] */; | |
| let %tensor_0125: Tensor[(1, 14, 14, 256), float32] = memory.alloc_tensor(%storage_0125, 0 /* ty=int64 */, meta[relay.Constant][125] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][125]) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %810 = fn (%p0125: Tensor[(1, 14, 14, 256), float32], %p1110: Tensor[(3, 3, 256, 256), float32], %p250: Tensor[(256), float32], %p340: Tensor[(256), float32], %p438: Tensor[(256), float32], Primitive=1) -> Tensor[(1, 14, 14, 256), float32] { | |
| %804 = nn.conv2d(%p0125, %p1110, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| %805 = negative(%p250) /* ty=Tensor[(256), float32] */; | |
| %806 = multiply(%805, %p340) /* ty=Tensor[(256), float32] */; | |
| %807 = add(%806, %p438) /* ty=Tensor[(256), float32] */; | |
| %808 = expand_dims(%807, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 256), float32] */; | |
| %809 = add(%804, %808) /* ty=Tensor[(1, 14, 14, 256), float32] */; | |
| nn.relu(%809) /* ty=Tensor[(1, 14, 14, 256), float32] */ | |
| }; | |
| %811 = (%x245, %x249, %stage3_unit6_bn3_moving_mean, %x247, %stage3_unit6_bn3_beta); | |
| %812 = (%tensor_0125,); | |
| let %x250: () = vm.invoke_tvm_op(%810, %811, %812) /* ty=() */; | |
| let %x251: Tensor[(1, 14, 14, 256), float32] = %tensor_0125; | |
| let %storage_0126: Storage[] = memory.alloc_storage(1048576 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][126]) /* ty=Storage[] */; | |
| let %tensor_0126: Tensor[(1, 1, 256, 1024), float32] = memory.alloc_tensor(%storage_0126, 0 /* ty=int64 */, meta[relay.Constant][126] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][126]) /* ty=Tensor[(1, 1, 256, 1024), float32] */; | |
| %813 = fn (%p0126: Tensor[(1024, 256, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 256, 1024), float32] { | |
| layout_transform(%p0126, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 256, 1024), float32] */ | |
| }; | |
| %814 = (%stage3_unit6_conv3_weight,); | |
| %815 = (%tensor_0126,); | |
| let %x252: () = vm.invoke_tvm_op(%813, %814, %815) /* ty=() */; | |
| let %x253: Tensor[(1, 1, 256, 1024), float32] = %tensor_0126; | |
| let %storage_0127: Storage[] = memory.alloc_storage(4096 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][127]) /* ty=Storage[] */; | |
| let %tensor_0127: Tensor[(1024), float32] = memory.alloc_tensor(%storage_0127, 0 /* ty=int64 */, meta[relay.Constant][127] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][127]) /* ty=Tensor[(1024), float32] */; | |
| %819 = fn (%p0127: Tensor[(1024), float32], %p1111: Tensor[(1024), float32], Primitive=1) -> Tensor[(1024), float32] { | |
| %816 = add(%p0127, 2e-05f /* ty=float32 */) /* ty=Tensor[(1024), float32] */; | |
| %817 = sqrt(%816) /* ty=Tensor[(1024), float32] */; | |
| %818 = divide(1f /* ty=float32 */, %817) /* ty=Tensor[(1024), float32] */; | |
| multiply(%818, %p1111) /* ty=Tensor[(1024), float32] */ | |
| }; | |
| %820 = (%stage4_unit1_bn1_moving_var, %stage4_unit1_bn1_gamma); | |
| %821 = (%tensor_0127,); | |
| let %x254: () = vm.invoke_tvm_op(%819, %820, %821) /* ty=() */; | |
| let %x255: Tensor[(1024), float32] = %tensor_0127; | |
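| // Transition into stage4: %831 below fuses three things into one kernel - stage3_unit6's final 1x1 conv | |
| // (%x251 convolved with the transformed weight %x253), the residual add with %x237, and stage4_unit1's first | |
| // batch norm (scale %x255, shift from moving_mean/beta) followed by ReLU - producing %x257, the 1x14x14x1024 | |
| // input to the downsampling unit. | |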
| let %storage_0128: Storage[] = memory.alloc_storage(802816 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][128]) /* ty=Storage[] */; | |
| let %tensor_0128: Tensor[(1, 14, 14, 1024), float32] = memory.alloc_tensor(%storage_0128, 0 /* ty=int64 */, meta[relay.Constant][128] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][128]) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %831 = fn (%p0128: Tensor[(1, 14, 14, 256), float32], %p1112: Tensor[(1, 1, 256, 1024), float32], %p251: Tensor[(1, 14, 14, 1024), float32], %p341: Tensor[(1024), float32], %p439: Tensor[(1024), float32], %p52: Tensor[(1024), float32], Primitive=1) -> Tensor[(1, 14, 14, 1024), float32] { | |
| %822 = nn.conv2d(%p0128, %p1112, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %823 = add(%822, %p251) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %824 = expand_dims(%p341, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %825 = multiply(%823, %824) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| %826 = negative(%p439) /* ty=Tensor[(1024), float32] */; | |
| %827 = multiply(%826, %p341) /* ty=Tensor[(1024), float32] */; | |
| %828 = add(%827, %p52) /* ty=Tensor[(1024), float32] */; | |
| %829 = expand_dims(%828, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 1024), float32] */; | |
| %830 = add(%825, %829) /* ty=Tensor[(1, 14, 14, 1024), float32] */; | |
| nn.relu(%830) /* ty=Tensor[(1, 14, 14, 1024), float32] */ | |
| }; | |
| %832 = (%x251, %x253, %x237, %x255, %stage4_unit1_bn1_moving_mean, %stage4_unit1_bn1_beta); | |
| %833 = (%tensor_0128,); | |
| let %x256: () = vm.invoke_tvm_op(%831, %832, %833) /* ty=() */; | |
| let %x257: Tensor[(1, 14, 14, 1024), float32] = %tensor_0128; | |
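| // stage4_unit1 halves the spatial resolution: its first 1x1 conv uses strides=[2, 2], taking 1x14x14x1024 to | |
| // 1x7x7x512, the 3x3 conv stays at 512 channels (note the 9437184-byte buffer for the 3x3x512x512 HWIO weight), | |
| // and the closing 1x1 conv expands to 2048 channels. | |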
| let %storage_0129: Storage[] = memory.alloc_storage(2048 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][129]) /* ty=Storage[] */; | |
| let %tensor_0129: Tensor[(512), float32] = memory.alloc_tensor(%storage_0129, 0 /* ty=int64 */, meta[relay.Constant][129] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][129]) /* ty=Tensor[(512), float32] */; | |
| %837 = fn (%p0129: Tensor[(512), float32], %p1113: Tensor[(512), float32], Primitive=1) -> Tensor[(512), float32] { | |
| %834 = add(%p0129, 2e-05f /* ty=float32 */) /* ty=Tensor[(512), float32] */; | |
| %835 = sqrt(%834) /* ty=Tensor[(512), float32] */; | |
| %836 = divide(1f /* ty=float32 */, %835) /* ty=Tensor[(512), float32] */; | |
| multiply(%836, %p1113) /* ty=Tensor[(512), float32] */ | |
| }; | |
| %838 = (%stage4_unit1_bn2_moving_var, %stage4_unit1_bn2_gamma); | |
| %839 = (%tensor_0129,); | |
| let %x258: () = vm.invoke_tvm_op(%837, %838, %839) /* ty=() */; | |
| let %x259: Tensor[(512), float32] = %tensor_0129; | |
| let %storage_0130: Storage[] = memory.alloc_storage(2097152 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][130]) /* ty=Storage[] */; | |
| let %tensor_0130: Tensor[(1, 1, 1024, 512), float32] = memory.alloc_tensor(%storage_0130, 0 /* ty=int64 */, meta[relay.Constant][130] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][130]) /* ty=Tensor[(1, 1, 1024, 512), float32] */; | |
| %841 = fn (%p0130: Tensor[(512, 1024, 1, 1), float32], %p1114: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 1, 1024, 512), float32] { | |
| %840 = layout_transform(%p0130, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 1024, 512), float32] */; | |
| multiply(%840, %p1114) /* ty=Tensor[(1, 1, 1024, 512), float32] */ | |
| }; | |
| %842 = (%stage4_unit1_conv1_weight, %x259); | |
| %843 = (%tensor_0130,); | |
| let %x260: () = vm.invoke_tvm_op(%841, %842, %843) /* ty=() */; | |
| let %x261: Tensor[(1, 1, 1024, 512), float32] = %tensor_0130; | |
| let %storage_0131: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][131]) /* ty=Storage[] */; | |
| let %tensor_0131: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_0131, 0 /* ty=int64 */, meta[relay.Constant][131] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][131]) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %850 = fn (%p0131: Tensor[(1, 14, 14, 1024), float32], %p1115: Tensor[(1, 1, 1024, 512), float32], %p252: Tensor[(512), float32], %p342: Tensor[(512), float32], %p440: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] { | |
| %844 = nn.conv2d(%p0131, %p1115, strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %845 = negative(%p252) /* ty=Tensor[(512), float32] */; | |
| %846 = multiply(%845, %p342) /* ty=Tensor[(512), float32] */; | |
| %847 = add(%846, %p440) /* ty=Tensor[(512), float32] */; | |
| %848 = expand_dims(%847, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %849 = add(%844, %848) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| nn.relu(%849) /* ty=Tensor[(1, 7, 7, 512), float32] */ | |
| }; | |
| %851 = (%x257, %x261, %stage4_unit1_bn2_moving_mean, %x259, %stage4_unit1_bn2_beta); | |
| %852 = (%tensor_0131,); | |
| let %x262: () = vm.invoke_tvm_op(%850, %851, %852) /* ty=() */; | |
| let %x263: Tensor[(1, 7, 7, 512), float32] = %tensor_0131; | |
| let %storage_0132: Storage[] = memory.alloc_storage(2048 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][132]) /* ty=Storage[] */; | |
| let %tensor_0132: Tensor[(512), float32] = memory.alloc_tensor(%storage_0132, 0 /* ty=int64 */, meta[relay.Constant][132] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][132]) /* ty=Tensor[(512), float32] */; | |
| %856 = fn (%p0132: Tensor[(512), float32], %p1116: Tensor[(512), float32], Primitive=1) -> Tensor[(512), float32] { | |
| %853 = add(%p0132, 2e-05f /* ty=float32 */) /* ty=Tensor[(512), float32] */; | |
| %854 = sqrt(%853) /* ty=Tensor[(512), float32] */; | |
| %855 = divide(1f /* ty=float32 */, %854) /* ty=Tensor[(512), float32] */; | |
| multiply(%855, %p1116) /* ty=Tensor[(512), float32] */ | |
| }; | |
| %857 = (%stage4_unit1_bn3_moving_var, %stage4_unit1_bn3_gamma); | |
| %858 = (%tensor_0132,); | |
| let %x264: () = vm.invoke_tvm_op(%856, %857, %858) /* ty=() */; | |
| let %x265: Tensor[(512), float32] = %tensor_0132; | |
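| // stage4_unit1 conv2 weight: OIHW -> HWIO, with the bn3 scale %x265 folded in | |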
| let %storage_0133: Storage[] = memory.alloc_storage(9437184 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][133]) /* ty=Storage[] */; | |
| let %tensor_0133: Tensor[(3, 3, 512, 512), float32] = memory.alloc_tensor(%storage_0133, 0 /* ty=int64 */, meta[relay.Constant][133] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][133]) /* ty=Tensor[(3, 3, 512, 512), float32] */; | |
| %860 = fn (%p0133: Tensor[(512, 512, 3, 3), float32], %p1117: Tensor[(512), float32], Primitive=1) -> Tensor[(3, 3, 512, 512), float32] { | |
| %859 = layout_transform(%p0133, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 512, 512), float32] */; | |
| multiply(%859, %p1117) /* ty=Tensor[(3, 3, 512, 512), float32] */ | |
| }; | |
| %861 = (%stage4_unit1_conv2_weight, %x265); | |
| %862 = (%tensor_0133,); | |
| let %x266: () = vm.invoke_tvm_op(%860, %861, %862) /* ty=() */; | |
| let %x267: Tensor[(3, 3, 512, 512), float32] = %tensor_0133; | |
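| // stage4_unit1 conv2: 3x3 NHWC conv (padding 1), fused with the bn3 shift and ReLU | |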
| let %storage_0134: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][134]) /* ty=Storage[] */; | |
| let %tensor_0134: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_0134, 0 /* ty=int64 */, meta[relay.Constant][134] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][134]) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %869 = fn (%p0134: Tensor[(1, 7, 7, 512), float32], %p1118: Tensor[(3, 3, 512, 512), float32], %p253: Tensor[(512), float32], %p343: Tensor[(512), float32], %p441: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] { | |
| %863 = nn.conv2d(%p0134, %p1118, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %864 = negative(%p253) /* ty=Tensor[(512), float32] */; | |
| %865 = multiply(%864, %p343) /* ty=Tensor[(512), float32] */; | |
| %866 = add(%865, %p441) /* ty=Tensor[(512), float32] */; | |
| %867 = expand_dims(%866, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %868 = add(%863, %867) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| nn.relu(%868) /* ty=Tensor[(1, 7, 7, 512), float32] */ | |
| }; | |
| %870 = (%x263, %x267, %stage4_unit1_bn3_moving_mean, %x265, %stage4_unit1_bn3_beta); | |
| %871 = (%tensor_0134,); | |
| let %x268: () = vm.invoke_tvm_op(%869, %870, %871) /* ty=() */; | |
| let %x269: Tensor[(1, 7, 7, 512), float32] = %tensor_0134; | |
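| // stage4_unit1 conv3 weight: OIHW -> HWIO (layout only, no scale folded) | |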
| let %storage_0135: Storage[] = memory.alloc_storage(4194304 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][135]) /* ty=Storage[] */; | |
| let %tensor_0135: Tensor[(1, 1, 512, 2048), float32] = memory.alloc_tensor(%storage_0135, 0 /* ty=int64 */, meta[relay.Constant][135] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][135]) /* ty=Tensor[(1, 1, 512, 2048), float32] */; | |
| %872 = fn (%p0135: Tensor[(2048, 512, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 512, 2048), float32] { | |
| layout_transform(%p0135, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 512, 2048), float32] */ | |
| }; | |
| %873 = (%stage4_unit1_conv3_weight,); | |
| %874 = (%tensor_0135,); | |
| let %x270: () = vm.invoke_tvm_op(%872, %873, %874) /* ty=() */; | |
| let %x271: Tensor[(1, 1, 512, 2048), float32] = %tensor_0135; | |
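| // stage4_unit1 shortcut (sc) weight: OIHW -> HWIO | |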
| let %storage_0136: Storage[] = memory.alloc_storage(8388608 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][136]) /* ty=Storage[] */; | |
| let %tensor_0136: Tensor[(1, 1, 1024, 2048), float32] = memory.alloc_tensor(%storage_0136, 0 /* ty=int64 */, meta[relay.Constant][136] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][136]) /* ty=Tensor[(1, 1, 1024, 2048), float32] */; | |
| %875 = fn (%p0136: Tensor[(2048, 1024, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 1024, 2048), float32] { | |
| layout_transform(%p0136, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 1024, 2048), float32] */ | |
| }; | |
| %876 = (%stage4_unit1_sc_weight,); | |
| %877 = (%tensor_0136,); | |
| let %x272: () = vm.invoke_tvm_op(%875, %876, %877) /* ty=() */; | |
| let %x273: Tensor[(1, 1, 1024, 2048), float32] = %tensor_0136; | |
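| // stage4_unit1 shortcut branch: strided 1x1 conv, 1024 -> 2048 channels, on the unit input %x257 | |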
| let %storage_0137: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][137]) /* ty=Storage[] */; | |
| let %tensor_0137: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_0137, 0 /* ty=int64 */, meta[relay.Constant][137] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][137]) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %878 = fn (%p0137: Tensor[(1, 14, 14, 1024), float32], %p1119: Tensor[(1, 1, 1024, 2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] { | |
| nn.conv2d(%p0137, %p1119, strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 2048), float32] */ | |
| }; | |
| %879 = (%x257, %x273); | |
| %880 = (%tensor_0137,); | |
| let %x274: () = vm.invoke_tvm_op(%878, %879, %880) /* ty=() */; | |
| let %x275: Tensor[(1, 7, 7, 2048), float32] = %tensor_0137; | |
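| // stage4_unit1 conv3: 1x1 conv to 2048 channels plus residual add with the shortcut output %x275 | |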
| let %storage_0138: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][138]) /* ty=Storage[] */; | |
| let %tensor_0138: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_0138, 0 /* ty=int64 */, meta[relay.Constant][138] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][138]) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %882 = fn (%p0138: Tensor[(1, 7, 7, 512), float32], %p1120: Tensor[(1, 1, 512, 2048), float32], %p254: Tensor[(1, 7, 7, 2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] { | |
| %881 = nn.conv2d(%p0138, %p1120, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| add(%881, %p254) /* ty=Tensor[(1, 7, 7, 2048), float32] */ | |
| }; | |
| %883 = (%x269, %x271, %x275); | |
| %884 = (%tensor_0138,); | |
| let %x276: () = vm.invoke_tvm_op(%882, %883, %884) /* ty=() */; | |
| let %x277: Tensor[(1, 7, 7, 2048), float32] = %tensor_0138; | |
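| // stage4_unit2 bn1: full batch-norm (scale and shift from gamma/beta and the moving stats) plus ReLU on the unit1 output %x277 | |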
| let %storage_0139: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][139]) /* ty=Storage[] */; | |
| let %tensor_0139: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_0139, 0 /* ty=int64 */, meta[relay.Constant][139] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][139]) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %896 = fn (%p0139: Tensor[(2048), float32], %p1121: Tensor[(2048), float32], %p255: Tensor[(1, 7, 7, 2048), float32], %p344: Tensor[(2048), float32], %p442: Tensor[(2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] { | |
| %885 = add(%p0139, 2e-05f /* ty=float32 */) /* ty=Tensor[(2048), float32] */; | |
| %886 = sqrt(%885) /* ty=Tensor[(2048), float32] */; | |
| %887 = divide(1f /* ty=float32 */, %886) /* ty=Tensor[(2048), float32] */; | |
| %888 = multiply(%887, %p1121) /* ty=Tensor[(2048), float32] */; | |
| %889 = expand_dims(%888, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 2048), float32] */; | |
| %890 = multiply(%p255, %889) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %891 = negative(%p344) /* ty=Tensor[(2048), float32] */; | |
| %892 = multiply(%891, %888) /* ty=Tensor[(2048), float32] */; | |
| %893 = add(%892, %p442) /* ty=Tensor[(2048), float32] */; | |
| %894 = expand_dims(%893, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 2048), float32] */; | |
| %895 = add(%890, %894) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| nn.relu(%895) /* ty=Tensor[(1, 7, 7, 2048), float32] */ | |
| }; | |
| %897 = (%stage4_unit2_bn1_moving_var, %stage4_unit2_bn1_gamma, %x277, %stage4_unit2_bn1_moving_mean, %stage4_unit2_bn1_beta); | |
| %898 = (%tensor_0139,); | |
| let %x278: () = vm.invoke_tvm_op(%896, %897, %898) /* ty=() */; | |
| let %x279: Tensor[(1, 7, 7, 2048), float32] = %tensor_0139; | |
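| // stage4_unit2 and stage4_unit3 below repeat the same bottleneck pattern (1x1 -> 3x3 -> 1x1 convs with folded BN scales, identity residual add), without downsampling | |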
| let %storage_0140: Storage[] = memory.alloc_storage(2048 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][140]) /* ty=Storage[] */; | |
| let %tensor_0140: Tensor[(512), float32] = memory.alloc_tensor(%storage_0140, 0 /* ty=int64 */, meta[relay.Constant][140] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][140]) /* ty=Tensor[(512), float32] */; | |
| %902 = fn (%p0140: Tensor[(512), float32], %p1122: Tensor[(512), float32], Primitive=1) -> Tensor[(512), float32] { | |
| %899 = add(%p0140, 2e-05f /* ty=float32 */) /* ty=Tensor[(512), float32] */; | |
| %900 = sqrt(%899) /* ty=Tensor[(512), float32] */; | |
| %901 = divide(1f /* ty=float32 */, %900) /* ty=Tensor[(512), float32] */; | |
| multiply(%901, %p1122) /* ty=Tensor[(512), float32] */ | |
| }; | |
| %903 = (%stage4_unit2_bn2_moving_var, %stage4_unit2_bn2_gamma); | |
| %904 = (%tensor_0140,); | |
| let %x280: () = vm.invoke_tvm_op(%902, %903, %904) /* ty=() */; | |
| let %x281: Tensor[(512), float32] = %tensor_0140; | |
| let %storage_0141: Storage[] = memory.alloc_storage(4194304 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][141]) /* ty=Storage[] */; | |
| let %tensor_0141: Tensor[(1, 1, 2048, 512), float32] = memory.alloc_tensor(%storage_0141, 0 /* ty=int64 */, meta[relay.Constant][141] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][141]) /* ty=Tensor[(1, 1, 2048, 512), float32] */; | |
| %906 = fn (%p0141: Tensor[(512, 2048, 1, 1), float32], %p1123: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 1, 2048, 512), float32] { | |
| %905 = layout_transform(%p0141, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 2048, 512), float32] */; | |
| multiply(%905, %p1123) /* ty=Tensor[(1, 1, 2048, 512), float32] */ | |
| }; | |
| %907 = (%stage4_unit2_conv1_weight, %x281); | |
| %908 = (%tensor_0141,); | |
| let %x282: () = vm.invoke_tvm_op(%906, %907, %908) /* ty=() */; | |
| let %x283: Tensor[(1, 1, 2048, 512), float32] = %tensor_0141; | |
| let %storage_0142: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][142]) /* ty=Storage[] */; | |
| let %tensor_0142: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_0142, 0 /* ty=int64 */, meta[relay.Constant][142] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][142]) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %915 = fn (%p0142: Tensor[(1, 7, 7, 2048), float32], %p1124: Tensor[(1, 1, 2048, 512), float32], %p256: Tensor[(512), float32], %p345: Tensor[(512), float32], %p443: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] { | |
| %909 = nn.conv2d(%p0142, %p1124, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %910 = negative(%p256) /* ty=Tensor[(512), float32] */; | |
| %911 = multiply(%910, %p345) /* ty=Tensor[(512), float32] */; | |
| %912 = add(%911, %p443) /* ty=Tensor[(512), float32] */; | |
| %913 = expand_dims(%912, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %914 = add(%909, %913) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| nn.relu(%914) /* ty=Tensor[(1, 7, 7, 512), float32] */ | |
| }; | |
| %916 = (%x279, %x283, %stage4_unit2_bn2_moving_mean, %x281, %stage4_unit2_bn2_beta); | |
| %917 = (%tensor_0142,); | |
| let %x284: () = vm.invoke_tvm_op(%915, %916, %917) /* ty=() */; | |
| let %x285: Tensor[(1, 7, 7, 512), float32] = %tensor_0142; | |
| let %storage_0143: Storage[] = memory.alloc_storage(2048 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][143]) /* ty=Storage[] */; | |
| let %tensor_0143: Tensor[(512), float32] = memory.alloc_tensor(%storage_0143, 0 /* ty=int64 */, meta[relay.Constant][143] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][143]) /* ty=Tensor[(512), float32] */; | |
| %921 = fn (%p0143: Tensor[(512), float32], %p1125: Tensor[(512), float32], Primitive=1) -> Tensor[(512), float32] { | |
| %918 = add(%p0143, 2e-05f /* ty=float32 */) /* ty=Tensor[(512), float32] */; | |
| %919 = sqrt(%918) /* ty=Tensor[(512), float32] */; | |
| %920 = divide(1f /* ty=float32 */, %919) /* ty=Tensor[(512), float32] */; | |
| multiply(%920, %p1125) /* ty=Tensor[(512), float32] */ | |
| }; | |
| %922 = (%stage4_unit2_bn3_moving_var, %stage4_unit2_bn3_gamma); | |
| %923 = (%tensor_0143,); | |
| let %x286: () = vm.invoke_tvm_op(%921, %922, %923) /* ty=() */; | |
| let %x287: Tensor[(512), float32] = %tensor_0143; | |
| let %storage_0144: Storage[] = memory.alloc_storage(9437184 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][144]) /* ty=Storage[] */; | |
| let %tensor_0144: Tensor[(3, 3, 512, 512), float32] = memory.alloc_tensor(%storage_0144, 0 /* ty=int64 */, meta[relay.Constant][144] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][144]) /* ty=Tensor[(3, 3, 512, 512), float32] */; | |
| %925 = fn (%p0144: Tensor[(512, 512, 3, 3), float32], %p1126: Tensor[(512), float32], Primitive=1) -> Tensor[(3, 3, 512, 512), float32] { | |
| %924 = layout_transform(%p0144, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 512, 512), float32] */; | |
| multiply(%924, %p1126) /* ty=Tensor[(3, 3, 512, 512), float32] */ | |
| }; | |
| %926 = (%stage4_unit2_conv2_weight, %x287); | |
| %927 = (%tensor_0144,); | |
| let %x288: () = vm.invoke_tvm_op(%925, %926, %927) /* ty=() */; | |
| let %x289: Tensor[(3, 3, 512, 512), float32] = %tensor_0144; | |
| let %storage_0145: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][145]) /* ty=Storage[] */; | |
| let %tensor_0145: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_0145, 0 /* ty=int64 */, meta[relay.Constant][145] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][145]) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %934 = fn (%p0145: Tensor[(1, 7, 7, 512), float32], %p1127: Tensor[(3, 3, 512, 512), float32], %p257: Tensor[(512), float32], %p346: Tensor[(512), float32], %p444: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] { | |
| %928 = nn.conv2d(%p0145, %p1127, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %929 = negative(%p257) /* ty=Tensor[(512), float32] */; | |
| %930 = multiply(%929, %p346) /* ty=Tensor[(512), float32] */; | |
| %931 = add(%930, %p444) /* ty=Tensor[(512), float32] */; | |
| %932 = expand_dims(%931, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %933 = add(%928, %932) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| nn.relu(%933) /* ty=Tensor[(1, 7, 7, 512), float32] */ | |
| }; | |
| %935 = (%x285, %x289, %stage4_unit2_bn3_moving_mean, %x287, %stage4_unit2_bn3_beta); | |
| %936 = (%tensor_0145,); | |
| let %x290: () = vm.invoke_tvm_op(%934, %935, %936) /* ty=() */; | |
| let %x291: Tensor[(1, 7, 7, 512), float32] = %tensor_0145; | |
| let %storage_0146: Storage[] = memory.alloc_storage(4194304 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][146]) /* ty=Storage[] */; | |
| let %tensor_0146: Tensor[(1, 1, 512, 2048), float32] = memory.alloc_tensor(%storage_0146, 0 /* ty=int64 */, meta[relay.Constant][146] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][146]) /* ty=Tensor[(1, 1, 512, 2048), float32] */; | |
| %937 = fn (%p0146: Tensor[(2048, 512, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 512, 2048), float32] { | |
| layout_transform(%p0146, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 512, 2048), float32] */ | |
| }; | |
| %938 = (%stage4_unit2_conv3_weight,); | |
| %939 = (%tensor_0146,); | |
| let %x292: () = vm.invoke_tvm_op(%937, %938, %939) /* ty=() */; | |
| let %x293: Tensor[(1, 1, 512, 2048), float32] = %tensor_0146; | |
| let %storage_0147: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][147]) /* ty=Storage[] */; | |
| let %tensor_0147: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_0147, 0 /* ty=int64 */, meta[relay.Constant][147] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][147]) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %941 = fn (%p0147: Tensor[(1, 7, 7, 512), float32], %p1128: Tensor[(1, 1, 512, 2048), float32], %p258: Tensor[(1, 7, 7, 2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] { | |
| %940 = nn.conv2d(%p0147, %p1128, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| add(%940, %p258) /* ty=Tensor[(1, 7, 7, 2048), float32] */ | |
| }; | |
| %942 = (%x291, %x293, %x277); | |
| %943 = (%tensor_0147,); | |
| let %x294: () = vm.invoke_tvm_op(%941, %942, %943) /* ty=() */; | |
| let %x295: Tensor[(1, 7, 7, 2048), float32] = %tensor_0147; | |
| let %storage_0148: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][148]) /* ty=Storage[] */; | |
| let %tensor_0148: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_0148, 0 /* ty=int64 */, meta[relay.Constant][148] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][148]) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %955 = fn (%p0148: Tensor[(2048), float32], %p1129: Tensor[(2048), float32], %p259: Tensor[(1, 7, 7, 2048), float32], %p347: Tensor[(2048), float32], %p445: Tensor[(2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] { | |
| %944 = add(%p0148, 2e-05f /* ty=float32 */) /* ty=Tensor[(2048), float32] */; | |
| %945 = sqrt(%944) /* ty=Tensor[(2048), float32] */; | |
| %946 = divide(1f /* ty=float32 */, %945) /* ty=Tensor[(2048), float32] */; | |
| %947 = multiply(%946, %p1129) /* ty=Tensor[(2048), float32] */; | |
| %948 = expand_dims(%947, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 2048), float32] */; | |
| %949 = multiply(%p259, %948) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %950 = negative(%p347) /* ty=Tensor[(2048), float32] */; | |
| %951 = multiply(%950, %947) /* ty=Tensor[(2048), float32] */; | |
| %952 = add(%951, %p445) /* ty=Tensor[(2048), float32] */; | |
| %953 = expand_dims(%952, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 2048), float32] */; | |
| %954 = add(%949, %953) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| nn.relu(%954) /* ty=Tensor[(1, 7, 7, 2048), float32] */ | |
| }; | |
| %956 = (%stage4_unit3_bn1_moving_var, %stage4_unit3_bn1_gamma, %x295, %stage4_unit3_bn1_moving_mean, %stage4_unit3_bn1_beta); | |
| %957 = (%tensor_0148,); | |
| let %x296: () = vm.invoke_tvm_op(%955, %956, %957) /* ty=() */; | |
| let %x297: Tensor[(1, 7, 7, 2048), float32] = %tensor_0148; | |
| let %storage_0149: Storage[] = memory.alloc_storage(2048 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][149]) /* ty=Storage[] */; | |
| let %tensor_0149: Tensor[(512), float32] = memory.alloc_tensor(%storage_0149, 0 /* ty=int64 */, meta[relay.Constant][149] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][149]) /* ty=Tensor[(512), float32] */; | |
| %961 = fn (%p0149: Tensor[(512), float32], %p1130: Tensor[(512), float32], Primitive=1) -> Tensor[(512), float32] { | |
| %958 = add(%p0149, 2e-05f /* ty=float32 */) /* ty=Tensor[(512), float32] */; | |
| %959 = sqrt(%958) /* ty=Tensor[(512), float32] */; | |
| %960 = divide(1f /* ty=float32 */, %959) /* ty=Tensor[(512), float32] */; | |
| multiply(%960, %p1130) /* ty=Tensor[(512), float32] */ | |
| }; | |
| %962 = (%stage4_unit3_bn2_moving_var, %stage4_unit3_bn2_gamma); | |
| %963 = (%tensor_0149,); | |
| let %x298: () = vm.invoke_tvm_op(%961, %962, %963) /* ty=() */; | |
| let %x299: Tensor[(512), float32] = %tensor_0149; | |
| let %storage_0150: Storage[] = memory.alloc_storage(4194304 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][150]) /* ty=Storage[] */; | |
| let %tensor_0150: Tensor[(1, 1, 2048, 512), float32] = memory.alloc_tensor(%storage_0150, 0 /* ty=int64 */, meta[relay.Constant][150] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][150]) /* ty=Tensor[(1, 1, 2048, 512), float32] */; | |
| %965 = fn (%p0150: Tensor[(512, 2048, 1, 1), float32], %p1131: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 1, 2048, 512), float32] { | |
| %964 = layout_transform(%p0150, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 2048, 512), float32] */; | |
| multiply(%964, %p1131) /* ty=Tensor[(1, 1, 2048, 512), float32] */ | |
| }; | |
| %966 = (%stage4_unit3_conv1_weight, %x299); | |
| %967 = (%tensor_0150,); | |
| let %x300: () = vm.invoke_tvm_op(%965, %966, %967) /* ty=() */; | |
| let %x301: Tensor[(1, 1, 2048, 512), float32] = %tensor_0150; | |
| let %storage_0151: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][151]) /* ty=Storage[] */; | |
| let %tensor_0151: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_0151, 0 /* ty=int64 */, meta[relay.Constant][151] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][151]) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %974 = fn (%p0151: Tensor[(1, 7, 7, 2048), float32], %p1132: Tensor[(1, 1, 2048, 512), float32], %p260: Tensor[(512), float32], %p348: Tensor[(512), float32], %p446: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] { | |
| %968 = nn.conv2d(%p0151, %p1132, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %969 = negative(%p260) /* ty=Tensor[(512), float32] */; | |
| %970 = multiply(%969, %p348) /* ty=Tensor[(512), float32] */; | |
| %971 = add(%970, %p446) /* ty=Tensor[(512), float32] */; | |
| %972 = expand_dims(%971, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %973 = add(%968, %972) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| nn.relu(%973) /* ty=Tensor[(1, 7, 7, 512), float32] */ | |
| }; | |
| %975 = (%x297, %x301, %stage4_unit3_bn2_moving_mean, %x299, %stage4_unit3_bn2_beta); | |
| %976 = (%tensor_0151,); | |
| let %x302: () = vm.invoke_tvm_op(%974, %975, %976) /* ty=() */; | |
| let %x303: Tensor[(1, 7, 7, 512), float32] = %tensor_0151; | |
| let %storage_0152: Storage[] = memory.alloc_storage(2048 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][152]) /* ty=Storage[] */; | |
| let %tensor_0152: Tensor[(512), float32] = memory.alloc_tensor(%storage_0152, 0 /* ty=int64 */, meta[relay.Constant][152] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][152]) /* ty=Tensor[(512), float32] */; | |
| %980 = fn (%p0152: Tensor[(512), float32], %p1133: Tensor[(512), float32], Primitive=1) -> Tensor[(512), float32] { | |
| %977 = add(%p0152, 2e-05f /* ty=float32 */) /* ty=Tensor[(512), float32] */; | |
| %978 = sqrt(%977) /* ty=Tensor[(512), float32] */; | |
| %979 = divide(1f /* ty=float32 */, %978) /* ty=Tensor[(512), float32] */; | |
| multiply(%979, %p1133) /* ty=Tensor[(512), float32] */ | |
| }; | |
| %981 = (%stage4_unit3_bn3_moving_var, %stage4_unit3_bn3_gamma); | |
| %982 = (%tensor_0152,); | |
| let %x304: () = vm.invoke_tvm_op(%980, %981, %982) /* ty=() */; | |
| let %x305: Tensor[(512), float32] = %tensor_0152; | |
| let %storage_0153: Storage[] = memory.alloc_storage(9437184 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][153]) /* ty=Storage[] */; | |
| let %tensor_0153: Tensor[(3, 3, 512, 512), float32] = memory.alloc_tensor(%storage_0153, 0 /* ty=int64 */, meta[relay.Constant][153] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][153]) /* ty=Tensor[(3, 3, 512, 512), float32] */; | |
| %984 = fn (%p0153: Tensor[(512, 512, 3, 3), float32], %p1134: Tensor[(512), float32], Primitive=1) -> Tensor[(3, 3, 512, 512), float32] { | |
| %983 = layout_transform(%p0153, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(3, 3, 512, 512), float32] */; | |
| multiply(%983, %p1134) /* ty=Tensor[(3, 3, 512, 512), float32] */ | |
| }; | |
| %985 = (%stage4_unit3_conv2_weight, %x305); | |
| %986 = (%tensor_0153,); | |
| let %x306: () = vm.invoke_tvm_op(%984, %985, %986) /* ty=() */; | |
| let %x307: Tensor[(3, 3, 512, 512), float32] = %tensor_0153; | |
| let %storage_0154: Storage[] = memory.alloc_storage(100352 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][154]) /* ty=Storage[] */; | |
| let %tensor_0154: Tensor[(1, 7, 7, 512), float32] = memory.alloc_tensor(%storage_0154, 0 /* ty=int64 */, meta[relay.Constant][154] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][154]) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %993 = fn (%p0154: Tensor[(1, 7, 7, 512), float32], %p1135: Tensor[(3, 3, 512, 512), float32], %p261: Tensor[(512), float32], %p349: Tensor[(512), float32], %p447: Tensor[(512), float32], Primitive=1) -> Tensor[(1, 7, 7, 512), float32] { | |
| %987 = nn.conv2d(%p0154, %p1135, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| %988 = negative(%p261) /* ty=Tensor[(512), float32] */; | |
| %989 = multiply(%988, %p349) /* ty=Tensor[(512), float32] */; | |
| %990 = add(%989, %p447) /* ty=Tensor[(512), float32] */; | |
| %991 = expand_dims(%990, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 512), float32] */; | |
| %992 = add(%987, %991) /* ty=Tensor[(1, 7, 7, 512), float32] */; | |
| nn.relu(%992) /* ty=Tensor[(1, 7, 7, 512), float32] */ | |
| }; | |
| %994 = (%x303, %x307, %stage4_unit3_bn3_moving_mean, %x305, %stage4_unit3_bn3_beta); | |
| %995 = (%tensor_0154,); | |
| let %x308: () = vm.invoke_tvm_op(%993, %994, %995) /* ty=() */; | |
| let %x309: Tensor[(1, 7, 7, 512), float32] = %tensor_0154; | |
| let %storage_0155: Storage[] = memory.alloc_storage(4194304 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][155]) /* ty=Storage[] */; | |
| let %tensor_0155: Tensor[(1, 1, 512, 2048), float32] = memory.alloc_tensor(%storage_0155, 0 /* ty=int64 */, meta[relay.Constant][155] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][155]) /* ty=Tensor[(1, 1, 512, 2048), float32] */; | |
| %996 = fn (%p0155: Tensor[(2048, 512, 1, 1), float32], Primitive=1) -> Tensor[(1, 1, 512, 2048), float32] { | |
| layout_transform(%p0155, src_layout="OIHW", dst_layout="HWIO") /* ty=Tensor[(1, 1, 512, 2048), float32] */ | |
| }; | |
| %997 = (%stage4_unit3_conv3_weight,); | |
| %998 = (%tensor_0155,); | |
| let %x310: () = vm.invoke_tvm_op(%996, %997, %998) /* ty=() */; | |
| let %x311: Tensor[(1, 1, 512, 2048), float32] = %tensor_0155; | |
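| // bn1 (final batch-norm before pooling): gamma / sqrt(moving_var + 2e-05) over 2048 channels | |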
| let %storage_0156: Storage[] = memory.alloc_storage(8192 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][156]) /* ty=Storage[] */; | |
| let %tensor_0156: Tensor[(2048), float32] = memory.alloc_tensor(%storage_0156, 0 /* ty=int64 */, meta[relay.Constant][156] /* ty=Tensor[(1), int64] */, meta[relay.attrs.AllocTensorAttrs][156]) /* ty=Tensor[(2048), float32] */; | |
| %1002 = fn (%p0156: Tensor[(2048), float32], %p1136: Tensor[(2048), float32], Primitive=1) -> Tensor[(2048), float32] { | |
| %999 = add(%p0156, 2e-05f /* ty=float32 */) /* ty=Tensor[(2048), float32] */; | |
| %1000 = sqrt(%999) /* ty=Tensor[(2048), float32] */; | |
| %1001 = divide(1f /* ty=float32 */, %1000) /* ty=Tensor[(2048), float32] */; | |
| multiply(%1001, %p1136) /* ty=Tensor[(2048), float32] */ | |
| }; | |
| %1003 = (%bn1_moving_var, %bn1_gamma); | |
| %1004 = (%tensor_0156,); | |
| let %x312: () = vm.invoke_tvm_op(%1002, %1003, %1004) /* ty=() */; | |
| let %x313: Tensor[(2048), float32] = %tensor_0156; | |
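| // stage4_unit3 conv3: 1x1 conv to 2048 channels, residual add with the unit2 output %x295, then the final bn1 scale/shift and ReLU, fused into one kernel | |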
| let %storage_0157: Storage[] = memory.alloc_storage(401408 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][157]) /* ty=Storage[] */; | |
| let %tensor_0157: Tensor[(1, 7, 7, 2048), float32] = memory.alloc_tensor(%storage_0157, 0 /* ty=int64 */, meta[relay.Constant][157] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][157]) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %1014 = fn (%p0157: Tensor[(1, 7, 7, 512), float32], %p1137: Tensor[(1, 1, 512, 2048), float32], %p262: Tensor[(1, 7, 7, 2048), float32], %p350: Tensor[(2048), float32], %p448: Tensor[(2048), float32], %p53: Tensor[(2048), float32], Primitive=1) -> Tensor[(1, 7, 7, 2048), float32] { | |
| %1005 = nn.conv2d(%p0157, %p1137, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1], data_layout="NHWC", kernel_layout="HWIO") /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %1006 = add(%1005, %p262) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %1007 = expand_dims(%p350, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 2048), float32] */; | |
| %1008 = multiply(%1006, %1007) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| %1009 = negative(%p448) /* ty=Tensor[(2048), float32] */; | |
| %1010 = multiply(%1009, %p350) /* ty=Tensor[(2048), float32] */; | |
| %1011 = add(%1010, %p53) /* ty=Tensor[(2048), float32] */; | |
| %1012 = expand_dims(%1011, axis=0, num_newaxis=3) /* ty=Tensor[(1, 1, 1, 2048), float32] */; | |
| %1013 = add(%1008, %1012) /* ty=Tensor[(1, 7, 7, 2048), float32] */; | |
| nn.relu(%1013) /* ty=Tensor[(1, 7, 7, 2048), float32] */ | |
| }; | |
| %1015 = (%x309, %x311, %x295, %x313, %bn1_moving_mean, %bn1_beta); | |
| %1016 = (%tensor_0157,); | |
| let %x314: () = vm.invoke_tvm_op(%1014, %1015, %1016) /* ty=() */; | |
| let %x315: Tensor[(1, 7, 7, 2048), float32] = %tensor_0157; | |
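| // head: global average pooling over the 7x7 spatial dims (NHWC) | |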
| let %storage_0158: Storage[] = memory.alloc_storage(8192 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][158]) /* ty=Storage[] */; | |
| let %tensor_0158: Tensor[(1, 1, 1, 2048), float32] = memory.alloc_tensor(%storage_0158, 0 /* ty=int64 */, meta[relay.Constant][158] /* ty=Tensor[(4), int64] */, meta[relay.attrs.AllocTensorAttrs][158]) /* ty=Tensor[(1, 1, 1, 2048), float32] */; | |
| %1017 = fn (%p0158: Tensor[(1, 7, 7, 2048), float32], Primitive=1) -> Tensor[(1, 1, 1, 2048), float32] { | |
| nn.global_avg_pool2d(%p0158, layout="NHWC") /* ty=Tensor[(1, 1, 1, 2048), float32] */ | |
| }; | |
| %1018 = (%x315,); | |
| %1019 = (%tensor_0158,); | |
| let %x316: () = vm.invoke_tvm_op(%1017, %1018, %1019) /* ty=() */; | |
| let %x317: Tensor[(1, 1, 1, 2048), float32] = %tensor_0158; | |
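| // NHWC -> NCHW, then flatten to (1, 2048) | |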
| let %storage_0159: Storage[] = memory.alloc_storage(8192 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][159]) /* ty=Storage[] */; | |
| let %tensor_0159: Tensor[(1, 2048), float32] = memory.alloc_tensor(%storage_0159, 0 /* ty=int64 */, meta[relay.Constant][159] /* ty=Tensor[(2), int64] */, meta[relay.attrs.AllocTensorAttrs][159]) /* ty=Tensor[(1, 2048), float32] */; | |
| %1021 = fn (%p0159: Tensor[(1, 1, 1, 2048), float32], Primitive=1) -> Tensor[(1, 2048), float32] { | |
| %1020 = layout_transform(%p0159, src_layout="NHWC", dst_layout="NCHW") /* ty=Tensor[(1, 2048, 1, 1), float32] */; | |
| nn.batch_flatten(%1020) /* ty=Tensor[(1, 2048), float32] */ | |
| }; | |
| %1022 = (%x317,); | |
| %1023 = (%tensor_0159,); | |
| let %x318: () = vm.invoke_tvm_op(%1021, %1022, %1023) /* ty=() */; | |
| let %x319: Tensor[(1, 2048), float32] = %tensor_0159; | |
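| // fc1 weight: repack (1000, 2048) from NK to NK10n (output units tiled by 10) for nn.contrib_dense_pack | |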
| let %storage_0160: Storage[] = memory.alloc_storage(8192000 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][160]) /* ty=Storage[] */; | |
| let %tensor_0160: Tensor[(100, 2048, 10), float32] = memory.alloc_tensor(%storage_0160, 0 /* ty=int64 */, meta[relay.Constant][160] /* ty=Tensor[(3), int64] */, meta[relay.attrs.AllocTensorAttrs][160]) /* ty=Tensor[(100, 2048, 10), float32] */; | |
| %1024 = fn (%p0160: Tensor[(1000, 2048), float32], Primitive=1) -> Tensor[(100, 2048, 10), float32] { | |
| layout_transform(%p0160, src_layout="NK", dst_layout="NK10n") /* ty=Tensor[(100, 2048, 10), float32] */ | |
| }; | |
| %1025 = (%fc1_weight,); | |
| %1026 = (%tensor_0160,); | |
| let %x320: () = vm.invoke_tvm_op(%1024, %1025, %1026) /* ty=() */; | |
| let %x321: Tensor[(100, 2048, 10), float32] = %tensor_0160; | |
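| // fc1: packed dense producing the (1, 1000) logits, plus %fc1_bias | |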
| let %storage_0161: Storage[] = memory.alloc_storage(4000 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][161]) /* ty=Storage[] */; | |
| let %tensor_0161: Tensor[(1, 1000), float32] = memory.alloc_tensor(%storage_0161, 0 /* ty=int64 */, meta[relay.Constant][161] /* ty=Tensor[(2), int64] */, meta[relay.attrs.AllocTensorAttrs][161]) /* ty=Tensor[(1, 1000), float32] */; | |
| %1028 = fn (%p0161: Tensor[(1, 2048), float32], %p1138: Tensor[(100, 2048, 10), float32], %p263: Tensor[(1000), float32], Primitive=1) -> Tensor[(1, 1000), float32] { | |
| %1027 = nn.contrib_dense_pack(%p0161, %p1138, units=None, out_dtype="float32") /* ty=Tensor[(1, 1000), float32] */; | |
| add(%1027, %p263) /* ty=Tensor[(1, 1000), float32] */ | |
| }; | |
| %1029 = (%x319, %x321, %fc1_bias); | |
| %1030 = (%tensor_0161,); | |
| let %x322: () = vm.invoke_tvm_op(%1028, %1029, %1030) /* ty=() */; | |
| let %x323: Tensor[(1, 1000), float32] = %tensor_0161; | |
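| // softmax over the 1000 classes; %x325 is returned from @main | |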
| let %storage_0162: Storage[] = memory.alloc_storage(4000 /* ty=int64 */, 64 /* ty=int64 */, meta[relay.attrs.AllocStorageAttrs][162]) /* ty=Storage[] */; | |
| let %tensor_0162: Tensor[(1, 1000), float32] = memory.alloc_tensor(%storage_0162, 0 /* ty=int64 */, meta[relay.Constant][162] /* ty=Tensor[(2), int64] */, meta[relay.attrs.AllocTensorAttrs][162]) /* ty=Tensor[(1, 1000), float32] */; | |
| %1031 = fn (%p0162: Tensor[(1, 1000), float32], Primitive=1) -> Tensor[(1, 1000), float32] { | |
| nn.softmax(%p0162) /* ty=Tensor[(1, 1000), float32] */ | |
| }; | |
| %1032 = (%x323,); | |
| %1033 = (%tensor_0162,); | |
| let %x324: () = vm.invoke_tvm_op(%1031, %1032, %1033) /* ty=() */; | |
| let %x325: Tensor[(1, 1000), float32] = %tensor_0162; | |
| %x325 | |
| } | |