saik0s committed on
Commit
0020516
1 Parent(s): 82d6af2

Adds new models

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .DS_Store +0 -0
  2. ggml-base-encoder.mlmodelc.zip +1 -1
  3. ggml-large-encoder.mlmodelc.zip → ggml-base-encoder.mlmodelc/analytics/coremldata.bin +2 -2
  4. ggml-large.bin → ggml-base-encoder.mlmodelc/coremldata.bin +2 -2
  5. ggml-base-encoder.mlmodelc/metadata.json +64 -0
  6. ggml-base-encoder.mlmodelc/model.mil +393 -0
  7. ggml-base-encoder.mlmodelc/weights/weight.bin +3 -0
  8. ggml-base.en-encoder.mlmodelc.zip +1 -1
  9. ggml-base.en-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  10. ggml-base.en-encoder.mlmodelc/coremldata.bin +3 -0
  11. ggml-base.en-encoder.mlmodelc/metadata.json +64 -0
  12. ggml-base.en-encoder.mlmodelc/model.mil +393 -0
  13. ggml-base.en-encoder.mlmodelc/weights/weight.bin +3 -0
  14. ggml-large-v1-encoder.mlmodelc.zip +2 -2
  15. ggml-large-v1-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  16. ggml-large-v1-encoder.mlmodelc/coremldata.bin +3 -0
  17. ggml-large-v1-encoder.mlmodelc/metadata.json +65 -0
  18. ggml-large-v1-encoder.mlmodelc/model.mil +0 -0
  19. ggml-large-v1-encoder.mlmodelc/weights/weight.bin +3 -0
  20. ggml-large-v2-encoder.mlmodelc.zip +2 -2
  21. ggml-large-v2-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  22. ggml-large-v2-encoder.mlmodelc/coremldata.bin +3 -0
  23. ggml-large-v2-encoder.mlmodelc/metadata.json +65 -0
  24. ggml-large-v2-encoder.mlmodelc/model.mil +0 -0
  25. ggml-large-v2-encoder.mlmodelc/weights/weight.bin +3 -0
  26. ggml-large-v3-encoder.mlmodelc.zip +3 -0
  27. ggml-large-v3-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  28. ggml-large-v3-encoder.mlmodelc/coremldata.bin +3 -0
  29. ggml-large-v3-encoder.mlmodelc/metadata.json +65 -0
  30. ggml-large-v3-encoder.mlmodelc/model.mil +0 -0
  31. ggml-large-v3-encoder.mlmodelc/weights/weight.bin +3 -0
  32. ggml-medium-encoder.mlmodelc.zip +1 -1
  33. ggml-medium-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  34. ggml-medium-encoder.mlmodelc/coremldata.bin +3 -0
  35. ggml-medium-encoder.mlmodelc/metadata.json +64 -0
  36. ggml-medium-encoder.mlmodelc/model.mil +0 -0
  37. ggml-medium-encoder.mlmodelc/weights/weight.bin +3 -0
  38. ggml-medium.en-encoder.mlmodelc.zip +1 -1
  39. ggml-medium.en-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  40. ggml-medium.en-encoder.mlmodelc/coremldata.bin +3 -0
  41. ggml-medium.en-encoder.mlmodelc/metadata.json +64 -0
  42. ggml-medium.en-encoder.mlmodelc/model.mil +0 -0
  43. ggml-medium.en-encoder.mlmodelc/weights/weight.bin +3 -0
  44. ggml-small-encoder.mlmodelc.zip +1 -1
  45. ggml-small-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  46. ggml-small-encoder.mlmodelc/coremldata.bin +3 -0
  47. ggml-small-encoder.mlmodelc/metadata.json +64 -0
  48. ggml-small-encoder.mlmodelc/model.mil +0 -0
  49. ggml-small-encoder.mlmodelc/weights/weight.bin +3 -0
  50. ggml-small.en-encoder.mlmodelc.zip +1 -1
.DS_Store ADDED
Binary file (14.3 kB).
 
ggml-base-encoder.mlmodelc.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7e6ab77041942572f239b5b602f8aaa1c3ed29d73e3d8f20abea03a773541089
+oid sha256:9cf9278264a9e0f56a6649ca10ab2cde81f6bfe86133a1f08de15006c269cd39
 size 37922638
ggml-large-encoder.mlmodelc.zip → ggml-base-encoder.mlmodelc/analytics/coremldata.bin RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c65d28737f51d09dff3b9da9d26c2ab6f5d82857c9b049ce383d64c2502a5541
-size 1174643458
+oid sha256:2441ae34fc7d12946dba7b63379063e856ffc7c3e11ba5f7533efb1450562ca6
+size 207
ggml-large.bin → ggml-base-encoder.mlmodelc/coremldata.bin RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a423fe4d40c82774b6af34115b8b935f34152246eb19e80e376071d3f999487
-size 3094623691
+oid sha256:05fe28591b40616fa0c34ad7b853133623f5300923ec812acb11459c411acf3b
+size 149
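
The three hunks above touch only Git LFS pointer files: the binaries themselves live in LFS, and each pointer records the blob's SHA-256 (oid) and byte size. A minimal sketch, in Python, of checking a downloaded copy of ggml-base-encoder.mlmodelc.zip against the new pointer from this commit (the local path is an assumption):

```python
import hashlib
from pathlib import Path

# Hypothetical local copy of the LFS-tracked archive changed above.
path = Path("ggml-base-encoder.mlmodelc.zip")

# Values taken from the updated pointer file in this commit.
expected_oid = "9cf9278264a9e0f56a6649ca10ab2cde81f6bfe86133a1f08de15006c269cd39"
expected_size = 37922638

data = path.read_bytes()
assert len(data) == expected_size, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "oid does not match the LFS pointer"
print("local file matches the LFS pointer")
```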
ggml-base-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,64 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32)",
+        "shortDescription" : "",
+        "shape" : "[]",
+        "name" : "output",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 6,
+    "mlProgramOperationTypeHistogram" : {
+      "Linear" : 36,
+      "Matmul" : 12,
+      "Cast" : 2,
+      "Conv" : 2,
+      "Softmax" : 6,
+      "Add" : 13,
+      "LayerNorm" : 13,
+      "Mul" : 12,
+      "Transpose" : 25,
+      "Gelu" : 8,
+      "Reshape" : 24
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "12.0",
+      "tvOS" : "15.0",
+      "watchOS" : "8.0",
+      "iOS" : "15.0",
+      "macCatalyst" : "15.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float32",
+        "formattedType" : "MultiArray (Float32 1 × 80 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 80, 3000]",
+        "name" : "logmel_data",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "coreml_encoder_base",
+    "method" : "predict"
+  }
+]
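
The metadata above describes the compiled encoder's interface: one Float32 MultiArray input named logmel_data with shape [1, 80, 3000] (the Whisper log-mel spectrogram) and one MultiArray output (the encoder states). A minimal sketch of exercising that interface from Python, assuming macOS and a coremltools release recent enough to load compiled .mlmodelc bundles via CompiledMLModel (that API and the local path are assumptions, not part of this repo):

```python
import numpy as np
import coremltools as ct

# Load the compiled Core ML encoder (assumes coremltools >= 6 on macOS).
model = ct.models.CompiledMLModel("ggml-base-encoder.mlmodelc")

# inputSchema: a single Float32 MultiArray named "logmel_data", shape [1, 80, 3000].
logmel = np.zeros((1, 80, 3000), dtype=np.float32)

# outputSchema: a single MultiArray named "output".
encoder_states = model.predict({"logmel_data": logmel})["output"]
print(encoder_states.shape)
```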
ggml-base-encoder.mlmodelc/model.mil ADDED
@@ -0,0 +1,393 @@
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "4.28.4"}, {"coremlc-version", "1436.100.10"}})]
3
+ {
4
+ func main<ios15>(tensor<fp32, [1, 80, 3000]> logmel_data) {
5
+ tensor<int32, []> var_20 = const()[name = tensor<string, []>("op_20"), val = tensor<int32, []>(1)];
6
+ tensor<int32, [1]> var_28 = const()[name = tensor<string, []>("op_28"), val = tensor<int32, [1]>([1])];
7
+ tensor<int32, [1]> var_30 = const()[name = tensor<string, []>("op_30"), val = tensor<int32, [1]>([1])];
8
+ tensor<string, []> var_32_pad_type_0 = const()[name = tensor<string, []>("op_32_pad_type_0"), val = tensor<string, []>("custom")];
9
+ tensor<int32, [2]> var_32_pad_0 = const()[name = tensor<string, []>("op_32_pad_0"), val = tensor<int32, [2]>([1, 1])];
10
+ tensor<string, []> logmel_data_to_fp16_dtype_0 = const()[name = tensor<string, []>("logmel_data_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
11
+ tensor<fp16, [512, 80, 3]> weight_3_to_fp16 = const()[name = tensor<string, []>("weight_3_to_fp16"), val = tensor<fp16, [512, 80, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
12
+ tensor<fp16, [512]> bias_3_to_fp16 = const()[name = tensor<string, []>("bias_3_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(245888)))];
13
+ tensor<fp16, [1, 80, 3000]> cast_187 = cast(dtype = logmel_data_to_fp16_dtype_0, x = logmel_data);
14
+ tensor<fp16, [1, 512, 3000]> var_32_cast = conv(bias = bias_3_to_fp16, dilations = var_30, groups = var_20, pad = var_32_pad_0, pad_type = var_32_pad_type_0, strides = var_28, weight = weight_3_to_fp16, x = cast_187);
15
+ tensor<string, []> input_1_mode_0 = const()[name = tensor<string, []>("input_1_mode_0"), val = tensor<string, []>("EXACT")];
16
+ tensor<fp16, [1, 512, 3000]> input_1_cast = gelu(mode = input_1_mode_0, x = var_32_cast);
17
+ tensor<int32, []> var_36 = const()[name = tensor<string, []>("op_36"), val = tensor<int32, []>(1)];
18
+ tensor<int32, [1]> var_45 = const()[name = tensor<string, []>("op_45"), val = tensor<int32, [1]>([2])];
19
+ tensor<int32, [1]> var_47 = const()[name = tensor<string, []>("op_47"), val = tensor<int32, [1]>([1])];
20
+ tensor<string, []> var_49_pad_type_0 = const()[name = tensor<string, []>("op_49_pad_type_0"), val = tensor<string, []>("custom")];
21
+ tensor<int32, [2]> var_49_pad_0 = const()[name = tensor<string, []>("op_49_pad_0"), val = tensor<int32, [2]>([1, 1])];
22
+ tensor<fp16, [512, 512, 3]> weight_7_to_fp16 = const()[name = tensor<string, []>("weight_7_to_fp16"), val = tensor<fp16, [512, 512, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(246976)))];
23
+ tensor<fp16, [512]> bias_7_to_fp16 = const()[name = tensor<string, []>("bias_7_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1819904)))];
24
+ tensor<fp16, [1, 512, 1500]> var_49_cast = conv(bias = bias_7_to_fp16, dilations = var_47, groups = var_36, pad = var_49_pad_0, pad_type = var_49_pad_type_0, strides = var_45, weight = weight_7_to_fp16, x = input_1_cast);
25
+ tensor<string, []> x_3_mode_0 = const()[name = tensor<string, []>("x_3_mode_0"), val = tensor<string, []>("EXACT")];
26
+ tensor<fp16, [1, 512, 1500]> x_3_cast = gelu(mode = x_3_mode_0, x = var_49_cast);
27
+ tensor<int32, [3]> var_54 = const()[name = tensor<string, []>("op_54"), val = tensor<int32, [3]>([0, 2, 1])];
28
+ tensor<fp16, [1500, 512]> positional_embedding_to_fp16 = const()[name = tensor<string, []>("positional_embedding_to_fp16"), val = tensor<fp16, [1500, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1820992)))];
29
+ tensor<fp16, [1, 1500, 512]> transpose_48 = transpose(perm = var_54, x = x_3_cast);
30
+ tensor<fp16, [1, 1500, 512]> var_57_cast = add(x = transpose_48, y = positional_embedding_to_fp16);
31
+ tensor<int32, []> var_70 = const()[name = tensor<string, []>("op_70"), val = tensor<int32, []>(-1)];
32
+ tensor<int32, [1]> var_87_axes_0 = const()[name = tensor<string, []>("op_87_axes_0"), val = tensor<int32, [1]>([-1])];
33
+ tensor<fp16, [512]> blocks_0_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3357056)))];
34
+ tensor<fp16, [512]> blocks_0_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3358144)))];
35
+ tensor<fp16, []> var_76_to_fp16 = const()[name = tensor<string, []>("op_76_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
36
+ tensor<fp16, [1, 1500, 512]> var_87_cast = layer_norm(axes = var_87_axes_0, beta = blocks_0_attn_ln_bias_to_fp16, epsilon = var_76_to_fp16, gamma = blocks_0_attn_ln_weight_to_fp16, x = var_57_cast);
37
+ tensor<fp16, [512, 512]> var_98_to_fp16 = const()[name = tensor<string, []>("op_98_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3359232)))];
38
+ tensor<fp16, [512]> var_99_to_fp16 = const()[name = tensor<string, []>("op_99_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3883584)))];
39
+ tensor<fp16, [1, 1500, 512]> q_1_cast = linear(bias = var_99_to_fp16, weight = var_98_to_fp16, x = var_87_cast);
40
+ tensor<fp16, [512, 512]> var_102_to_fp16 = const()[name = tensor<string, []>("op_102_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3884672)))];
41
+ tensor<fp16, [512]> k_1_bias_0_to_fp16 = const()[name = tensor<string, []>("k_1_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4409024)))];
42
+ tensor<fp16, [1, 1500, 512]> k_1_cast = linear(bias = k_1_bias_0_to_fp16, weight = var_102_to_fp16, x = var_87_cast);
43
+ tensor<fp16, [512, 512]> var_106_to_fp16 = const()[name = tensor<string, []>("op_106_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4410112)))];
44
+ tensor<fp16, [512]> var_107_to_fp16 = const()[name = tensor<string, []>("op_107_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4934464)))];
45
+ tensor<fp16, [1, 1500, 512]> v_1_cast = linear(bias = var_107_to_fp16, weight = var_106_to_fp16, x = var_87_cast);
46
+ tensor<int32, [4]> var_115 = const()[name = tensor<string, []>("op_115"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
47
+ tensor<fp16, [1, 1500, 8, 64]> var_116_cast = reshape(shape = var_115, x = q_1_cast);
48
+ tensor<fp16, [1, 1, 1, 1]> const_42_to_fp16 = const()[name = tensor<string, []>("const_42_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
49
+ tensor<fp16, [1, 1500, 8, 64]> q_3_cast = mul(x = var_116_cast, y = const_42_to_fp16);
50
+ tensor<int32, [4]> var_122 = const()[name = tensor<string, []>("op_122"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
51
+ tensor<fp16, [1, 1500, 8, 64]> var_123_cast = reshape(shape = var_122, x = k_1_cast);
52
+ tensor<fp16, [1, 1, 1, 1]> const_43_to_fp16 = const()[name = tensor<string, []>("const_43_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
53
+ tensor<fp16, [1, 1500, 8, 64]> k_3_cast = mul(x = var_123_cast, y = const_43_to_fp16);
54
+ tensor<int32, [4]> var_129 = const()[name = tensor<string, []>("op_129"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
55
+ tensor<fp16, [1, 1500, 8, 64]> var_130_cast = reshape(shape = var_129, x = v_1_cast);
56
+ tensor<int32, [4]> var_131 = const()[name = tensor<string, []>("op_131"), val = tensor<int32, [4]>([0, 2, 1, 3])];
57
+ tensor<bool, []> qk_1_transpose_x_0 = const()[name = tensor<string, []>("qk_1_transpose_x_0"), val = tensor<bool, []>(false)];
58
+ tensor<bool, []> qk_1_transpose_y_0 = const()[name = tensor<string, []>("qk_1_transpose_y_0"), val = tensor<bool, []>(false)];
59
+ tensor<int32, [4]> transpose_12_perm_0 = const()[name = tensor<string, []>("transpose_12_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
60
+ tensor<int32, [4]> transpose_13_perm_0 = const()[name = tensor<string, []>("transpose_13_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
61
+ tensor<fp16, [1, 8, 64, 1500]> transpose_45 = transpose(perm = transpose_13_perm_0, x = k_3_cast);
62
+ tensor<fp16, [1, 8, 1500, 64]> transpose_46 = transpose(perm = transpose_12_perm_0, x = q_3_cast);
63
+ tensor<fp16, [1, 8, 1500, 1500]> qk_1_cast = matmul(transpose_x = qk_1_transpose_x_0, transpose_y = qk_1_transpose_y_0, x = transpose_46, y = transpose_45);
64
+ tensor<fp16, [1, 8, 1500, 1500]> var_135_cast = softmax(axis = var_70, x = qk_1_cast);
65
+ tensor<bool, []> var_137_transpose_x_0 = const()[name = tensor<string, []>("op_137_transpose_x_0"), val = tensor<bool, []>(false)];
66
+ tensor<bool, []> var_137_transpose_y_0 = const()[name = tensor<string, []>("op_137_transpose_y_0"), val = tensor<bool, []>(false)];
67
+ tensor<fp16, [1, 8, 1500, 64]> transpose_47 = transpose(perm = var_131, x = var_130_cast);
68
+ tensor<fp16, [1, 8, 1500, 64]> var_137_cast = matmul(transpose_x = var_137_transpose_x_0, transpose_y = var_137_transpose_y_0, x = var_135_cast, y = transpose_47);
69
+ tensor<int32, [4]> var_138 = const()[name = tensor<string, []>("op_138"), val = tensor<int32, [4]>([0, 2, 1, 3])];
70
+ tensor<int32, [3]> concat_0 = const()[name = tensor<string, []>("concat_0"), val = tensor<int32, [3]>([1, 1500, 512])];
71
+ tensor<fp16, [1, 1500, 8, 64]> transpose_44 = transpose(perm = var_138, x = var_137_cast);
72
+ tensor<fp16, [1, 1500, 512]> x_11_cast = reshape(shape = concat_0, x = transpose_44);
73
+ tensor<fp16, [512, 512]> var_143_to_fp16 = const()[name = tensor<string, []>("op_143_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4935552)))];
74
+ tensor<fp16, [512]> var_144_to_fp16 = const()[name = tensor<string, []>("op_144_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5459904)))];
75
+ tensor<fp16, [1, 1500, 512]> var_145_cast = linear(bias = var_144_to_fp16, weight = var_143_to_fp16, x = x_11_cast);
76
+ tensor<fp16, [1, 1500, 512]> x_13_cast = add(x = var_57_cast, y = var_145_cast);
77
+ tensor<int32, [1]> var_151_axes_0 = const()[name = tensor<string, []>("op_151_axes_0"), val = tensor<int32, [1]>([-1])];
78
+ tensor<fp16, [512]> blocks_0_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5460992)))];
79
+ tensor<fp16, [512]> blocks_0_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5462080)))];
80
+ tensor<fp16, [1, 1500, 512]> var_151_cast = layer_norm(axes = var_151_axes_0, beta = blocks_0_mlp_ln_bias_to_fp16, epsilon = var_76_to_fp16, gamma = blocks_0_mlp_ln_weight_to_fp16, x = x_13_cast);
81
+ tensor<fp16, [2048, 512]> var_160_to_fp16 = const()[name = tensor<string, []>("op_160_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5463168)))];
82
+ tensor<fp16, [2048]> var_161_to_fp16 = const()[name = tensor<string, []>("op_161_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7560384)))];
83
+ tensor<fp16, [1, 1500, 2048]> input_9_cast = linear(bias = var_161_to_fp16, weight = var_160_to_fp16, x = var_151_cast);
84
+ tensor<string, []> x_17_mode_0 = const()[name = tensor<string, []>("x_17_mode_0"), val = tensor<string, []>("EXACT")];
85
+ tensor<fp16, [1, 1500, 2048]> x_17_cast = gelu(mode = x_17_mode_0, x = input_9_cast);
86
+ tensor<fp16, [512, 2048]> var_166_to_fp16 = const()[name = tensor<string, []>("op_166_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7564544)))];
87
+ tensor<fp16, [512]> var_167_to_fp16 = const()[name = tensor<string, []>("op_167_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9661760)))];
88
+ tensor<fp16, [1, 1500, 512]> var_168_cast = linear(bias = var_167_to_fp16, weight = var_166_to_fp16, x = x_17_cast);
89
+ tensor<fp16, [1, 1500, 512]> x_19_cast = add(x = x_13_cast, y = var_168_cast);
90
+ tensor<int32, []> var_177 = const()[name = tensor<string, []>("op_177"), val = tensor<int32, []>(-1)];
91
+ tensor<int32, [1]> var_194_axes_0 = const()[name = tensor<string, []>("op_194_axes_0"), val = tensor<int32, [1]>([-1])];
92
+ tensor<fp16, [512]> blocks_1_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9662848)))];
93
+ tensor<fp16, [512]> blocks_1_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9663936)))];
94
+ tensor<fp16, []> var_183_to_fp16 = const()[name = tensor<string, []>("op_183_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
95
+ tensor<fp16, [1, 1500, 512]> var_194_cast = layer_norm(axes = var_194_axes_0, beta = blocks_1_attn_ln_bias_to_fp16, epsilon = var_183_to_fp16, gamma = blocks_1_attn_ln_weight_to_fp16, x = x_19_cast);
96
+ tensor<fp16, [512, 512]> var_205_to_fp16 = const()[name = tensor<string, []>("op_205_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9665024)))];
97
+ tensor<fp16, [512]> var_206_to_fp16 = const()[name = tensor<string, []>("op_206_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10189376)))];
98
+ tensor<fp16, [1, 1500, 512]> q_5_cast = linear(bias = var_206_to_fp16, weight = var_205_to_fp16, x = var_194_cast);
99
+ tensor<fp16, [512, 512]> var_209_to_fp16 = const()[name = tensor<string, []>("op_209_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10190464)))];
100
+ tensor<fp16, [512]> k_5_bias_0_to_fp16 = const()[name = tensor<string, []>("k_5_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10714816)))];
101
+ tensor<fp16, [1, 1500, 512]> k_5_cast = linear(bias = k_5_bias_0_to_fp16, weight = var_209_to_fp16, x = var_194_cast);
102
+ tensor<fp16, [512, 512]> var_213_to_fp16 = const()[name = tensor<string, []>("op_213_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10715904)))];
103
+ tensor<fp16, [512]> var_214_to_fp16 = const()[name = tensor<string, []>("op_214_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11240256)))];
104
+ tensor<fp16, [1, 1500, 512]> v_5_cast = linear(bias = var_214_to_fp16, weight = var_213_to_fp16, x = var_194_cast);
105
+ tensor<int32, [4]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
106
+ tensor<fp16, [1, 1500, 8, 64]> var_223_cast = reshape(shape = var_222, x = q_5_cast);
107
+ tensor<fp16, [1, 1, 1, 1]> const_44_to_fp16 = const()[name = tensor<string, []>("const_44_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
108
+ tensor<fp16, [1, 1500, 8, 64]> q_7_cast = mul(x = var_223_cast, y = const_44_to_fp16);
109
+ tensor<int32, [4]> var_229 = const()[name = tensor<string, []>("op_229"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
110
+ tensor<fp16, [1, 1500, 8, 64]> var_230_cast = reshape(shape = var_229, x = k_5_cast);
111
+ tensor<fp16, [1, 1, 1, 1]> const_45_to_fp16 = const()[name = tensor<string, []>("const_45_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
112
+ tensor<fp16, [1, 1500, 8, 64]> k_7_cast = mul(x = var_230_cast, y = const_45_to_fp16);
113
+ tensor<int32, [4]> var_236 = const()[name = tensor<string, []>("op_236"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
114
+ tensor<fp16, [1, 1500, 8, 64]> var_237_cast = reshape(shape = var_236, x = v_5_cast);
115
+ tensor<int32, [4]> var_238 = const()[name = tensor<string, []>("op_238"), val = tensor<int32, [4]>([0, 2, 1, 3])];
116
+ tensor<bool, []> qk_3_transpose_x_0 = const()[name = tensor<string, []>("qk_3_transpose_x_0"), val = tensor<bool, []>(false)];
117
+ tensor<bool, []> qk_3_transpose_y_0 = const()[name = tensor<string, []>("qk_3_transpose_y_0"), val = tensor<bool, []>(false)];
118
+ tensor<int32, [4]> transpose_14_perm_0 = const()[name = tensor<string, []>("transpose_14_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
119
+ tensor<int32, [4]> transpose_15_perm_0 = const()[name = tensor<string, []>("transpose_15_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
120
+ tensor<fp16, [1, 8, 64, 1500]> transpose_41 = transpose(perm = transpose_15_perm_0, x = k_7_cast);
121
+ tensor<fp16, [1, 8, 1500, 64]> transpose_42 = transpose(perm = transpose_14_perm_0, x = q_7_cast);
122
+ tensor<fp16, [1, 8, 1500, 1500]> qk_3_cast = matmul(transpose_x = qk_3_transpose_x_0, transpose_y = qk_3_transpose_y_0, x = transpose_42, y = transpose_41);
123
+ tensor<fp16, [1, 8, 1500, 1500]> var_242_cast = softmax(axis = var_177, x = qk_3_cast);
124
+ tensor<bool, []> var_244_transpose_x_0 = const()[name = tensor<string, []>("op_244_transpose_x_0"), val = tensor<bool, []>(false)];
125
+ tensor<bool, []> var_244_transpose_y_0 = const()[name = tensor<string, []>("op_244_transpose_y_0"), val = tensor<bool, []>(false)];
126
+ tensor<fp16, [1, 8, 1500, 64]> transpose_43 = transpose(perm = var_238, x = var_237_cast);
127
+ tensor<fp16, [1, 8, 1500, 64]> var_244_cast = matmul(transpose_x = var_244_transpose_x_0, transpose_y = var_244_transpose_y_0, x = var_242_cast, y = transpose_43);
128
+ tensor<int32, [4]> var_245 = const()[name = tensor<string, []>("op_245"), val = tensor<int32, [4]>([0, 2, 1, 3])];
129
+ tensor<int32, [3]> concat_1 = const()[name = tensor<string, []>("concat_1"), val = tensor<int32, [3]>([1, 1500, 512])];
130
+ tensor<fp16, [1, 1500, 8, 64]> transpose_40 = transpose(perm = var_245, x = var_244_cast);
131
+ tensor<fp16, [1, 1500, 512]> x_23_cast = reshape(shape = concat_1, x = transpose_40);
132
+ tensor<fp16, [512, 512]> var_250_to_fp16 = const()[name = tensor<string, []>("op_250_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11241344)))];
133
+ tensor<fp16, [512]> var_251_to_fp16 = const()[name = tensor<string, []>("op_251_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11765696)))];
134
+ tensor<fp16, [1, 1500, 512]> var_252_cast = linear(bias = var_251_to_fp16, weight = var_250_to_fp16, x = x_23_cast);
135
+ tensor<fp16, [1, 1500, 512]> x_25_cast = add(x = x_19_cast, y = var_252_cast);
136
+ tensor<int32, [1]> var_258_axes_0 = const()[name = tensor<string, []>("op_258_axes_0"), val = tensor<int32, [1]>([-1])];
137
+ tensor<fp16, [512]> blocks_1_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11766784)))];
138
+ tensor<fp16, [512]> blocks_1_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11767872)))];
139
+ tensor<fp16, [1, 1500, 512]> var_258_cast = layer_norm(axes = var_258_axes_0, beta = blocks_1_mlp_ln_bias_to_fp16, epsilon = var_183_to_fp16, gamma = blocks_1_mlp_ln_weight_to_fp16, x = x_25_cast);
140
+ tensor<fp16, [2048, 512]> var_267_to_fp16 = const()[name = tensor<string, []>("op_267_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11768960)))];
141
+ tensor<fp16, [2048]> var_268_to_fp16 = const()[name = tensor<string, []>("op_268_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13866176)))];
142
+ tensor<fp16, [1, 1500, 2048]> input_17_cast = linear(bias = var_268_to_fp16, weight = var_267_to_fp16, x = var_258_cast);
143
+ tensor<string, []> x_29_mode_0 = const()[name = tensor<string, []>("x_29_mode_0"), val = tensor<string, []>("EXACT")];
144
+ tensor<fp16, [1, 1500, 2048]> x_29_cast = gelu(mode = x_29_mode_0, x = input_17_cast);
145
+ tensor<fp16, [512, 2048]> var_273_to_fp16 = const()[name = tensor<string, []>("op_273_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13870336)))];
146
+ tensor<fp16, [512]> var_274_to_fp16 = const()[name = tensor<string, []>("op_274_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15967552)))];
147
+ tensor<fp16, [1, 1500, 512]> var_275_cast = linear(bias = var_274_to_fp16, weight = var_273_to_fp16, x = x_29_cast);
148
+ tensor<fp16, [1, 1500, 512]> x_31_cast = add(x = x_25_cast, y = var_275_cast);
149
+ tensor<int32, []> var_284 = const()[name = tensor<string, []>("op_284"), val = tensor<int32, []>(-1)];
150
+ tensor<int32, [1]> var_301_axes_0 = const()[name = tensor<string, []>("op_301_axes_0"), val = tensor<int32, [1]>([-1])];
151
+ tensor<fp16, [512]> blocks_2_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15968640)))];
152
+ tensor<fp16, [512]> blocks_2_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15969728)))];
153
+ tensor<fp16, []> var_290_to_fp16 = const()[name = tensor<string, []>("op_290_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
154
+ tensor<fp16, [1, 1500, 512]> var_301_cast = layer_norm(axes = var_301_axes_0, beta = blocks_2_attn_ln_bias_to_fp16, epsilon = var_290_to_fp16, gamma = blocks_2_attn_ln_weight_to_fp16, x = x_31_cast);
155
+ tensor<fp16, [512, 512]> var_312_to_fp16 = const()[name = tensor<string, []>("op_312_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15970816)))];
156
+ tensor<fp16, [512]> var_313_to_fp16 = const()[name = tensor<string, []>("op_313_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16495168)))];
157
+ tensor<fp16, [1, 1500, 512]> q_9_cast = linear(bias = var_313_to_fp16, weight = var_312_to_fp16, x = var_301_cast);
158
+ tensor<fp16, [512, 512]> var_316_to_fp16 = const()[name = tensor<string, []>("op_316_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16496256)))];
159
+ tensor<fp16, [512]> k_9_bias_0_to_fp16 = const()[name = tensor<string, []>("k_9_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17020608)))];
160
+ tensor<fp16, [1, 1500, 512]> k_9_cast = linear(bias = k_9_bias_0_to_fp16, weight = var_316_to_fp16, x = var_301_cast);
161
+ tensor<fp16, [512, 512]> var_320_to_fp16 = const()[name = tensor<string, []>("op_320_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17021696)))];
162
+ tensor<fp16, [512]> var_321_to_fp16 = const()[name = tensor<string, []>("op_321_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17546048)))];
163
+ tensor<fp16, [1, 1500, 512]> v_9_cast = linear(bias = var_321_to_fp16, weight = var_320_to_fp16, x = var_301_cast);
164
+ tensor<int32, [4]> var_329 = const()[name = tensor<string, []>("op_329"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
165
+ tensor<fp16, [1, 1500, 8, 64]> var_330_cast = reshape(shape = var_329, x = q_9_cast);
166
+ tensor<fp16, [1, 1, 1, 1]> const_46_to_fp16 = const()[name = tensor<string, []>("const_46_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
167
+ tensor<fp16, [1, 1500, 8, 64]> q_11_cast = mul(x = var_330_cast, y = const_46_to_fp16);
168
+ tensor<int32, [4]> var_336 = const()[name = tensor<string, []>("op_336"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
169
+ tensor<fp16, [1, 1500, 8, 64]> var_337_cast = reshape(shape = var_336, x = k_9_cast);
170
+ tensor<fp16, [1, 1, 1, 1]> const_47_to_fp16 = const()[name = tensor<string, []>("const_47_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
171
+ tensor<fp16, [1, 1500, 8, 64]> k_11_cast = mul(x = var_337_cast, y = const_47_to_fp16);
172
+ tensor<int32, [4]> var_343 = const()[name = tensor<string, []>("op_343"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
173
+ tensor<fp16, [1, 1500, 8, 64]> var_344_cast = reshape(shape = var_343, x = v_9_cast);
174
+ tensor<int32, [4]> var_345 = const()[name = tensor<string, []>("op_345"), val = tensor<int32, [4]>([0, 2, 1, 3])];
175
+ tensor<bool, []> qk_5_transpose_x_0 = const()[name = tensor<string, []>("qk_5_transpose_x_0"), val = tensor<bool, []>(false)];
176
+ tensor<bool, []> qk_5_transpose_y_0 = const()[name = tensor<string, []>("qk_5_transpose_y_0"), val = tensor<bool, []>(false)];
177
+ tensor<int32, [4]> transpose_16_perm_0 = const()[name = tensor<string, []>("transpose_16_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
178
+ tensor<int32, [4]> transpose_17_perm_0 = const()[name = tensor<string, []>("transpose_17_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
179
+ tensor<fp16, [1, 8, 64, 1500]> transpose_37 = transpose(perm = transpose_17_perm_0, x = k_11_cast);
180
+ tensor<fp16, [1, 8, 1500, 64]> transpose_38 = transpose(perm = transpose_16_perm_0, x = q_11_cast);
181
+ tensor<fp16, [1, 8, 1500, 1500]> qk_5_cast = matmul(transpose_x = qk_5_transpose_x_0, transpose_y = qk_5_transpose_y_0, x = transpose_38, y = transpose_37);
182
+ tensor<fp16, [1, 8, 1500, 1500]> var_349_cast = softmax(axis = var_284, x = qk_5_cast);
183
+ tensor<bool, []> var_351_transpose_x_0 = const()[name = tensor<string, []>("op_351_transpose_x_0"), val = tensor<bool, []>(false)];
184
+ tensor<bool, []> var_351_transpose_y_0 = const()[name = tensor<string, []>("op_351_transpose_y_0"), val = tensor<bool, []>(false)];
185
+ tensor<fp16, [1, 8, 1500, 64]> transpose_39 = transpose(perm = var_345, x = var_344_cast);
186
+ tensor<fp16, [1, 8, 1500, 64]> var_351_cast = matmul(transpose_x = var_351_transpose_x_0, transpose_y = var_351_transpose_y_0, x = var_349_cast, y = transpose_39);
187
+ tensor<int32, [4]> var_352 = const()[name = tensor<string, []>("op_352"), val = tensor<int32, [4]>([0, 2, 1, 3])];
188
+ tensor<int32, [3]> concat_2 = const()[name = tensor<string, []>("concat_2"), val = tensor<int32, [3]>([1, 1500, 512])];
189
+ tensor<fp16, [1, 1500, 8, 64]> transpose_36 = transpose(perm = var_352, x = var_351_cast);
190
+ tensor<fp16, [1, 1500, 512]> x_35_cast = reshape(shape = concat_2, x = transpose_36);
191
+ tensor<fp16, [512, 512]> var_357_to_fp16 = const()[name = tensor<string, []>("op_357_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17547136)))];
192
+ tensor<fp16, [512]> var_358_to_fp16 = const()[name = tensor<string, []>("op_358_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18071488)))];
193
+ tensor<fp16, [1, 1500, 512]> var_359_cast = linear(bias = var_358_to_fp16, weight = var_357_to_fp16, x = x_35_cast);
194
+ tensor<fp16, [1, 1500, 512]> x_37_cast = add(x = x_31_cast, y = var_359_cast);
195
+ tensor<int32, [1]> var_365_axes_0 = const()[name = tensor<string, []>("op_365_axes_0"), val = tensor<int32, [1]>([-1])];
196
+ tensor<fp16, [512]> blocks_2_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18072576)))];
197
+ tensor<fp16, [512]> blocks_2_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18073664)))];
198
+ tensor<fp16, [1, 1500, 512]> var_365_cast = layer_norm(axes = var_365_axes_0, beta = blocks_2_mlp_ln_bias_to_fp16, epsilon = var_290_to_fp16, gamma = blocks_2_mlp_ln_weight_to_fp16, x = x_37_cast);
199
+ tensor<fp16, [2048, 512]> var_374_to_fp16 = const()[name = tensor<string, []>("op_374_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18074752)))];
200
+ tensor<fp16, [2048]> var_375_to_fp16 = const()[name = tensor<string, []>("op_375_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20171968)))];
201
+ tensor<fp16, [1, 1500, 2048]> input_25_cast = linear(bias = var_375_to_fp16, weight = var_374_to_fp16, x = var_365_cast);
202
+ tensor<string, []> x_41_mode_0 = const()[name = tensor<string, []>("x_41_mode_0"), val = tensor<string, []>("EXACT")];
203
+ tensor<fp16, [1, 1500, 2048]> x_41_cast = gelu(mode = x_41_mode_0, x = input_25_cast);
204
+ tensor<fp16, [512, 2048]> var_380_to_fp16 = const()[name = tensor<string, []>("op_380_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20176128)))];
205
+ tensor<fp16, [512]> var_381_to_fp16 = const()[name = tensor<string, []>("op_381_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22273344)))];
206
+ tensor<fp16, [1, 1500, 512]> var_382_cast = linear(bias = var_381_to_fp16, weight = var_380_to_fp16, x = x_41_cast);
207
+ tensor<fp16, [1, 1500, 512]> x_43_cast = add(x = x_37_cast, y = var_382_cast);
208
+ tensor<int32, []> var_391 = const()[name = tensor<string, []>("op_391"), val = tensor<int32, []>(-1)];
209
+ tensor<int32, [1]> var_408_axes_0 = const()[name = tensor<string, []>("op_408_axes_0"), val = tensor<int32, [1]>([-1])];
210
+ tensor<fp16, [512]> blocks_3_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_3_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22274432)))];
211
+ tensor<fp16, [512]> blocks_3_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_3_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22275520)))];
212
+ tensor<fp16, []> var_397_to_fp16 = const()[name = tensor<string, []>("op_397_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
213
+ tensor<fp16, [1, 1500, 512]> var_408_cast = layer_norm(axes = var_408_axes_0, beta = blocks_3_attn_ln_bias_to_fp16, epsilon = var_397_to_fp16, gamma = blocks_3_attn_ln_weight_to_fp16, x = x_43_cast);
214
+ tensor<fp16, [512, 512]> var_419_to_fp16 = const()[name = tensor<string, []>("op_419_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22276608)))];
215
+ tensor<fp16, [512]> var_420_to_fp16 = const()[name = tensor<string, []>("op_420_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22800960)))];
216
+ tensor<fp16, [1, 1500, 512]> q_13_cast = linear(bias = var_420_to_fp16, weight = var_419_to_fp16, x = var_408_cast);
217
+ tensor<fp16, [512, 512]> var_423_to_fp16 = const()[name = tensor<string, []>("op_423_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22802048)))];
218
+ tensor<fp16, [512]> k_13_bias_0_to_fp16 = const()[name = tensor<string, []>("k_13_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23326400)))];
219
+ tensor<fp16, [1, 1500, 512]> k_13_cast = linear(bias = k_13_bias_0_to_fp16, weight = var_423_to_fp16, x = var_408_cast);
220
+ tensor<fp16, [512, 512]> var_427_to_fp16 = const()[name = tensor<string, []>("op_427_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23327488)))];
221
+ tensor<fp16, [512]> var_428_to_fp16 = const()[name = tensor<string, []>("op_428_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23851840)))];
222
+ tensor<fp16, [1, 1500, 512]> v_13_cast = linear(bias = var_428_to_fp16, weight = var_427_to_fp16, x = var_408_cast);
223
+ tensor<int32, [4]> var_436 = const()[name = tensor<string, []>("op_436"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
224
+ tensor<fp16, [1, 1500, 8, 64]> var_437_cast = reshape(shape = var_436, x = q_13_cast);
225
+ tensor<fp16, [1, 1, 1, 1]> const_48_to_fp16 = const()[name = tensor<string, []>("const_48_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
226
+ tensor<fp16, [1, 1500, 8, 64]> q_15_cast = mul(x = var_437_cast, y = const_48_to_fp16);
227
+ tensor<int32, [4]> var_443 = const()[name = tensor<string, []>("op_443"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
228
+ tensor<fp16, [1, 1500, 8, 64]> var_444_cast = reshape(shape = var_443, x = k_13_cast);
229
+ tensor<fp16, [1, 1, 1, 1]> const_49_to_fp16 = const()[name = tensor<string, []>("const_49_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
230
+ tensor<fp16, [1, 1500, 8, 64]> k_15_cast = mul(x = var_444_cast, y = const_49_to_fp16);
231
+ tensor<int32, [4]> var_450 = const()[name = tensor<string, []>("op_450"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
232
+ tensor<fp16, [1, 1500, 8, 64]> var_451_cast = reshape(shape = var_450, x = v_13_cast);
233
+ tensor<int32, [4]> var_452 = const()[name = tensor<string, []>("op_452"), val = tensor<int32, [4]>([0, 2, 1, 3])];
234
+ tensor<bool, []> qk_7_transpose_x_0 = const()[name = tensor<string, []>("qk_7_transpose_x_0"), val = tensor<bool, []>(false)];
235
+ tensor<bool, []> qk_7_transpose_y_0 = const()[name = tensor<string, []>("qk_7_transpose_y_0"), val = tensor<bool, []>(false)];
236
+ tensor<int32, [4]> transpose_18_perm_0 = const()[name = tensor<string, []>("transpose_18_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
237
+ tensor<int32, [4]> transpose_19_perm_0 = const()[name = tensor<string, []>("transpose_19_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
238
+ tensor<fp16, [1, 8, 64, 1500]> transpose_33 = transpose(perm = transpose_19_perm_0, x = k_15_cast);
239
+ tensor<fp16, [1, 8, 1500, 64]> transpose_34 = transpose(perm = transpose_18_perm_0, x = q_15_cast);
240
+ tensor<fp16, [1, 8, 1500, 1500]> qk_7_cast = matmul(transpose_x = qk_7_transpose_x_0, transpose_y = qk_7_transpose_y_0, x = transpose_34, y = transpose_33);
241
+ tensor<fp16, [1, 8, 1500, 1500]> var_456_cast = softmax(axis = var_391, x = qk_7_cast);
242
+ tensor<bool, []> var_458_transpose_x_0 = const()[name = tensor<string, []>("op_458_transpose_x_0"), val = tensor<bool, []>(false)];
243
+ tensor<bool, []> var_458_transpose_y_0 = const()[name = tensor<string, []>("op_458_transpose_y_0"), val = tensor<bool, []>(false)];
244
+ tensor<fp16, [1, 8, 1500, 64]> transpose_35 = transpose(perm = var_452, x = var_451_cast);
245
+ tensor<fp16, [1, 8, 1500, 64]> var_458_cast = matmul(transpose_x = var_458_transpose_x_0, transpose_y = var_458_transpose_y_0, x = var_456_cast, y = transpose_35);
246
+ tensor<int32, [4]> var_459 = const()[name = tensor<string, []>("op_459"), val = tensor<int32, [4]>([0, 2, 1, 3])];
247
+ tensor<int32, [3]> concat_3 = const()[name = tensor<string, []>("concat_3"), val = tensor<int32, [3]>([1, 1500, 512])];
248
+ tensor<fp16, [1, 1500, 8, 64]> transpose_32 = transpose(perm = var_459, x = var_458_cast);
249
+ tensor<fp16, [1, 1500, 512]> x_47_cast = reshape(shape = concat_3, x = transpose_32);
250
+ tensor<fp16, [512, 512]> var_464_to_fp16 = const()[name = tensor<string, []>("op_464_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23852928)))];
251
+ tensor<fp16, [512]> var_465_to_fp16 = const()[name = tensor<string, []>("op_465_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24377280)))];
252
+ tensor<fp16, [1, 1500, 512]> var_466_cast = linear(bias = var_465_to_fp16, weight = var_464_to_fp16, x = x_47_cast);
253
+ tensor<fp16, [1, 1500, 512]> x_49_cast = add(x = x_43_cast, y = var_466_cast);
254
+ tensor<int32, [1]> var_472_axes_0 = const()[name = tensor<string, []>("op_472_axes_0"), val = tensor<int32, [1]>([-1])];
255
+ tensor<fp16, [512]> blocks_3_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_3_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24378368)))];
256
+ tensor<fp16, [512]> blocks_3_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_3_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24379456)))];
257
+ tensor<fp16, [1, 1500, 512]> var_472_cast = layer_norm(axes = var_472_axes_0, beta = blocks_3_mlp_ln_bias_to_fp16, epsilon = var_397_to_fp16, gamma = blocks_3_mlp_ln_weight_to_fp16, x = x_49_cast);
258
+ tensor<fp16, [2048, 512]> var_481_to_fp16 = const()[name = tensor<string, []>("op_481_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24380544)))];
259
+ tensor<fp16, [2048]> var_482_to_fp16 = const()[name = tensor<string, []>("op_482_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(26477760)))];
260
+ tensor<fp16, [1, 1500, 2048]> input_33_cast = linear(bias = var_482_to_fp16, weight = var_481_to_fp16, x = var_472_cast);
261
+ tensor<string, []> x_53_mode_0 = const()[name = tensor<string, []>("x_53_mode_0"), val = tensor<string, []>("EXACT")];
262
+ tensor<fp16, [1, 1500, 2048]> x_53_cast = gelu(mode = x_53_mode_0, x = input_33_cast);
263
+ tensor<fp16, [512, 2048]> var_487_to_fp16 = const()[name = tensor<string, []>("op_487_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(26481920)))];
264
+ tensor<fp16, [512]> var_488_to_fp16 = const()[name = tensor<string, []>("op_488_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28579136)))];
265
+ tensor<fp16, [1, 1500, 512]> var_489_cast = linear(bias = var_488_to_fp16, weight = var_487_to_fp16, x = x_53_cast);
266
+ tensor<fp16, [1, 1500, 512]> x_55_cast = add(x = x_49_cast, y = var_489_cast);
267
+ tensor<int32, []> var_498 = const()[name = tensor<string, []>("op_498"), val = tensor<int32, []>(-1)];
268
+ tensor<int32, [1]> var_515_axes_0 = const()[name = tensor<string, []>("op_515_axes_0"), val = tensor<int32, [1]>([-1])];
269
+ tensor<fp16, [512]> blocks_4_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_4_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28580224)))];
270
+ tensor<fp16, [512]> blocks_4_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_4_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28581312)))];
271
+ tensor<fp16, []> var_504_to_fp16 = const()[name = tensor<string, []>("op_504_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
272
+ tensor<fp16, [1, 1500, 512]> var_515_cast = layer_norm(axes = var_515_axes_0, beta = blocks_4_attn_ln_bias_to_fp16, epsilon = var_504_to_fp16, gamma = blocks_4_attn_ln_weight_to_fp16, x = x_55_cast);
273
+ tensor<fp16, [512, 512]> var_526_to_fp16 = const()[name = tensor<string, []>("op_526_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28582400)))];
274
+ tensor<fp16, [512]> var_527_to_fp16 = const()[name = tensor<string, []>("op_527_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29106752)))];
275
+ tensor<fp16, [1, 1500, 512]> q_17_cast = linear(bias = var_527_to_fp16, weight = var_526_to_fp16, x = var_515_cast);
276
+ tensor<fp16, [512, 512]> var_530_to_fp16 = const()[name = tensor<string, []>("op_530_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29107840)))];
277
+ tensor<fp16, [512]> k_17_bias_0_to_fp16 = const()[name = tensor<string, []>("k_17_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29632192)))];
278
+ tensor<fp16, [1, 1500, 512]> k_17_cast = linear(bias = k_17_bias_0_to_fp16, weight = var_530_to_fp16, x = var_515_cast);
279
+ tensor<fp16, [512, 512]> var_534_to_fp16 = const()[name = tensor<string, []>("op_534_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29633280)))];
280
+ tensor<fp16, [512]> var_535_to_fp16 = const()[name = tensor<string, []>("op_535_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30157632)))];
281
+ tensor<fp16, [1, 1500, 512]> v_17_cast = linear(bias = var_535_to_fp16, weight = var_534_to_fp16, x = var_515_cast);
282
+ tensor<int32, [4]> var_543 = const()[name = tensor<string, []>("op_543"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
283
+ tensor<fp16, [1, 1500, 8, 64]> var_544_cast = reshape(shape = var_543, x = q_17_cast);
284
+ tensor<fp16, [1, 1, 1, 1]> const_50_to_fp16 = const()[name = tensor<string, []>("const_50_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
285
+ tensor<fp16, [1, 1500, 8, 64]> q_19_cast = mul(x = var_544_cast, y = const_50_to_fp16);
286
+ tensor<int32, [4]> var_550 = const()[name = tensor<string, []>("op_550"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
287
+ tensor<fp16, [1, 1500, 8, 64]> var_551_cast = reshape(shape = var_550, x = k_17_cast);
288
+ tensor<fp16, [1, 1, 1, 1]> const_51_to_fp16 = const()[name = tensor<string, []>("const_51_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
289
+ tensor<fp16, [1, 1500, 8, 64]> k_19_cast = mul(x = var_551_cast, y = const_51_to_fp16);
290
+ tensor<int32, [4]> var_557 = const()[name = tensor<string, []>("op_557"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
291
+ tensor<fp16, [1, 1500, 8, 64]> var_558_cast = reshape(shape = var_557, x = v_17_cast);
292
+ tensor<int32, [4]> var_559 = const()[name = tensor<string, []>("op_559"), val = tensor<int32, [4]>([0, 2, 1, 3])];
293
+ tensor<bool, []> qk_9_transpose_x_0 = const()[name = tensor<string, []>("qk_9_transpose_x_0"), val = tensor<bool, []>(false)];
294
+ tensor<bool, []> qk_9_transpose_y_0 = const()[name = tensor<string, []>("qk_9_transpose_y_0"), val = tensor<bool, []>(false)];
295
+ tensor<int32, [4]> transpose_20_perm_0 = const()[name = tensor<string, []>("transpose_20_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
296
+ tensor<int32, [4]> transpose_21_perm_0 = const()[name = tensor<string, []>("transpose_21_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
297
+ tensor<fp16, [1, 8, 64, 1500]> transpose_29 = transpose(perm = transpose_21_perm_0, x = k_19_cast);
298
+ tensor<fp16, [1, 8, 1500, 64]> transpose_30 = transpose(perm = transpose_20_perm_0, x = q_19_cast);
299
+ tensor<fp16, [1, 8, 1500, 1500]> qk_9_cast = matmul(transpose_x = qk_9_transpose_x_0, transpose_y = qk_9_transpose_y_0, x = transpose_30, y = transpose_29);
300
+ tensor<fp16, [1, 8, 1500, 1500]> var_563_cast = softmax(axis = var_498, x = qk_9_cast);
301
+ tensor<bool, []> var_565_transpose_x_0 = const()[name = tensor<string, []>("op_565_transpose_x_0"), val = tensor<bool, []>(false)];
302
+ tensor<bool, []> var_565_transpose_y_0 = const()[name = tensor<string, []>("op_565_transpose_y_0"), val = tensor<bool, []>(false)];
303
+ tensor<fp16, [1, 8, 1500, 64]> transpose_31 = transpose(perm = var_559, x = var_558_cast);
304
+ tensor<fp16, [1, 8, 1500, 64]> var_565_cast = matmul(transpose_x = var_565_transpose_x_0, transpose_y = var_565_transpose_y_0, x = var_563_cast, y = transpose_31);
305
+ tensor<int32, [4]> var_566 = const()[name = tensor<string, []>("op_566"), val = tensor<int32, [4]>([0, 2, 1, 3])];
306
+ tensor<int32, [3]> concat_4 = const()[name = tensor<string, []>("concat_4"), val = tensor<int32, [3]>([1, 1500, 512])];
307
+ tensor<fp16, [1, 1500, 8, 64]> transpose_28 = transpose(perm = var_566, x = var_565_cast);
308
+ tensor<fp16, [1, 1500, 512]> x_59_cast = reshape(shape = concat_4, x = transpose_28);
309
+ tensor<fp16, [512, 512]> var_571_to_fp16 = const()[name = tensor<string, []>("op_571_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30158720)))];
310
+ tensor<fp16, [512]> var_572_to_fp16 = const()[name = tensor<string, []>("op_572_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30683072)))];
311
+ tensor<fp16, [1, 1500, 512]> var_573_cast = linear(bias = var_572_to_fp16, weight = var_571_to_fp16, x = x_59_cast);
312
+ tensor<fp16, [1, 1500, 512]> x_61_cast = add(x = x_55_cast, y = var_573_cast);
313
+ tensor<int32, [1]> var_579_axes_0 = const()[name = tensor<string, []>("op_579_axes_0"), val = tensor<int32, [1]>([-1])];
314
+ tensor<fp16, [512]> blocks_4_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_4_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30684160)))];
315
+ tensor<fp16, [512]> blocks_4_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_4_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30685248)))];
316
+ tensor<fp16, [1, 1500, 512]> var_579_cast = layer_norm(axes = var_579_axes_0, beta = blocks_4_mlp_ln_bias_to_fp16, epsilon = var_504_to_fp16, gamma = blocks_4_mlp_ln_weight_to_fp16, x = x_61_cast);
317
+ tensor<fp16, [2048, 512]> var_588_to_fp16 = const()[name = tensor<string, []>("op_588_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30686336)))];
318
+ tensor<fp16, [2048]> var_589_to_fp16 = const()[name = tensor<string, []>("op_589_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(32783552)))];
319
+ tensor<fp16, [1, 1500, 2048]> input_41_cast = linear(bias = var_589_to_fp16, weight = var_588_to_fp16, x = var_579_cast);
320
+ tensor<string, []> x_65_mode_0 = const()[name = tensor<string, []>("x_65_mode_0"), val = tensor<string, []>("EXACT")];
321
+ tensor<fp16, [1, 1500, 2048]> x_65_cast = gelu(mode = x_65_mode_0, x = input_41_cast);
322
+ tensor<fp16, [512, 2048]> var_594_to_fp16 = const()[name = tensor<string, []>("op_594_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(32787712)))];
323
+ tensor<fp16, [512]> var_595_to_fp16 = const()[name = tensor<string, []>("op_595_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34884928)))];
324
+ tensor<fp16, [1, 1500, 512]> var_596_cast = linear(bias = var_595_to_fp16, weight = var_594_to_fp16, x = x_65_cast);
325
+ tensor<fp16, [1, 1500, 512]> x_67_cast = add(x = x_61_cast, y = var_596_cast);
326
+ tensor<int32, []> var_605 = const()[name = tensor<string, []>("op_605"), val = tensor<int32, []>(-1)];
327
+ tensor<int32, [1]> var_622_axes_0 = const()[name = tensor<string, []>("op_622_axes_0"), val = tensor<int32, [1]>([-1])];
328
+ tensor<fp16, [512]> blocks_5_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_5_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34886016)))];
329
+ tensor<fp16, [512]> blocks_5_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_5_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34887104)))];
330
+ tensor<fp16, []> var_611_to_fp16 = const()[name = tensor<string, []>("op_611_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
331
+ tensor<fp16, [1, 1500, 512]> var_622_cast = layer_norm(axes = var_622_axes_0, beta = blocks_5_attn_ln_bias_to_fp16, epsilon = var_611_to_fp16, gamma = blocks_5_attn_ln_weight_to_fp16, x = x_67_cast);
332
+ tensor<fp16, [512, 512]> var_633_to_fp16 = const()[name = tensor<string, []>("op_633_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34888192)))];
333
+ tensor<fp16, [512]> var_634_to_fp16 = const()[name = tensor<string, []>("op_634_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35412544)))];
334
+ tensor<fp16, [1, 1500, 512]> q_21_cast = linear(bias = var_634_to_fp16, weight = var_633_to_fp16, x = var_622_cast);
335
+ tensor<fp16, [512, 512]> var_637_to_fp16 = const()[name = tensor<string, []>("op_637_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35413632)))];
336
+ tensor<fp16, [512]> k_21_bias_0_to_fp16 = const()[name = tensor<string, []>("k_21_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35937984)))];
337
+ tensor<fp16, [1, 1500, 512]> k_21_cast = linear(bias = k_21_bias_0_to_fp16, weight = var_637_to_fp16, x = var_622_cast);
338
+ tensor<fp16, [512, 512]> var_641_to_fp16 = const()[name = tensor<string, []>("op_641_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35939072)))];
339
+ tensor<fp16, [512]> var_642_to_fp16 = const()[name = tensor<string, []>("op_642_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36463424)))];
340
+ tensor<fp16, [1, 1500, 512]> v_21_cast = linear(bias = var_642_to_fp16, weight = var_641_to_fp16, x = var_622_cast);
341
+ tensor<int32, [4]> var_650 = const()[name = tensor<string, []>("op_650"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
342
+ tensor<fp16, [1, 1500, 8, 64]> var_651_cast = reshape(shape = var_650, x = q_21_cast);
343
+ tensor<fp16, [1, 1, 1, 1]> const_52_to_fp16 = const()[name = tensor<string, []>("const_52_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
344
+ tensor<fp16, [1, 1500, 8, 64]> q_cast = mul(x = var_651_cast, y = const_52_to_fp16);
345
+ tensor<int32, [4]> var_657 = const()[name = tensor<string, []>("op_657"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
346
+ tensor<fp16, [1, 1500, 8, 64]> var_658_cast = reshape(shape = var_657, x = k_21_cast);
347
+ tensor<fp16, [1, 1, 1, 1]> const_53_to_fp16 = const()[name = tensor<string, []>("const_53_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
348
+ tensor<fp16, [1, 1500, 8, 64]> k_cast = mul(x = var_658_cast, y = const_53_to_fp16);
349
+ tensor<int32, [4]> var_664 = const()[name = tensor<string, []>("op_664"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
350
+ tensor<fp16, [1, 1500, 8, 64]> var_665_cast = reshape(shape = var_664, x = v_21_cast);
351
+ tensor<int32, [4]> var_666 = const()[name = tensor<string, []>("op_666"), val = tensor<int32, [4]>([0, 2, 1, 3])];
352
+ tensor<bool, []> qk_transpose_x_0 = const()[name = tensor<string, []>("qk_transpose_x_0"), val = tensor<bool, []>(false)];
353
+ tensor<bool, []> qk_transpose_y_0 = const()[name = tensor<string, []>("qk_transpose_y_0"), val = tensor<bool, []>(false)];
354
+ tensor<int32, [4]> transpose_22_perm_0 = const()[name = tensor<string, []>("transpose_22_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
355
+ tensor<int32, [4]> transpose_23_perm_0 = const()[name = tensor<string, []>("transpose_23_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
356
+ tensor<fp16, [1, 8, 64, 1500]> transpose_25 = transpose(perm = transpose_23_perm_0, x = k_cast);
357
+ tensor<fp16, [1, 8, 1500, 64]> transpose_26 = transpose(perm = transpose_22_perm_0, x = q_cast);
358
+ tensor<fp16, [1, 8, 1500, 1500]> qk_cast = matmul(transpose_x = qk_transpose_x_0, transpose_y = qk_transpose_y_0, x = transpose_26, y = transpose_25);
359
+ tensor<fp16, [1, 8, 1500, 1500]> var_670_cast = softmax(axis = var_605, x = qk_cast);
360
+ tensor<bool, []> var_672_transpose_x_0 = const()[name = tensor<string, []>("op_672_transpose_x_0"), val = tensor<bool, []>(false)];
361
+ tensor<bool, []> var_672_transpose_y_0 = const()[name = tensor<string, []>("op_672_transpose_y_0"), val = tensor<bool, []>(false)];
362
+ tensor<fp16, [1, 8, 1500, 64]> transpose_27 = transpose(perm = var_666, x = var_665_cast);
363
+ tensor<fp16, [1, 8, 1500, 64]> var_672_cast = matmul(transpose_x = var_672_transpose_x_0, transpose_y = var_672_transpose_y_0, x = var_670_cast, y = transpose_27);
364
+ tensor<int32, [4]> var_673 = const()[name = tensor<string, []>("op_673"), val = tensor<int32, [4]>([0, 2, 1, 3])];
365
+ tensor<int32, [3]> concat_5 = const()[name = tensor<string, []>("concat_5"), val = tensor<int32, [3]>([1, 1500, 512])];
366
+ tensor<fp16, [1, 1500, 8, 64]> transpose_24 = transpose(perm = var_673, x = var_672_cast);
367
+ tensor<fp16, [1, 1500, 512]> x_71_cast = reshape(shape = concat_5, x = transpose_24);
368
+ tensor<fp16, [512, 512]> var_678_to_fp16 = const()[name = tensor<string, []>("op_678_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36464512)))];
369
+ tensor<fp16, [512]> var_679_to_fp16 = const()[name = tensor<string, []>("op_679_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36988864)))];
370
+ tensor<fp16, [1, 1500, 512]> var_680_cast = linear(bias = var_679_to_fp16, weight = var_678_to_fp16, x = x_71_cast);
371
+ tensor<fp16, [1, 1500, 512]> x_73_cast = add(x = x_67_cast, y = var_680_cast);
372
+ tensor<int32, [1]> var_686_axes_0 = const()[name = tensor<string, []>("op_686_axes_0"), val = tensor<int32, [1]>([-1])];
373
+ tensor<fp16, [512]> blocks_5_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_5_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36989952)))];
374
+ tensor<fp16, [512]> blocks_5_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_5_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36991040)))];
375
+ tensor<fp16, [1, 1500, 512]> var_686_cast = layer_norm(axes = var_686_axes_0, beta = blocks_5_mlp_ln_bias_to_fp16, epsilon = var_611_to_fp16, gamma = blocks_5_mlp_ln_weight_to_fp16, x = x_73_cast);
376
+ tensor<fp16, [2048, 512]> var_695_to_fp16 = const()[name = tensor<string, []>("op_695_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36992128)))];
377
+ tensor<fp16, [2048]> var_696_to_fp16 = const()[name = tensor<string, []>("op_696_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(39089344)))];
378
+ tensor<fp16, [1, 1500, 2048]> input_49_cast = linear(bias = var_696_to_fp16, weight = var_695_to_fp16, x = var_686_cast);
379
+ tensor<string, []> x_77_mode_0 = const()[name = tensor<string, []>("x_77_mode_0"), val = tensor<string, []>("EXACT")];
380
+ tensor<fp16, [1, 1500, 2048]> x_77_cast = gelu(mode = x_77_mode_0, x = input_49_cast);
381
+ tensor<fp16, [512, 2048]> var_701_to_fp16 = const()[name = tensor<string, []>("op_701_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(39093504)))];
382
+ tensor<fp16, [512]> var_702_to_fp16 = const()[name = tensor<string, []>("op_702_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41190720)))];
383
+ tensor<fp16, [1, 1500, 512]> var_703_cast = linear(bias = var_702_to_fp16, weight = var_701_to_fp16, x = x_77_cast);
384
+ tensor<fp16, [1, 1500, 512]> x_cast = add(x = x_73_cast, y = var_703_cast);
385
+ tensor<int32, [1]> var_716_axes_0 = const()[name = tensor<string, []>("op_716_axes_0"), val = tensor<int32, [1]>([-1])];
386
+ tensor<fp16, [512]> ln_post_weight_to_fp16 = const()[name = tensor<string, []>("ln_post_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41191808)))];
387
+ tensor<fp16, [512]> ln_post_bias_to_fp16 = const()[name = tensor<string, []>("ln_post_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41192896)))];
388
+ tensor<fp16, []> var_707_to_fp16 = const()[name = tensor<string, []>("op_707_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
389
+ tensor<fp16, [1, 1500, 512]> var_716_cast = layer_norm(axes = var_716_axes_0, beta = ln_post_bias_to_fp16, epsilon = var_707_to_fp16, gamma = ln_post_weight_to_fp16, x = x_cast);
390
+ tensor<string, []> var_716_cast_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_716_cast_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
391
+ tensor<fp32, [1, 1500, 512]> output = cast(dtype = var_716_cast_to_fp32_dtype_0, x = var_716_cast);
392
+ } -> (output);
393
+ }
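The program above is the complete Core ML encoder graph for the base model: two GELU convolutions over the 80-channel log-mel input, a positional embedding added to the conv output, six transformer blocks (8-head self-attention plus a 2048-wide GELU MLP, each with pre-layer-norm residuals), and a final layer norm whose result is cast back to fp32. A minimal sketch (Python, for orientation only; the numbers are read off the tensor shapes and constants in the program) of the shape and scale bookkeeping:

# Sketch of the shape/scale bookkeeping implied by the MIL program above.
n_mels, n_frames_in = 80, 3000      # logmel_data input is [1, 80, 3000]
n_state, n_head = 512, 8            # base encoder width and attention heads
d_head = n_state // n_head          # 64
n_frames = n_frames_in // 2         # second conv has stride 2 -> 1500 positions

# q and k are each scaled by the fp16 constant 0x1.6ap-2 ~= 0.3535 before the
# matmul, i.e. d_head ** -0.25, so qk carries the usual 1/sqrt(d_head) factor.
scale = d_head ** -0.25
print(n_frames, d_head, round(scale, 4))    # 1500 64 0.3536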
ggml-base-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa38c9d90f01effad1990d1f17e67cd8382c6486b8e5a4dbacf44439cd838a38
3
+ size 41193984
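weight.bin is stored through Git LFS, so the three lines above are a pointer file rather than the weights themselves: the oid is the SHA-256 of the real blob and size is its length in bytes (about 41 MB here). A minimal sketch (Python, standard library only, assuming the blob has already been pulled locally) to check a downloaded copy against the pointer:

import hashlib

def sha256_of(path, chunk=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk), b""):
            h.update(block)
    return h.hexdigest()

expected = "aa38c9d90f01effad1990d1f17e67cd8382c6486b8e5a4dbacf44439cd838a38"  # oid from the pointer above
actual = sha256_of("ggml-base-encoder.mlmodelc/weights/weight.bin")
print("match" if actual == expected else "mismatch", actual)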
ggml-base.en-encoder.mlmodelc.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8cf860309e2449e2bdc8be834cf838ab2565747ecc8c0ef914ef5975115e192b
3
  size 37950917
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0634e109c4ec900dc6ffc3305ac8367822ade13fcc10425ec0db0dbfcebb12aa
3
  size 37950917
ggml-base.en-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2441ae34fc7d12946dba7b63379063e856ffc7c3e11ba5f7533efb1450562ca6
3
+ size 207
ggml-base.en-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05fe28591b40616fa0c34ad7b853133623f5300923ec812acb11459c411acf3b
3
+ size 149
ggml-base.en-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,64 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 6,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Linear" : 36,
23
+ "Matmul" : 12,
24
+ "Cast" : 2,
25
+ "Conv" : 2,
26
+ "Softmax" : 6,
27
+ "Add" : 13,
28
+ "LayerNorm" : 13,
29
+ "Mul" : 12,
30
+ "Transpose" : 25,
31
+ "Gelu" : 8,
32
+ "Reshape" : 24
33
+ },
34
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
35
+ "isUpdatable" : "0",
36
+ "availability" : {
37
+ "macOS" : "12.0",
38
+ "tvOS" : "15.0",
39
+ "watchOS" : "8.0",
40
+ "iOS" : "15.0",
41
+ "macCatalyst" : "15.0"
42
+ },
43
+ "modelType" : {
44
+ "name" : "MLModelType_mlProgram"
45
+ },
46
+ "userDefinedMetadata" : {
47
+
48
+ },
49
+ "inputSchema" : [
50
+ {
51
+ "hasShapeFlexibility" : "0",
52
+ "isOptional" : "0",
53
+ "dataType" : "Float32",
54
+ "formattedType" : "MultiArray (Float32 1 × 80 × 3000)",
55
+ "shortDescription" : "",
56
+ "shape" : "[1, 80, 3000]",
57
+ "name" : "logmel_data",
58
+ "type" : "MultiArray"
59
+ }
60
+ ],
61
+ "generatedClassName" : "coreml_encoder_base_en",
62
+ "method" : "predict"
63
+ }
64
+ ]
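metadata.json is the compiled model's interface description: a single Float32 "logmel_data" input of shape [1, 80, 3000], a Float32 multi-array output, Float16 weight storage, and specificationVersion 6 (iOS 15 / macOS 12 and later). A minimal sketch (Python, standard library only; the path is an assumption, any of the *.mlmodelc directories in this commit has the same file) for reading that interface back out:

import json

# The file holds a single-element list wrapping the metadata object.
with open("ggml-base.en-encoder.mlmodelc/metadata.json") as f:
    meta = json.load(f)[0]

for port in ("inputSchema", "outputSchema"):
    for t in meta[port]:
        print(port, t["name"], t["dataType"], t["shape"])
# e.g. inputSchema logmel_data Float32 [1, 80, 3000]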
ggml-base.en-encoder.mlmodelc/model.mil ADDED
@@ -0,0 +1,393 @@
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "4.28.4"}, {"coremlc-version", "1436.100.10"}})]
3
+ {
4
+ func main<ios15>(tensor<fp32, [1, 80, 3000]> logmel_data) {
5
+ tensor<int32, []> var_20 = const()[name = tensor<string, []>("op_20"), val = tensor<int32, []>(1)];
6
+ tensor<int32, [1]> var_28 = const()[name = tensor<string, []>("op_28"), val = tensor<int32, [1]>([1])];
7
+ tensor<int32, [1]> var_30 = const()[name = tensor<string, []>("op_30"), val = tensor<int32, [1]>([1])];
8
+ tensor<string, []> var_32_pad_type_0 = const()[name = tensor<string, []>("op_32_pad_type_0"), val = tensor<string, []>("custom")];
9
+ tensor<int32, [2]> var_32_pad_0 = const()[name = tensor<string, []>("op_32_pad_0"), val = tensor<int32, [2]>([1, 1])];
10
+ tensor<string, []> logmel_data_to_fp16_dtype_0 = const()[name = tensor<string, []>("logmel_data_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
11
+ tensor<fp16, [512, 80, 3]> weight_3_to_fp16 = const()[name = tensor<string, []>("weight_3_to_fp16"), val = tensor<fp16, [512, 80, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
12
+ tensor<fp16, [512]> bias_3_to_fp16 = const()[name = tensor<string, []>("bias_3_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(245888)))];
13
+ tensor<fp16, [1, 80, 3000]> cast_187 = cast(dtype = logmel_data_to_fp16_dtype_0, x = logmel_data);
14
+ tensor<fp16, [1, 512, 3000]> var_32_cast = conv(bias = bias_3_to_fp16, dilations = var_30, groups = var_20, pad = var_32_pad_0, pad_type = var_32_pad_type_0, strides = var_28, weight = weight_3_to_fp16, x = cast_187);
15
+ tensor<string, []> input_1_mode_0 = const()[name = tensor<string, []>("input_1_mode_0"), val = tensor<string, []>("EXACT")];
16
+ tensor<fp16, [1, 512, 3000]> input_1_cast = gelu(mode = input_1_mode_0, x = var_32_cast);
17
+ tensor<int32, []> var_36 = const()[name = tensor<string, []>("op_36"), val = tensor<int32, []>(1)];
18
+ tensor<int32, [1]> var_45 = const()[name = tensor<string, []>("op_45"), val = tensor<int32, [1]>([2])];
19
+ tensor<int32, [1]> var_47 = const()[name = tensor<string, []>("op_47"), val = tensor<int32, [1]>([1])];
20
+ tensor<string, []> var_49_pad_type_0 = const()[name = tensor<string, []>("op_49_pad_type_0"), val = tensor<string, []>("custom")];
21
+ tensor<int32, [2]> var_49_pad_0 = const()[name = tensor<string, []>("op_49_pad_0"), val = tensor<int32, [2]>([1, 1])];
22
+ tensor<fp16, [512, 512, 3]> weight_7_to_fp16 = const()[name = tensor<string, []>("weight_7_to_fp16"), val = tensor<fp16, [512, 512, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(246976)))];
23
+ tensor<fp16, [512]> bias_7_to_fp16 = const()[name = tensor<string, []>("bias_7_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1819904)))];
24
+ tensor<fp16, [1, 512, 1500]> var_49_cast = conv(bias = bias_7_to_fp16, dilations = var_47, groups = var_36, pad = var_49_pad_0, pad_type = var_49_pad_type_0, strides = var_45, weight = weight_7_to_fp16, x = input_1_cast);
25
+ tensor<string, []> x_3_mode_0 = const()[name = tensor<string, []>("x_3_mode_0"), val = tensor<string, []>("EXACT")];
26
+ tensor<fp16, [1, 512, 1500]> x_3_cast = gelu(mode = x_3_mode_0, x = var_49_cast);
27
+ tensor<int32, [3]> var_54 = const()[name = tensor<string, []>("op_54"), val = tensor<int32, [3]>([0, 2, 1])];
28
+ tensor<fp16, [1500, 512]> positional_embedding_to_fp16 = const()[name = tensor<string, []>("positional_embedding_to_fp16"), val = tensor<fp16, [1500, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1820992)))];
29
+ tensor<fp16, [1, 1500, 512]> transpose_48 = transpose(perm = var_54, x = x_3_cast);
30
+ tensor<fp16, [1, 1500, 512]> var_57_cast = add(x = transpose_48, y = positional_embedding_to_fp16);
31
+ tensor<int32, []> var_70 = const()[name = tensor<string, []>("op_70"), val = tensor<int32, []>(-1)];
32
+ tensor<int32, [1]> var_87_axes_0 = const()[name = tensor<string, []>("op_87_axes_0"), val = tensor<int32, [1]>([-1])];
33
+ tensor<fp16, [512]> blocks_0_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3357056)))];
34
+ tensor<fp16, [512]> blocks_0_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3358144)))];
35
+ tensor<fp16, []> var_76_to_fp16 = const()[name = tensor<string, []>("op_76_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
36
+ tensor<fp16, [1, 1500, 512]> var_87_cast = layer_norm(axes = var_87_axes_0, beta = blocks_0_attn_ln_bias_to_fp16, epsilon = var_76_to_fp16, gamma = blocks_0_attn_ln_weight_to_fp16, x = var_57_cast);
37
+ tensor<fp16, [512, 512]> var_98_to_fp16 = const()[name = tensor<string, []>("op_98_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3359232)))];
38
+ tensor<fp16, [512]> var_99_to_fp16 = const()[name = tensor<string, []>("op_99_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3883584)))];
39
+ tensor<fp16, [1, 1500, 512]> q_1_cast = linear(bias = var_99_to_fp16, weight = var_98_to_fp16, x = var_87_cast);
40
+ tensor<fp16, [512, 512]> var_102_to_fp16 = const()[name = tensor<string, []>("op_102_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3884672)))];
41
+ tensor<fp16, [512]> k_1_bias_0_to_fp16 = const()[name = tensor<string, []>("k_1_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4409024)))];
42
+ tensor<fp16, [1, 1500, 512]> k_1_cast = linear(bias = k_1_bias_0_to_fp16, weight = var_102_to_fp16, x = var_87_cast);
43
+ tensor<fp16, [512, 512]> var_106_to_fp16 = const()[name = tensor<string, []>("op_106_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4410112)))];
44
+ tensor<fp16, [512]> var_107_to_fp16 = const()[name = tensor<string, []>("op_107_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4934464)))];
45
+ tensor<fp16, [1, 1500, 512]> v_1_cast = linear(bias = var_107_to_fp16, weight = var_106_to_fp16, x = var_87_cast);
46
+ tensor<int32, [4]> var_115 = const()[name = tensor<string, []>("op_115"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
47
+ tensor<fp16, [1, 1500, 8, 64]> var_116_cast = reshape(shape = var_115, x = q_1_cast);
48
+ tensor<fp16, [1, 1, 1, 1]> const_42_to_fp16 = const()[name = tensor<string, []>("const_42_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
49
+ tensor<fp16, [1, 1500, 8, 64]> q_3_cast = mul(x = var_116_cast, y = const_42_to_fp16);
50
+ tensor<int32, [4]> var_122 = const()[name = tensor<string, []>("op_122"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
51
+ tensor<fp16, [1, 1500, 8, 64]> var_123_cast = reshape(shape = var_122, x = k_1_cast);
52
+ tensor<fp16, [1, 1, 1, 1]> const_43_to_fp16 = const()[name = tensor<string, []>("const_43_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
53
+ tensor<fp16, [1, 1500, 8, 64]> k_3_cast = mul(x = var_123_cast, y = const_43_to_fp16);
54
+ tensor<int32, [4]> var_129 = const()[name = tensor<string, []>("op_129"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
55
+ tensor<fp16, [1, 1500, 8, 64]> var_130_cast = reshape(shape = var_129, x = v_1_cast);
56
+ tensor<int32, [4]> var_131 = const()[name = tensor<string, []>("op_131"), val = tensor<int32, [4]>([0, 2, 1, 3])];
57
+ tensor<bool, []> qk_1_transpose_x_0 = const()[name = tensor<string, []>("qk_1_transpose_x_0"), val = tensor<bool, []>(false)];
58
+ tensor<bool, []> qk_1_transpose_y_0 = const()[name = tensor<string, []>("qk_1_transpose_y_0"), val = tensor<bool, []>(false)];
59
+ tensor<int32, [4]> transpose_12_perm_0 = const()[name = tensor<string, []>("transpose_12_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
60
+ tensor<int32, [4]> transpose_13_perm_0 = const()[name = tensor<string, []>("transpose_13_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
61
+ tensor<fp16, [1, 8, 64, 1500]> transpose_45 = transpose(perm = transpose_13_perm_0, x = k_3_cast);
62
+ tensor<fp16, [1, 8, 1500, 64]> transpose_46 = transpose(perm = transpose_12_perm_0, x = q_3_cast);
63
+ tensor<fp16, [1, 8, 1500, 1500]> qk_1_cast = matmul(transpose_x = qk_1_transpose_x_0, transpose_y = qk_1_transpose_y_0, x = transpose_46, y = transpose_45);
64
+ tensor<fp16, [1, 8, 1500, 1500]> var_135_cast = softmax(axis = var_70, x = qk_1_cast);
65
+ tensor<bool, []> var_137_transpose_x_0 = const()[name = tensor<string, []>("op_137_transpose_x_0"), val = tensor<bool, []>(false)];
66
+ tensor<bool, []> var_137_transpose_y_0 = const()[name = tensor<string, []>("op_137_transpose_y_0"), val = tensor<bool, []>(false)];
67
+ tensor<fp16, [1, 8, 1500, 64]> transpose_47 = transpose(perm = var_131, x = var_130_cast);
68
+ tensor<fp16, [1, 8, 1500, 64]> var_137_cast = matmul(transpose_x = var_137_transpose_x_0, transpose_y = var_137_transpose_y_0, x = var_135_cast, y = transpose_47);
69
+ tensor<int32, [4]> var_138 = const()[name = tensor<string, []>("op_138"), val = tensor<int32, [4]>([0, 2, 1, 3])];
70
+ tensor<int32, [3]> concat_0 = const()[name = tensor<string, []>("concat_0"), val = tensor<int32, [3]>([1, 1500, 512])];
71
+ tensor<fp16, [1, 1500, 8, 64]> transpose_44 = transpose(perm = var_138, x = var_137_cast);
72
+ tensor<fp16, [1, 1500, 512]> x_11_cast = reshape(shape = concat_0, x = transpose_44);
73
+ tensor<fp16, [512, 512]> var_143_to_fp16 = const()[name = tensor<string, []>("op_143_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4935552)))];
74
+ tensor<fp16, [512]> var_144_to_fp16 = const()[name = tensor<string, []>("op_144_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5459904)))];
75
+ tensor<fp16, [1, 1500, 512]> var_145_cast = linear(bias = var_144_to_fp16, weight = var_143_to_fp16, x = x_11_cast);
76
+ tensor<fp16, [1, 1500, 512]> x_13_cast = add(x = var_57_cast, y = var_145_cast);
77
+ tensor<int32, [1]> var_151_axes_0 = const()[name = tensor<string, []>("op_151_axes_0"), val = tensor<int32, [1]>([-1])];
78
+ tensor<fp16, [512]> blocks_0_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5460992)))];
79
+ tensor<fp16, [512]> blocks_0_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5462080)))];
80
+ tensor<fp16, [1, 1500, 512]> var_151_cast = layer_norm(axes = var_151_axes_0, beta = blocks_0_mlp_ln_bias_to_fp16, epsilon = var_76_to_fp16, gamma = blocks_0_mlp_ln_weight_to_fp16, x = x_13_cast);
81
+ tensor<fp16, [2048, 512]> var_160_to_fp16 = const()[name = tensor<string, []>("op_160_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5463168)))];
82
+ tensor<fp16, [2048]> var_161_to_fp16 = const()[name = tensor<string, []>("op_161_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7560384)))];
83
+ tensor<fp16, [1, 1500, 2048]> input_9_cast = linear(bias = var_161_to_fp16, weight = var_160_to_fp16, x = var_151_cast);
84
+ tensor<string, []> x_17_mode_0 = const()[name = tensor<string, []>("x_17_mode_0"), val = tensor<string, []>("EXACT")];
85
+ tensor<fp16, [1, 1500, 2048]> x_17_cast = gelu(mode = x_17_mode_0, x = input_9_cast);
86
+ tensor<fp16, [512, 2048]> var_166_to_fp16 = const()[name = tensor<string, []>("op_166_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7564544)))];
87
+ tensor<fp16, [512]> var_167_to_fp16 = const()[name = tensor<string, []>("op_167_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9661760)))];
88
+ tensor<fp16, [1, 1500, 512]> var_168_cast = linear(bias = var_167_to_fp16, weight = var_166_to_fp16, x = x_17_cast);
89
+ tensor<fp16, [1, 1500, 512]> x_19_cast = add(x = x_13_cast, y = var_168_cast);
90
+ tensor<int32, []> var_177 = const()[name = tensor<string, []>("op_177"), val = tensor<int32, []>(-1)];
91
+ tensor<int32, [1]> var_194_axes_0 = const()[name = tensor<string, []>("op_194_axes_0"), val = tensor<int32, [1]>([-1])];
92
+ tensor<fp16, [512]> blocks_1_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9662848)))];
93
+ tensor<fp16, [512]> blocks_1_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9663936)))];
94
+ tensor<fp16, []> var_183_to_fp16 = const()[name = tensor<string, []>("op_183_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
95
+ tensor<fp16, [1, 1500, 512]> var_194_cast = layer_norm(axes = var_194_axes_0, beta = blocks_1_attn_ln_bias_to_fp16, epsilon = var_183_to_fp16, gamma = blocks_1_attn_ln_weight_to_fp16, x = x_19_cast);
96
+ tensor<fp16, [512, 512]> var_205_to_fp16 = const()[name = tensor<string, []>("op_205_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9665024)))];
97
+ tensor<fp16, [512]> var_206_to_fp16 = const()[name = tensor<string, []>("op_206_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10189376)))];
98
+ tensor<fp16, [1, 1500, 512]> q_5_cast = linear(bias = var_206_to_fp16, weight = var_205_to_fp16, x = var_194_cast);
99
+ tensor<fp16, [512, 512]> var_209_to_fp16 = const()[name = tensor<string, []>("op_209_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10190464)))];
100
+ tensor<fp16, [512]> k_5_bias_0_to_fp16 = const()[name = tensor<string, []>("k_5_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10714816)))];
101
+ tensor<fp16, [1, 1500, 512]> k_5_cast = linear(bias = k_5_bias_0_to_fp16, weight = var_209_to_fp16, x = var_194_cast);
102
+ tensor<fp16, [512, 512]> var_213_to_fp16 = const()[name = tensor<string, []>("op_213_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10715904)))];
103
+ tensor<fp16, [512]> var_214_to_fp16 = const()[name = tensor<string, []>("op_214_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11240256)))];
104
+ tensor<fp16, [1, 1500, 512]> v_5_cast = linear(bias = var_214_to_fp16, weight = var_213_to_fp16, x = var_194_cast);
105
+ tensor<int32, [4]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
106
+ tensor<fp16, [1, 1500, 8, 64]> var_223_cast = reshape(shape = var_222, x = q_5_cast);
107
+ tensor<fp16, [1, 1, 1, 1]> const_44_to_fp16 = const()[name = tensor<string, []>("const_44_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
108
+ tensor<fp16, [1, 1500, 8, 64]> q_7_cast = mul(x = var_223_cast, y = const_44_to_fp16);
109
+ tensor<int32, [4]> var_229 = const()[name = tensor<string, []>("op_229"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
110
+ tensor<fp16, [1, 1500, 8, 64]> var_230_cast = reshape(shape = var_229, x = k_5_cast);
111
+ tensor<fp16, [1, 1, 1, 1]> const_45_to_fp16 = const()[name = tensor<string, []>("const_45_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
112
+ tensor<fp16, [1, 1500, 8, 64]> k_7_cast = mul(x = var_230_cast, y = const_45_to_fp16);
113
+ tensor<int32, [4]> var_236 = const()[name = tensor<string, []>("op_236"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
114
+ tensor<fp16, [1, 1500, 8, 64]> var_237_cast = reshape(shape = var_236, x = v_5_cast);
115
+ tensor<int32, [4]> var_238 = const()[name = tensor<string, []>("op_238"), val = tensor<int32, [4]>([0, 2, 1, 3])];
116
+ tensor<bool, []> qk_3_transpose_x_0 = const()[name = tensor<string, []>("qk_3_transpose_x_0"), val = tensor<bool, []>(false)];
117
+ tensor<bool, []> qk_3_transpose_y_0 = const()[name = tensor<string, []>("qk_3_transpose_y_0"), val = tensor<bool, []>(false)];
118
+ tensor<int32, [4]> transpose_14_perm_0 = const()[name = tensor<string, []>("transpose_14_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
119
+ tensor<int32, [4]> transpose_15_perm_0 = const()[name = tensor<string, []>("transpose_15_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
120
+ tensor<fp16, [1, 8, 64, 1500]> transpose_41 = transpose(perm = transpose_15_perm_0, x = k_7_cast);
121
+ tensor<fp16, [1, 8, 1500, 64]> transpose_42 = transpose(perm = transpose_14_perm_0, x = q_7_cast);
122
+ tensor<fp16, [1, 8, 1500, 1500]> qk_3_cast = matmul(transpose_x = qk_3_transpose_x_0, transpose_y = qk_3_transpose_y_0, x = transpose_42, y = transpose_41);
123
+ tensor<fp16, [1, 8, 1500, 1500]> var_242_cast = softmax(axis = var_177, x = qk_3_cast);
124
+ tensor<bool, []> var_244_transpose_x_0 = const()[name = tensor<string, []>("op_244_transpose_x_0"), val = tensor<bool, []>(false)];
125
+ tensor<bool, []> var_244_transpose_y_0 = const()[name = tensor<string, []>("op_244_transpose_y_0"), val = tensor<bool, []>(false)];
126
+ tensor<fp16, [1, 8, 1500, 64]> transpose_43 = transpose(perm = var_238, x = var_237_cast);
127
+ tensor<fp16, [1, 8, 1500, 64]> var_244_cast = matmul(transpose_x = var_244_transpose_x_0, transpose_y = var_244_transpose_y_0, x = var_242_cast, y = transpose_43);
128
+ tensor<int32, [4]> var_245 = const()[name = tensor<string, []>("op_245"), val = tensor<int32, [4]>([0, 2, 1, 3])];
129
+ tensor<int32, [3]> concat_1 = const()[name = tensor<string, []>("concat_1"), val = tensor<int32, [3]>([1, 1500, 512])];
130
+ tensor<fp16, [1, 1500, 8, 64]> transpose_40 = transpose(perm = var_245, x = var_244_cast);
131
+ tensor<fp16, [1, 1500, 512]> x_23_cast = reshape(shape = concat_1, x = transpose_40);
132
+ tensor<fp16, [512, 512]> var_250_to_fp16 = const()[name = tensor<string, []>("op_250_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11241344)))];
133
+ tensor<fp16, [512]> var_251_to_fp16 = const()[name = tensor<string, []>("op_251_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11765696)))];
134
+ tensor<fp16, [1, 1500, 512]> var_252_cast = linear(bias = var_251_to_fp16, weight = var_250_to_fp16, x = x_23_cast);
135
+ tensor<fp16, [1, 1500, 512]> x_25_cast = add(x = x_19_cast, y = var_252_cast);
136
+ tensor<int32, [1]> var_258_axes_0 = const()[name = tensor<string, []>("op_258_axes_0"), val = tensor<int32, [1]>([-1])];
137
+ tensor<fp16, [512]> blocks_1_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11766784)))];
138
+ tensor<fp16, [512]> blocks_1_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11767872)))];
139
+ tensor<fp16, [1, 1500, 512]> var_258_cast = layer_norm(axes = var_258_axes_0, beta = blocks_1_mlp_ln_bias_to_fp16, epsilon = var_183_to_fp16, gamma = blocks_1_mlp_ln_weight_to_fp16, x = x_25_cast);
140
+ tensor<fp16, [2048, 512]> var_267_to_fp16 = const()[name = tensor<string, []>("op_267_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11768960)))];
141
+ tensor<fp16, [2048]> var_268_to_fp16 = const()[name = tensor<string, []>("op_268_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13866176)))];
142
+ tensor<fp16, [1, 1500, 2048]> input_17_cast = linear(bias = var_268_to_fp16, weight = var_267_to_fp16, x = var_258_cast);
143
+ tensor<string, []> x_29_mode_0 = const()[name = tensor<string, []>("x_29_mode_0"), val = tensor<string, []>("EXACT")];
144
+ tensor<fp16, [1, 1500, 2048]> x_29_cast = gelu(mode = x_29_mode_0, x = input_17_cast);
145
+ tensor<fp16, [512, 2048]> var_273_to_fp16 = const()[name = tensor<string, []>("op_273_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13870336)))];
146
+ tensor<fp16, [512]> var_274_to_fp16 = const()[name = tensor<string, []>("op_274_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15967552)))];
147
+ tensor<fp16, [1, 1500, 512]> var_275_cast = linear(bias = var_274_to_fp16, weight = var_273_to_fp16, x = x_29_cast);
148
+ tensor<fp16, [1, 1500, 512]> x_31_cast = add(x = x_25_cast, y = var_275_cast);
149
+ tensor<int32, []> var_284 = const()[name = tensor<string, []>("op_284"), val = tensor<int32, []>(-1)];
150
+ tensor<int32, [1]> var_301_axes_0 = const()[name = tensor<string, []>("op_301_axes_0"), val = tensor<int32, [1]>([-1])];
151
+ tensor<fp16, [512]> blocks_2_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15968640)))];
152
+ tensor<fp16, [512]> blocks_2_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15969728)))];
153
+ tensor<fp16, []> var_290_to_fp16 = const()[name = tensor<string, []>("op_290_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
154
+ tensor<fp16, [1, 1500, 512]> var_301_cast = layer_norm(axes = var_301_axes_0, beta = blocks_2_attn_ln_bias_to_fp16, epsilon = var_290_to_fp16, gamma = blocks_2_attn_ln_weight_to_fp16, x = x_31_cast);
155
+ tensor<fp16, [512, 512]> var_312_to_fp16 = const()[name = tensor<string, []>("op_312_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15970816)))];
156
+ tensor<fp16, [512]> var_313_to_fp16 = const()[name = tensor<string, []>("op_313_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16495168)))];
157
+ tensor<fp16, [1, 1500, 512]> q_9_cast = linear(bias = var_313_to_fp16, weight = var_312_to_fp16, x = var_301_cast);
158
+ tensor<fp16, [512, 512]> var_316_to_fp16 = const()[name = tensor<string, []>("op_316_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16496256)))];
159
+ tensor<fp16, [512]> k_9_bias_0_to_fp16 = const()[name = tensor<string, []>("k_9_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17020608)))];
160
+ tensor<fp16, [1, 1500, 512]> k_9_cast = linear(bias = k_9_bias_0_to_fp16, weight = var_316_to_fp16, x = var_301_cast);
161
+ tensor<fp16, [512, 512]> var_320_to_fp16 = const()[name = tensor<string, []>("op_320_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17021696)))];
162
+ tensor<fp16, [512]> var_321_to_fp16 = const()[name = tensor<string, []>("op_321_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17546048)))];
163
+ tensor<fp16, [1, 1500, 512]> v_9_cast = linear(bias = var_321_to_fp16, weight = var_320_to_fp16, x = var_301_cast);
164
+ tensor<int32, [4]> var_329 = const()[name = tensor<string, []>("op_329"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
165
+ tensor<fp16, [1, 1500, 8, 64]> var_330_cast = reshape(shape = var_329, x = q_9_cast);
166
+ tensor<fp16, [1, 1, 1, 1]> const_46_to_fp16 = const()[name = tensor<string, []>("const_46_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
167
+ tensor<fp16, [1, 1500, 8, 64]> q_11_cast = mul(x = var_330_cast, y = const_46_to_fp16);
168
+ tensor<int32, [4]> var_336 = const()[name = tensor<string, []>("op_336"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
169
+ tensor<fp16, [1, 1500, 8, 64]> var_337_cast = reshape(shape = var_336, x = k_9_cast);
170
+ tensor<fp16, [1, 1, 1, 1]> const_47_to_fp16 = const()[name = tensor<string, []>("const_47_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
171
+ tensor<fp16, [1, 1500, 8, 64]> k_11_cast = mul(x = var_337_cast, y = const_47_to_fp16);
172
+ tensor<int32, [4]> var_343 = const()[name = tensor<string, []>("op_343"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
173
+ tensor<fp16, [1, 1500, 8, 64]> var_344_cast = reshape(shape = var_343, x = v_9_cast);
174
+ tensor<int32, [4]> var_345 = const()[name = tensor<string, []>("op_345"), val = tensor<int32, [4]>([0, 2, 1, 3])];
175
+ tensor<bool, []> qk_5_transpose_x_0 = const()[name = tensor<string, []>("qk_5_transpose_x_0"), val = tensor<bool, []>(false)];
176
+ tensor<bool, []> qk_5_transpose_y_0 = const()[name = tensor<string, []>("qk_5_transpose_y_0"), val = tensor<bool, []>(false)];
177
+ tensor<int32, [4]> transpose_16_perm_0 = const()[name = tensor<string, []>("transpose_16_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
178
+ tensor<int32, [4]> transpose_17_perm_0 = const()[name = tensor<string, []>("transpose_17_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
179
+ tensor<fp16, [1, 8, 64, 1500]> transpose_37 = transpose(perm = transpose_17_perm_0, x = k_11_cast);
180
+ tensor<fp16, [1, 8, 1500, 64]> transpose_38 = transpose(perm = transpose_16_perm_0, x = q_11_cast);
181
+ tensor<fp16, [1, 8, 1500, 1500]> qk_5_cast = matmul(transpose_x = qk_5_transpose_x_0, transpose_y = qk_5_transpose_y_0, x = transpose_38, y = transpose_37);
182
+ tensor<fp16, [1, 8, 1500, 1500]> var_349_cast = softmax(axis = var_284, x = qk_5_cast);
183
+ tensor<bool, []> var_351_transpose_x_0 = const()[name = tensor<string, []>("op_351_transpose_x_0"), val = tensor<bool, []>(false)];
184
+ tensor<bool, []> var_351_transpose_y_0 = const()[name = tensor<string, []>("op_351_transpose_y_0"), val = tensor<bool, []>(false)];
185
+ tensor<fp16, [1, 8, 1500, 64]> transpose_39 = transpose(perm = var_345, x = var_344_cast);
186
+ tensor<fp16, [1, 8, 1500, 64]> var_351_cast = matmul(transpose_x = var_351_transpose_x_0, transpose_y = var_351_transpose_y_0, x = var_349_cast, y = transpose_39);
187
+ tensor<int32, [4]> var_352 = const()[name = tensor<string, []>("op_352"), val = tensor<int32, [4]>([0, 2, 1, 3])];
188
+ tensor<int32, [3]> concat_2 = const()[name = tensor<string, []>("concat_2"), val = tensor<int32, [3]>([1, 1500, 512])];
189
+ tensor<fp16, [1, 1500, 8, 64]> transpose_36 = transpose(perm = var_352, x = var_351_cast);
190
+ tensor<fp16, [1, 1500, 512]> x_35_cast = reshape(shape = concat_2, x = transpose_36);
191
+ tensor<fp16, [512, 512]> var_357_to_fp16 = const()[name = tensor<string, []>("op_357_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17547136)))];
192
+ tensor<fp16, [512]> var_358_to_fp16 = const()[name = tensor<string, []>("op_358_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18071488)))];
193
+ tensor<fp16, [1, 1500, 512]> var_359_cast = linear(bias = var_358_to_fp16, weight = var_357_to_fp16, x = x_35_cast);
194
+ tensor<fp16, [1, 1500, 512]> x_37_cast = add(x = x_31_cast, y = var_359_cast);
195
+ tensor<int32, [1]> var_365_axes_0 = const()[name = tensor<string, []>("op_365_axes_0"), val = tensor<int32, [1]>([-1])];
196
+ tensor<fp16, [512]> blocks_2_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18072576)))];
197
+ tensor<fp16, [512]> blocks_2_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18073664)))];
198
+ tensor<fp16, [1, 1500, 512]> var_365_cast = layer_norm(axes = var_365_axes_0, beta = blocks_2_mlp_ln_bias_to_fp16, epsilon = var_290_to_fp16, gamma = blocks_2_mlp_ln_weight_to_fp16, x = x_37_cast);
199
+ tensor<fp16, [2048, 512]> var_374_to_fp16 = const()[name = tensor<string, []>("op_374_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18074752)))];
200
+ tensor<fp16, [2048]> var_375_to_fp16 = const()[name = tensor<string, []>("op_375_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20171968)))];
201
+ tensor<fp16, [1, 1500, 2048]> input_25_cast = linear(bias = var_375_to_fp16, weight = var_374_to_fp16, x = var_365_cast);
202
+ tensor<string, []> x_41_mode_0 = const()[name = tensor<string, []>("x_41_mode_0"), val = tensor<string, []>("EXACT")];
203
+ tensor<fp16, [1, 1500, 2048]> x_41_cast = gelu(mode = x_41_mode_0, x = input_25_cast);
204
+ tensor<fp16, [512, 2048]> var_380_to_fp16 = const()[name = tensor<string, []>("op_380_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20176128)))];
205
+ tensor<fp16, [512]> var_381_to_fp16 = const()[name = tensor<string, []>("op_381_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22273344)))];
206
+ tensor<fp16, [1, 1500, 512]> var_382_cast = linear(bias = var_381_to_fp16, weight = var_380_to_fp16, x = x_41_cast);
207
+ tensor<fp16, [1, 1500, 512]> x_43_cast = add(x = x_37_cast, y = var_382_cast);
208
+ tensor<int32, []> var_391 = const()[name = tensor<string, []>("op_391"), val = tensor<int32, []>(-1)];
209
+ tensor<int32, [1]> var_408_axes_0 = const()[name = tensor<string, []>("op_408_axes_0"), val = tensor<int32, [1]>([-1])];
210
+ tensor<fp16, [512]> blocks_3_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_3_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22274432)))];
211
+ tensor<fp16, [512]> blocks_3_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_3_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22275520)))];
212
+ tensor<fp16, []> var_397_to_fp16 = const()[name = tensor<string, []>("op_397_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
213
+ tensor<fp16, [1, 1500, 512]> var_408_cast = layer_norm(axes = var_408_axes_0, beta = blocks_3_attn_ln_bias_to_fp16, epsilon = var_397_to_fp16, gamma = blocks_3_attn_ln_weight_to_fp16, x = x_43_cast);
214
+ tensor<fp16, [512, 512]> var_419_to_fp16 = const()[name = tensor<string, []>("op_419_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22276608)))];
215
+ tensor<fp16, [512]> var_420_to_fp16 = const()[name = tensor<string, []>("op_420_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22800960)))];
216
+ tensor<fp16, [1, 1500, 512]> q_13_cast = linear(bias = var_420_to_fp16, weight = var_419_to_fp16, x = var_408_cast);
217
+ tensor<fp16, [512, 512]> var_423_to_fp16 = const()[name = tensor<string, []>("op_423_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22802048)))];
218
+ tensor<fp16, [512]> k_13_bias_0_to_fp16 = const()[name = tensor<string, []>("k_13_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23326400)))];
219
+ tensor<fp16, [1, 1500, 512]> k_13_cast = linear(bias = k_13_bias_0_to_fp16, weight = var_423_to_fp16, x = var_408_cast);
220
+ tensor<fp16, [512, 512]> var_427_to_fp16 = const()[name = tensor<string, []>("op_427_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23327488)))];
221
+ tensor<fp16, [512]> var_428_to_fp16 = const()[name = tensor<string, []>("op_428_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23851840)))];
222
+ tensor<fp16, [1, 1500, 512]> v_13_cast = linear(bias = var_428_to_fp16, weight = var_427_to_fp16, x = var_408_cast);
223
+ tensor<int32, [4]> var_436 = const()[name = tensor<string, []>("op_436"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
224
+ tensor<fp16, [1, 1500, 8, 64]> var_437_cast = reshape(shape = var_436, x = q_13_cast);
225
+ tensor<fp16, [1, 1, 1, 1]> const_48_to_fp16 = const()[name = tensor<string, []>("const_48_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
226
+ tensor<fp16, [1, 1500, 8, 64]> q_15_cast = mul(x = var_437_cast, y = const_48_to_fp16);
227
+ tensor<int32, [4]> var_443 = const()[name = tensor<string, []>("op_443"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
228
+ tensor<fp16, [1, 1500, 8, 64]> var_444_cast = reshape(shape = var_443, x = k_13_cast);
229
+ tensor<fp16, [1, 1, 1, 1]> const_49_to_fp16 = const()[name = tensor<string, []>("const_49_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
230
+ tensor<fp16, [1, 1500, 8, 64]> k_15_cast = mul(x = var_444_cast, y = const_49_to_fp16);
231
+ tensor<int32, [4]> var_450 = const()[name = tensor<string, []>("op_450"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
232
+ tensor<fp16, [1, 1500, 8, 64]> var_451_cast = reshape(shape = var_450, x = v_13_cast);
233
+ tensor<int32, [4]> var_452 = const()[name = tensor<string, []>("op_452"), val = tensor<int32, [4]>([0, 2, 1, 3])];
234
+ tensor<bool, []> qk_7_transpose_x_0 = const()[name = tensor<string, []>("qk_7_transpose_x_0"), val = tensor<bool, []>(false)];
235
+ tensor<bool, []> qk_7_transpose_y_0 = const()[name = tensor<string, []>("qk_7_transpose_y_0"), val = tensor<bool, []>(false)];
236
+ tensor<int32, [4]> transpose_18_perm_0 = const()[name = tensor<string, []>("transpose_18_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
237
+ tensor<int32, [4]> transpose_19_perm_0 = const()[name = tensor<string, []>("transpose_19_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
238
+ tensor<fp16, [1, 8, 64, 1500]> transpose_33 = transpose(perm = transpose_19_perm_0, x = k_15_cast);
239
+ tensor<fp16, [1, 8, 1500, 64]> transpose_34 = transpose(perm = transpose_18_perm_0, x = q_15_cast);
240
+ tensor<fp16, [1, 8, 1500, 1500]> qk_7_cast = matmul(transpose_x = qk_7_transpose_x_0, transpose_y = qk_7_transpose_y_0, x = transpose_34, y = transpose_33);
241
+ tensor<fp16, [1, 8, 1500, 1500]> var_456_cast = softmax(axis = var_391, x = qk_7_cast);
242
+ tensor<bool, []> var_458_transpose_x_0 = const()[name = tensor<string, []>("op_458_transpose_x_0"), val = tensor<bool, []>(false)];
243
+ tensor<bool, []> var_458_transpose_y_0 = const()[name = tensor<string, []>("op_458_transpose_y_0"), val = tensor<bool, []>(false)];
244
+ tensor<fp16, [1, 8, 1500, 64]> transpose_35 = transpose(perm = var_452, x = var_451_cast);
245
+ tensor<fp16, [1, 8, 1500, 64]> var_458_cast = matmul(transpose_x = var_458_transpose_x_0, transpose_y = var_458_transpose_y_0, x = var_456_cast, y = transpose_35);
246
+ tensor<int32, [4]> var_459 = const()[name = tensor<string, []>("op_459"), val = tensor<int32, [4]>([0, 2, 1, 3])];
247
+ tensor<int32, [3]> concat_3 = const()[name = tensor<string, []>("concat_3"), val = tensor<int32, [3]>([1, 1500, 512])];
248
+ tensor<fp16, [1, 1500, 8, 64]> transpose_32 = transpose(perm = var_459, x = var_458_cast);
249
+ tensor<fp16, [1, 1500, 512]> x_47_cast = reshape(shape = concat_3, x = transpose_32);
250
+ tensor<fp16, [512, 512]> var_464_to_fp16 = const()[name = tensor<string, []>("op_464_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23852928)))];
251
+ tensor<fp16, [512]> var_465_to_fp16 = const()[name = tensor<string, []>("op_465_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24377280)))];
252
+ tensor<fp16, [1, 1500, 512]> var_466_cast = linear(bias = var_465_to_fp16, weight = var_464_to_fp16, x = x_47_cast);
253
+ tensor<fp16, [1, 1500, 512]> x_49_cast = add(x = x_43_cast, y = var_466_cast);
254
+ tensor<int32, [1]> var_472_axes_0 = const()[name = tensor<string, []>("op_472_axes_0"), val = tensor<int32, [1]>([-1])];
255
+ tensor<fp16, [512]> blocks_3_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_3_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24378368)))];
256
+ tensor<fp16, [512]> blocks_3_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_3_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24379456)))];
257
+ tensor<fp16, [1, 1500, 512]> var_472_cast = layer_norm(axes = var_472_axes_0, beta = blocks_3_mlp_ln_bias_to_fp16, epsilon = var_397_to_fp16, gamma = blocks_3_mlp_ln_weight_to_fp16, x = x_49_cast);
258
+ tensor<fp16, [2048, 512]> var_481_to_fp16 = const()[name = tensor<string, []>("op_481_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24380544)))];
259
+ tensor<fp16, [2048]> var_482_to_fp16 = const()[name = tensor<string, []>("op_482_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(26477760)))];
260
+ tensor<fp16, [1, 1500, 2048]> input_33_cast = linear(bias = var_482_to_fp16, weight = var_481_to_fp16, x = var_472_cast);
261
+ tensor<string, []> x_53_mode_0 = const()[name = tensor<string, []>("x_53_mode_0"), val = tensor<string, []>("EXACT")];
262
+ tensor<fp16, [1, 1500, 2048]> x_53_cast = gelu(mode = x_53_mode_0, x = input_33_cast);
263
+ tensor<fp16, [512, 2048]> var_487_to_fp16 = const()[name = tensor<string, []>("op_487_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(26481920)))];
264
+ tensor<fp16, [512]> var_488_to_fp16 = const()[name = tensor<string, []>("op_488_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28579136)))];
265
+ tensor<fp16, [1, 1500, 512]> var_489_cast = linear(bias = var_488_to_fp16, weight = var_487_to_fp16, x = x_53_cast);
266
+ tensor<fp16, [1, 1500, 512]> x_55_cast = add(x = x_49_cast, y = var_489_cast);
267
+ tensor<int32, []> var_498 = const()[name = tensor<string, []>("op_498"), val = tensor<int32, []>(-1)];
268
+ tensor<int32, [1]> var_515_axes_0 = const()[name = tensor<string, []>("op_515_axes_0"), val = tensor<int32, [1]>([-1])];
269
+ tensor<fp16, [512]> blocks_4_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_4_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28580224)))];
270
+ tensor<fp16, [512]> blocks_4_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_4_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28581312)))];
271
+ tensor<fp16, []> var_504_to_fp16 = const()[name = tensor<string, []>("op_504_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
272
+ tensor<fp16, [1, 1500, 512]> var_515_cast = layer_norm(axes = var_515_axes_0, beta = blocks_4_attn_ln_bias_to_fp16, epsilon = var_504_to_fp16, gamma = blocks_4_attn_ln_weight_to_fp16, x = x_55_cast);
273
+ tensor<fp16, [512, 512]> var_526_to_fp16 = const()[name = tensor<string, []>("op_526_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28582400)))];
274
+ tensor<fp16, [512]> var_527_to_fp16 = const()[name = tensor<string, []>("op_527_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29106752)))];
275
+ tensor<fp16, [1, 1500, 512]> q_17_cast = linear(bias = var_527_to_fp16, weight = var_526_to_fp16, x = var_515_cast);
276
+ tensor<fp16, [512, 512]> var_530_to_fp16 = const()[name = tensor<string, []>("op_530_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29107840)))];
277
+ tensor<fp16, [512]> k_17_bias_0_to_fp16 = const()[name = tensor<string, []>("k_17_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29632192)))];
278
+ tensor<fp16, [1, 1500, 512]> k_17_cast = linear(bias = k_17_bias_0_to_fp16, weight = var_530_to_fp16, x = var_515_cast);
279
+ tensor<fp16, [512, 512]> var_534_to_fp16 = const()[name = tensor<string, []>("op_534_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29633280)))];
280
+ tensor<fp16, [512]> var_535_to_fp16 = const()[name = tensor<string, []>("op_535_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30157632)))];
281
+ tensor<fp16, [1, 1500, 512]> v_17_cast = linear(bias = var_535_to_fp16, weight = var_534_to_fp16, x = var_515_cast);
282
+ tensor<int32, [4]> var_543 = const()[name = tensor<string, []>("op_543"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
283
+ tensor<fp16, [1, 1500, 8, 64]> var_544_cast = reshape(shape = var_543, x = q_17_cast);
284
+ tensor<fp16, [1, 1, 1, 1]> const_50_to_fp16 = const()[name = tensor<string, []>("const_50_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
285
+ tensor<fp16, [1, 1500, 8, 64]> q_19_cast = mul(x = var_544_cast, y = const_50_to_fp16);
286
+ tensor<int32, [4]> var_550 = const()[name = tensor<string, []>("op_550"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
287
+ tensor<fp16, [1, 1500, 8, 64]> var_551_cast = reshape(shape = var_550, x = k_17_cast);
288
+ tensor<fp16, [1, 1, 1, 1]> const_51_to_fp16 = const()[name = tensor<string, []>("const_51_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
289
+ tensor<fp16, [1, 1500, 8, 64]> k_19_cast = mul(x = var_551_cast, y = const_51_to_fp16);
290
+ tensor<int32, [4]> var_557 = const()[name = tensor<string, []>("op_557"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
291
+ tensor<fp16, [1, 1500, 8, 64]> var_558_cast = reshape(shape = var_557, x = v_17_cast);
292
+ tensor<int32, [4]> var_559 = const()[name = tensor<string, []>("op_559"), val = tensor<int32, [4]>([0, 2, 1, 3])];
293
+ tensor<bool, []> qk_9_transpose_x_0 = const()[name = tensor<string, []>("qk_9_transpose_x_0"), val = tensor<bool, []>(false)];
294
+ tensor<bool, []> qk_9_transpose_y_0 = const()[name = tensor<string, []>("qk_9_transpose_y_0"), val = tensor<bool, []>(false)];
295
+ tensor<int32, [4]> transpose_20_perm_0 = const()[name = tensor<string, []>("transpose_20_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
296
+ tensor<int32, [4]> transpose_21_perm_0 = const()[name = tensor<string, []>("transpose_21_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
297
+ tensor<fp16, [1, 8, 64, 1500]> transpose_29 = transpose(perm = transpose_21_perm_0, x = k_19_cast);
298
+ tensor<fp16, [1, 8, 1500, 64]> transpose_30 = transpose(perm = transpose_20_perm_0, x = q_19_cast);
299
+ tensor<fp16, [1, 8, 1500, 1500]> qk_9_cast = matmul(transpose_x = qk_9_transpose_x_0, transpose_y = qk_9_transpose_y_0, x = transpose_30, y = transpose_29);
300
+ tensor<fp16, [1, 8, 1500, 1500]> var_563_cast = softmax(axis = var_498, x = qk_9_cast);
301
+ tensor<bool, []> var_565_transpose_x_0 = const()[name = tensor<string, []>("op_565_transpose_x_0"), val = tensor<bool, []>(false)];
302
+ tensor<bool, []> var_565_transpose_y_0 = const()[name = tensor<string, []>("op_565_transpose_y_0"), val = tensor<bool, []>(false)];
303
+ tensor<fp16, [1, 8, 1500, 64]> transpose_31 = transpose(perm = var_559, x = var_558_cast);
304
+ tensor<fp16, [1, 8, 1500, 64]> var_565_cast = matmul(transpose_x = var_565_transpose_x_0, transpose_y = var_565_transpose_y_0, x = var_563_cast, y = transpose_31);
305
+ tensor<int32, [4]> var_566 = const()[name = tensor<string, []>("op_566"), val = tensor<int32, [4]>([0, 2, 1, 3])];
306
+ tensor<int32, [3]> concat_4 = const()[name = tensor<string, []>("concat_4"), val = tensor<int32, [3]>([1, 1500, 512])];
307
+ tensor<fp16, [1, 1500, 8, 64]> transpose_28 = transpose(perm = var_566, x = var_565_cast);
308
+ tensor<fp16, [1, 1500, 512]> x_59_cast = reshape(shape = concat_4, x = transpose_28);
309
+ tensor<fp16, [512, 512]> var_571_to_fp16 = const()[name = tensor<string, []>("op_571_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30158720)))];
310
+ tensor<fp16, [512]> var_572_to_fp16 = const()[name = tensor<string, []>("op_572_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30683072)))];
311
+ tensor<fp16, [1, 1500, 512]> var_573_cast = linear(bias = var_572_to_fp16, weight = var_571_to_fp16, x = x_59_cast);
312
+ tensor<fp16, [1, 1500, 512]> x_61_cast = add(x = x_55_cast, y = var_573_cast);
313
+ tensor<int32, [1]> var_579_axes_0 = const()[name = tensor<string, []>("op_579_axes_0"), val = tensor<int32, [1]>([-1])];
314
+ tensor<fp16, [512]> blocks_4_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_4_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30684160)))];
315
+ tensor<fp16, [512]> blocks_4_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_4_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30685248)))];
316
+ tensor<fp16, [1, 1500, 512]> var_579_cast = layer_norm(axes = var_579_axes_0, beta = blocks_4_mlp_ln_bias_to_fp16, epsilon = var_504_to_fp16, gamma = blocks_4_mlp_ln_weight_to_fp16, x = x_61_cast);
317
+ tensor<fp16, [2048, 512]> var_588_to_fp16 = const()[name = tensor<string, []>("op_588_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30686336)))];
318
+ tensor<fp16, [2048]> var_589_to_fp16 = const()[name = tensor<string, []>("op_589_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(32783552)))];
319
+ tensor<fp16, [1, 1500, 2048]> input_41_cast = linear(bias = var_589_to_fp16, weight = var_588_to_fp16, x = var_579_cast);
320
+ tensor<string, []> x_65_mode_0 = const()[name = tensor<string, []>("x_65_mode_0"), val = tensor<string, []>("EXACT")];
321
+ tensor<fp16, [1, 1500, 2048]> x_65_cast = gelu(mode = x_65_mode_0, x = input_41_cast);
322
+ tensor<fp16, [512, 2048]> var_594_to_fp16 = const()[name = tensor<string, []>("op_594_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(32787712)))];
323
+ tensor<fp16, [512]> var_595_to_fp16 = const()[name = tensor<string, []>("op_595_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34884928)))];
324
+ tensor<fp16, [1, 1500, 512]> var_596_cast = linear(bias = var_595_to_fp16, weight = var_594_to_fp16, x = x_65_cast);
325
+ tensor<fp16, [1, 1500, 512]> x_67_cast = add(x = x_61_cast, y = var_596_cast);
326
+ tensor<int32, []> var_605 = const()[name = tensor<string, []>("op_605"), val = tensor<int32, []>(-1)];
327
+ tensor<int32, [1]> var_622_axes_0 = const()[name = tensor<string, []>("op_622_axes_0"), val = tensor<int32, [1]>([-1])];
328
+ tensor<fp16, [512]> blocks_5_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_5_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34886016)))];
329
+ tensor<fp16, [512]> blocks_5_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_5_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34887104)))];
330
+ tensor<fp16, []> var_611_to_fp16 = const()[name = tensor<string, []>("op_611_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
331
+ tensor<fp16, [1, 1500, 512]> var_622_cast = layer_norm(axes = var_622_axes_0, beta = blocks_5_attn_ln_bias_to_fp16, epsilon = var_611_to_fp16, gamma = blocks_5_attn_ln_weight_to_fp16, x = x_67_cast);
332
+ tensor<fp16, [512, 512]> var_633_to_fp16 = const()[name = tensor<string, []>("op_633_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34888192)))];
333
+ tensor<fp16, [512]> var_634_to_fp16 = const()[name = tensor<string, []>("op_634_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35412544)))];
334
+ tensor<fp16, [1, 1500, 512]> q_21_cast = linear(bias = var_634_to_fp16, weight = var_633_to_fp16, x = var_622_cast);
335
+ tensor<fp16, [512, 512]> var_637_to_fp16 = const()[name = tensor<string, []>("op_637_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35413632)))];
336
+ tensor<fp16, [512]> k_21_bias_0_to_fp16 = const()[name = tensor<string, []>("k_21_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35937984)))];
337
+ tensor<fp16, [1, 1500, 512]> k_21_cast = linear(bias = k_21_bias_0_to_fp16, weight = var_637_to_fp16, x = var_622_cast);
338
+ tensor<fp16, [512, 512]> var_641_to_fp16 = const()[name = tensor<string, []>("op_641_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35939072)))];
339
+ tensor<fp16, [512]> var_642_to_fp16 = const()[name = tensor<string, []>("op_642_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36463424)))];
340
+ tensor<fp16, [1, 1500, 512]> v_21_cast = linear(bias = var_642_to_fp16, weight = var_641_to_fp16, x = var_622_cast);
341
+ tensor<int32, [4]> var_650 = const()[name = tensor<string, []>("op_650"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
342
+ tensor<fp16, [1, 1500, 8, 64]> var_651_cast = reshape(shape = var_650, x = q_21_cast);
343
+ tensor<fp16, [1, 1, 1, 1]> const_52_to_fp16 = const()[name = tensor<string, []>("const_52_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
344
+ tensor<fp16, [1, 1500, 8, 64]> q_cast = mul(x = var_651_cast, y = const_52_to_fp16);
345
+ tensor<int32, [4]> var_657 = const()[name = tensor<string, []>("op_657"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
346
+ tensor<fp16, [1, 1500, 8, 64]> var_658_cast = reshape(shape = var_657, x = k_21_cast);
347
+ tensor<fp16, [1, 1, 1, 1]> const_53_to_fp16 = const()[name = tensor<string, []>("const_53_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
348
+ tensor<fp16, [1, 1500, 8, 64]> k_cast = mul(x = var_658_cast, y = const_53_to_fp16);
349
+ tensor<int32, [4]> var_664 = const()[name = tensor<string, []>("op_664"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
350
+ tensor<fp16, [1, 1500, 8, 64]> var_665_cast = reshape(shape = var_664, x = v_21_cast);
351
+ tensor<int32, [4]> var_666 = const()[name = tensor<string, []>("op_666"), val = tensor<int32, [4]>([0, 2, 1, 3])];
352
+ tensor<bool, []> qk_transpose_x_0 = const()[name = tensor<string, []>("qk_transpose_x_0"), val = tensor<bool, []>(false)];
353
+ tensor<bool, []> qk_transpose_y_0 = const()[name = tensor<string, []>("qk_transpose_y_0"), val = tensor<bool, []>(false)];
354
+ tensor<int32, [4]> transpose_22_perm_0 = const()[name = tensor<string, []>("transpose_22_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
355
+ tensor<int32, [4]> transpose_23_perm_0 = const()[name = tensor<string, []>("transpose_23_perm_0"), val = tensor<int32, [4]>([0, 2, 3, 1])];
356
+ tensor<fp16, [1, 8, 64, 1500]> transpose_25 = transpose(perm = transpose_23_perm_0, x = k_cast);
357
+ tensor<fp16, [1, 8, 1500, 64]> transpose_26 = transpose(perm = transpose_22_perm_0, x = q_cast);
358
+ tensor<fp16, [1, 8, 1500, 1500]> qk_cast = matmul(transpose_x = qk_transpose_x_0, transpose_y = qk_transpose_y_0, x = transpose_26, y = transpose_25);
359
+ tensor<fp16, [1, 8, 1500, 1500]> var_670_cast = softmax(axis = var_605, x = qk_cast);
360
+ tensor<bool, []> var_672_transpose_x_0 = const()[name = tensor<string, []>("op_672_transpose_x_0"), val = tensor<bool, []>(false)];
361
+ tensor<bool, []> var_672_transpose_y_0 = const()[name = tensor<string, []>("op_672_transpose_y_0"), val = tensor<bool, []>(false)];
362
+ tensor<fp16, [1, 8, 1500, 64]> transpose_27 = transpose(perm = var_666, x = var_665_cast);
363
+ tensor<fp16, [1, 8, 1500, 64]> var_672_cast = matmul(transpose_x = var_672_transpose_x_0, transpose_y = var_672_transpose_y_0, x = var_670_cast, y = transpose_27);
364
+ tensor<int32, [4]> var_673 = const()[name = tensor<string, []>("op_673"), val = tensor<int32, [4]>([0, 2, 1, 3])];
365
+ tensor<int32, [3]> concat_5 = const()[name = tensor<string, []>("concat_5"), val = tensor<int32, [3]>([1, 1500, 512])];
366
+ tensor<fp16, [1, 1500, 8, 64]> transpose_24 = transpose(perm = var_673, x = var_672_cast);
367
+ tensor<fp16, [1, 1500, 512]> x_71_cast = reshape(shape = concat_5, x = transpose_24);
368
+ tensor<fp16, [512, 512]> var_678_to_fp16 = const()[name = tensor<string, []>("op_678_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36464512)))];
369
+ tensor<fp16, [512]> var_679_to_fp16 = const()[name = tensor<string, []>("op_679_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36988864)))];
370
+ tensor<fp16, [1, 1500, 512]> var_680_cast = linear(bias = var_679_to_fp16, weight = var_678_to_fp16, x = x_71_cast);
371
+ tensor<fp16, [1, 1500, 512]> x_73_cast = add(x = x_67_cast, y = var_680_cast);
372
+ tensor<int32, [1]> var_686_axes_0 = const()[name = tensor<string, []>("op_686_axes_0"), val = tensor<int32, [1]>([-1])];
373
+ tensor<fp16, [512]> blocks_5_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_5_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36989952)))];
374
+ tensor<fp16, [512]> blocks_5_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_5_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36991040)))];
375
+ tensor<fp16, [1, 1500, 512]> var_686_cast = layer_norm(axes = var_686_axes_0, beta = blocks_5_mlp_ln_bias_to_fp16, epsilon = var_611_to_fp16, gamma = blocks_5_mlp_ln_weight_to_fp16, x = x_73_cast);
376
+ tensor<fp16, [2048, 512]> var_695_to_fp16 = const()[name = tensor<string, []>("op_695_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36992128)))];
377
+ tensor<fp16, [2048]> var_696_to_fp16 = const()[name = tensor<string, []>("op_696_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(39089344)))];
378
+ tensor<fp16, [1, 1500, 2048]> input_49_cast = linear(bias = var_696_to_fp16, weight = var_695_to_fp16, x = var_686_cast);
379
+ tensor<string, []> x_77_mode_0 = const()[name = tensor<string, []>("x_77_mode_0"), val = tensor<string, []>("EXACT")];
380
+ tensor<fp16, [1, 1500, 2048]> x_77_cast = gelu(mode = x_77_mode_0, x = input_49_cast);
381
+ tensor<fp16, [512, 2048]> var_701_to_fp16 = const()[name = tensor<string, []>("op_701_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(39093504)))];
382
+ tensor<fp16, [512]> var_702_to_fp16 = const()[name = tensor<string, []>("op_702_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41190720)))];
383
+ tensor<fp16, [1, 1500, 512]> var_703_cast = linear(bias = var_702_to_fp16, weight = var_701_to_fp16, x = x_77_cast);
384
+ tensor<fp16, [1, 1500, 512]> x_cast = add(x = x_73_cast, y = var_703_cast);
385
+ tensor<int32, [1]> var_716_axes_0 = const()[name = tensor<string, []>("op_716_axes_0"), val = tensor<int32, [1]>([-1])];
386
+ tensor<fp16, [512]> ln_post_weight_to_fp16 = const()[name = tensor<string, []>("ln_post_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41191808)))];
387
+ tensor<fp16, [512]> ln_post_bias_to_fp16 = const()[name = tensor<string, []>("ln_post_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41192896)))];
388
+ tensor<fp16, []> var_707_to_fp16 = const()[name = tensor<string, []>("op_707_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
389
+ tensor<fp16, [1, 1500, 512]> var_716_cast = layer_norm(axes = var_716_axes_0, beta = ln_post_bias_to_fp16, epsilon = var_707_to_fp16, gamma = ln_post_weight_to_fp16, x = x_cast);
390
+ tensor<string, []> var_716_cast_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_716_cast_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
391
+ tensor<fp32, [1, 1500, 512]> output = cast(dtype = var_716_cast_to_fp32_dtype_0, x = var_716_cast);
392
+ } -> (output);
393
+ }
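The MIL above spells out each encoder block op by op: layer_norm, the q/k/v linears, scaling of q and k by 64**-0.25 (the 0x1.6ap-2 constant), 8-head scaled dot-product attention over 1500 frames, the output linear with a residual add, then a second layer_norm and a 512→2048→512 "EXACT" GELU MLP with another residual. As a reading aid only, here is a minimal NumPy sketch of that per-block structure for the base encoder shapes; the parameter names are placeholders, not the actual tensors in weight.bin.

```python
# Minimal NumPy sketch of the per-block computation laid out in the MIL above
# (base encoder shapes: 1500 frames, d_model = 512, 8 heads, 2048-wide MLP).
# Parameter names in `p` are placeholders, not the real entries in weight.bin.
import numpy as np
from scipy.special import erf

def layer_norm(x, gamma, beta, eps=1e-5):   # eps ~= 0x1.5p-17 in the MIL
    mu = x.mean(-1, keepdims=True)
    var = x.var(-1, keepdims=True)
    return (x - mu) / np.sqrt(var + eps) * gamma + beta

def gelu(x):                                # "EXACT" mode gelu
    return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

def encoder_block(x, p, n_head=8):
    d = x.shape[-1]                         # 512
    scale = (d // n_head) ** -0.25          # 64**-0.25 ~= 0x1.6ap-2
    h = layer_norm(x, p["attn_ln_w"], p["attn_ln_b"])
    q = (h @ p["q_w"].T + p["q_b"]).reshape(1, -1, n_head, d // n_head) * scale
    k = (h @ p["k_w"].T + p["k_b"]).reshape(1, -1, n_head, d // n_head) * scale
    v = (h @ p["v_w"].T + p["v_b"]).reshape(1, -1, n_head, d // n_head)
    qk = np.einsum("bthd,bshd->bhts", q, k)              # [1, 8, 1500, 1500]
    w = np.exp(qk - qk.max(-1, keepdims=True))
    w /= w.sum(-1, keepdims=True)                        # softmax on the last axis
    o = np.einsum("bhts,bshd->bthd", w, v).reshape(1, -1, d)
    x = x + (o @ p["out_w"].T + p["out_b"])              # attention residual
    m = layer_norm(x, p["mlp_ln_w"], p["mlp_ln_b"])
    m = gelu(m @ p["mlp1_w"].T + p["mlp1_b"])            # 512 -> 2048
    return x + (m @ p["mlp2_w"].T + p["mlp2_b"])         # 2048 -> 512, MLP residual
```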
ggml-base.en-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1578c2b8925c0ff867a8bb96dd56c0eb75df3b909ca9507dc2aa1e437c296dc9
3
+ size 41193984
ggml-large-v1-encoder.mlmodelc.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d10d24a4272572fee07843ffc2e4e4ceebd1d0b0831a142dd2fdb3d56d5bf56e
3
- size 1177529527
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:205f888f1f31bd06ff6650bec836e332e6f84f28dfbc2c00378559162b3584f4
3
+ size 1177530049
ggml-large-v1-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9b73481ce4a6af49154956c7f9c72dc02d96448fa69651f5c5a2124edfc8e31
3
+ size 243
ggml-large-v1-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05fe28591b40616fa0c34ad7b853133623f5300923ec812acb11459c411acf3b
3
+ size 149
ggml-large-v1-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,65 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 6,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Linear" : 192,
23
+ "Matmul" : 64,
24
+ "Cast" : 2,
25
+ "Conv" : 2,
26
+ "Softmax" : 32,
27
+ "Add" : 65,
28
+ "LayerNorm" : 65,
29
+ "Mul" : 64,
30
+ "Transpose" : 129,
31
+ "Gelu" : 34,
32
+ "Reshape" : 128
33
+ },
34
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
35
+ "isUpdatable" : "0",
36
+ "availability" : {
37
+ "macOS" : "12.0",
38
+ "tvOS" : "15.0",
39
+ "visionOS" : "1.0",
40
+ "watchOS" : "8.0",
41
+ "iOS" : "15.0",
42
+ "macCatalyst" : "15.0"
43
+ },
44
+ "modelType" : {
45
+ "name" : "MLModelType_mlProgram"
46
+ },
47
+ "userDefinedMetadata" : {
48
+
49
+ },
50
+ "inputSchema" : [
51
+ {
52
+ "hasShapeFlexibility" : "0",
53
+ "isOptional" : "0",
54
+ "dataType" : "Float32",
55
+ "formattedType" : "MultiArray (Float32 1 × 80 × 3000)",
56
+ "shortDescription" : "",
57
+ "shape" : "[1, 80, 3000]",
58
+ "name" : "logmel_data",
59
+ "type" : "MultiArray"
60
+ }
61
+ ],
62
+ "generatedClassName" : "coreml_encoder_large_v1",
63
+ "method" : "predict"
64
+ }
65
+ ]
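The metadata declares a single Float32 input named logmel_data with shape [1, 80, 3000] (80 mel bins over 3000 frames, i.e. 30 s of audio) and a Float32 MultiArray output. whisper.cpp picks these .mlmodelc bundles up automatically when they sit next to the matching ggml model, so the following is only a hedged sketch for inspecting an encoder in isolation from Python, assuming coremltools >= 7.0 (which can open compiled .mlmodelc bundles via CompiledMLModel) on macOS; the path is illustrative.

```python
# Hedged sketch (macOS): running one of these compiled encoders directly,
# assuming coremltools >= 7.0 provides CompiledMLModel for .mlmodelc bundles.
import numpy as np
import coremltools as ct

encoder = ct.models.CompiledMLModel("ggml-large-v1-encoder.mlmodelc")

# 30 s of audio -> 80 mel bins x 3000 frames, as declared in inputSchema.
logmel = np.zeros((1, 80, 3000), dtype=np.float32)

states = encoder.predict({"logmel_data": logmel})["output"]
print(states.shape)   # encoder states; (1, 1500, 1280) for the large encoders
```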
ggml-large-v1-encoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
ggml-large-v1-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe6163af221e89e96ebdd4163bddbf79670be7ee6aac34f1d662ce42f6fb014d
3
+ size 1273684480
ggml-large-v2-encoder.mlmodelc.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c65d28737f51d09dff3b9da9d26c2ab6f5d82857c9b049ce383d64c2502a5541
3
- size 1174643458
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08d4306667aa7378a8ed9ecbdc94337fe01a2d71c9afe40c4e8dfb967f7bfd41
3
+ size 1174643634
ggml-large-v2-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9b73481ce4a6af49154956c7f9c72dc02d96448fa69651f5c5a2124edfc8e31
3
+ size 243
ggml-large-v2-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05fe28591b40616fa0c34ad7b853133623f5300923ec812acb11459c411acf3b
3
+ size 149
ggml-large-v2-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,65 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 6,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Linear" : 192,
23
+ "Matmul" : 64,
24
+ "Cast" : 2,
25
+ "Conv" : 2,
26
+ "Softmax" : 32,
27
+ "Add" : 65,
28
+ "LayerNorm" : 65,
29
+ "Mul" : 64,
30
+ "Transpose" : 129,
31
+ "Gelu" : 34,
32
+ "Reshape" : 128
33
+ },
34
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
35
+ "isUpdatable" : "0",
36
+ "availability" : {
37
+ "macOS" : "12.0",
38
+ "tvOS" : "15.0",
39
+ "visionOS" : "1.0",
40
+ "watchOS" : "8.0",
41
+ "iOS" : "15.0",
42
+ "macCatalyst" : "15.0"
43
+ },
44
+ "modelType" : {
45
+ "name" : "MLModelType_mlProgram"
46
+ },
47
+ "userDefinedMetadata" : {
48
+
49
+ },
50
+ "inputSchema" : [
51
+ {
52
+ "hasShapeFlexibility" : "0",
53
+ "isOptional" : "0",
54
+ "dataType" : "Float32",
55
+ "formattedType" : "MultiArray (Float32 1 × 80 × 3000)",
56
+ "shortDescription" : "",
57
+ "shape" : "[1, 80, 3000]",
58
+ "name" : "logmel_data",
59
+ "type" : "MultiArray"
60
+ }
61
+ ],
62
+ "generatedClassName" : "coreml_encoder_large_v2",
63
+ "method" : "predict"
64
+ }
65
+ ]
ggml-large-v2-encoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
ggml-large-v2-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:42f92759b6f55ec227a3cb7e042c0424219cc781818ed3b8b0e72743714ed5d7
3
+ size 1273684480
ggml-large-v3-encoder.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9be410daad332c1756c34aa119479ef0b0de80c770b094da4ab5cc29ffa417ee
3
+ size 1174643677
ggml-large-v3-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9b73481ce4a6af49154956c7f9c72dc02d96448fa69651f5c5a2124edfc8e31
3
+ size 243
ggml-large-v3-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05fe28591b40616fa0c34ad7b853133623f5300923ec812acb11459c411acf3b
3
+ size 149
ggml-large-v3-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,65 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 6,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Linear" : 192,
23
+ "Matmul" : 64,
24
+ "Cast" : 2,
25
+ "Conv" : 2,
26
+ "Softmax" : 32,
27
+ "Add" : 65,
28
+ "LayerNorm" : 65,
29
+ "Mul" : 64,
30
+ "Transpose" : 129,
31
+ "Gelu" : 34,
32
+ "Reshape" : 128
33
+ },
34
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
35
+ "isUpdatable" : "0",
36
+ "availability" : {
37
+ "macOS" : "12.0",
38
+ "tvOS" : "15.0",
39
+ "visionOS" : "1.0",
40
+ "watchOS" : "8.0",
41
+ "iOS" : "15.0",
42
+ "macCatalyst" : "15.0"
43
+ },
44
+ "modelType" : {
45
+ "name" : "MLModelType_mlProgram"
46
+ },
47
+ "userDefinedMetadata" : {
48
+
49
+ },
50
+ "inputSchema" : [
51
+ {
52
+ "hasShapeFlexibility" : "0",
53
+ "isOptional" : "0",
54
+ "dataType" : "Float32",
55
+ "formattedType" : "MultiArray (Float32 1 × 80 × 3000)",
56
+ "shortDescription" : "",
57
+ "shape" : "[1, 80, 3000]",
58
+ "name" : "logmel_data",
59
+ "type" : "MultiArray"
60
+ }
61
+ ],
62
+ "generatedClassName" : "coreml_encoder_large",
63
+ "method" : "predict"
64
+ }
65
+ ]
ggml-large-v3-encoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
ggml-large-v3-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3c79ca8b3de455956bb7e2af31f525413431950048a54b2e4c72c39d8c5acc7
3
+ size 1273684480
ggml-medium-encoder.mlmodelc.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:79b0b8d436d47d3f24dd3afc91f19447dd686a4f37521b2f6d9c30a642133fbd
3
  size 567829413
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:991295c77a7f73bd04b9310e8ca2aa279826acb7c643f7973cd27fa892487377
3
  size 567829413
ggml-medium-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:adbe456375e7eb3407732a426ecb65bbda86860e4aa801f3a696b70b8a533cdd
3
+ size 207
ggml-medium-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05fe28591b40616fa0c34ad7b853133623f5300923ec812acb11459c411acf3b
3
+ size 149
ggml-medium-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,64 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 6,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Linear" : 144,
23
+ "Matmul" : 48,
24
+ "Cast" : 2,
25
+ "Conv" : 2,
26
+ "Softmax" : 24,
27
+ "Add" : 49,
28
+ "LayerNorm" : 49,
29
+ "Mul" : 48,
30
+ "Transpose" : 97,
31
+ "Gelu" : 26,
32
+ "Reshape" : 96
33
+ },
34
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
35
+ "isUpdatable" : "0",
36
+ "availability" : {
37
+ "macOS" : "12.0",
38
+ "tvOS" : "15.0",
39
+ "watchOS" : "8.0",
40
+ "iOS" : "15.0",
41
+ "macCatalyst" : "15.0"
42
+ },
43
+ "modelType" : {
44
+ "name" : "MLModelType_mlProgram"
45
+ },
46
+ "userDefinedMetadata" : {
47
+
48
+ },
49
+ "inputSchema" : [
50
+ {
51
+ "hasShapeFlexibility" : "0",
52
+ "isOptional" : "0",
53
+ "dataType" : "Float32",
54
+ "formattedType" : "MultiArray (Float32 1 × 80 × 3000)",
55
+ "shortDescription" : "",
56
+ "shape" : "[1, 80, 3000]",
57
+ "name" : "logmel_data",
58
+ "type" : "MultiArray"
59
+ }
60
+ ],
61
+ "generatedClassName" : "coreml_encoder_medium",
62
+ "method" : "predict"
63
+ }
64
+ ]
ggml-medium-encoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
ggml-medium-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a188b0e4e3109f28f38f1f47ea2497ffe623923419df8e1ae12cb5f809a1815
3
+ size 614507008
ggml-medium.en-encoder.mlmodelc.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cdc44fee3c62b5743913e3147ed75f4e8ecfb52dd7a0f0f7387094b406ff0ee6
3
  size 566993085
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57cf30e4542de7ce7b798573d99e3198ebc435c2fd071672f9fd5c71d2984b99
3
  size 566993085
ggml-medium.en-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:adbe456375e7eb3407732a426ecb65bbda86860e4aa801f3a696b70b8a533cdd
3
+ size 207
ggml-medium.en-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05fe28591b40616fa0c34ad7b853133623f5300923ec812acb11459c411acf3b
3
+ size 149
ggml-medium.en-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,64 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 6,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Linear" : 144,
23
+ "Matmul" : 48,
24
+ "Cast" : 2,
25
+ "Conv" : 2,
26
+ "Softmax" : 24,
27
+ "Add" : 49,
28
+ "LayerNorm" : 49,
29
+ "Mul" : 48,
30
+ "Transpose" : 97,
31
+ "Gelu" : 26,
32
+ "Reshape" : 96
33
+ },
34
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
35
+ "isUpdatable" : "0",
36
+ "availability" : {
37
+ "macOS" : "12.0",
38
+ "tvOS" : "15.0",
39
+ "watchOS" : "8.0",
40
+ "iOS" : "15.0",
41
+ "macCatalyst" : "15.0"
42
+ },
43
+ "modelType" : {
44
+ "name" : "MLModelType_mlProgram"
45
+ },
46
+ "userDefinedMetadata" : {
47
+
48
+ },
49
+ "inputSchema" : [
50
+ {
51
+ "hasShapeFlexibility" : "0",
52
+ "isOptional" : "0",
53
+ "dataType" : "Float32",
54
+ "formattedType" : "MultiArray (Float32 1 × 80 × 3000)",
55
+ "shortDescription" : "",
56
+ "shape" : "[1, 80, 3000]",
57
+ "name" : "logmel_data",
58
+ "type" : "MultiArray"
59
+ }
60
+ ],
61
+ "generatedClassName" : "coreml_encoder_medium_en",
62
+ "method" : "predict"
63
+ }
64
+ ]
ggml-medium.en-encoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
ggml-medium.en-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63a6c6a2c6cb26aaca930ccc03fe96e2e5820d1ae2802010900eff36b226674d
3
+ size 614507008
ggml-small-encoder.mlmodelc.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:de43fb9fed471e95c19e60ae67575c2bf09e8fb607016da171b06ddad313988b
3
  size 163083239
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8b65b12409d1fd32029d42992903f4e5aef6679da3a4835bcb85738a19e1057
3
  size 163083239
ggml-small-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18ad2072ae82872c2ba8a187071e1e7d6c1105253685e7aa95138adcf07874e0
3
+ size 207
ggml-small-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05fe28591b40616fa0c34ad7b853133623f5300923ec812acb11459c411acf3b
3
+ size 149
ggml-small-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,64 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float32",
10
+ "formattedType" : "MultiArray (Float32)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 6,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Linear" : 72,
23
+ "Matmul" : 24,
24
+ "Cast" : 2,
25
+ "Conv" : 2,
26
+ "Softmax" : 12,
27
+ "Add" : 25,
28
+ "LayerNorm" : 25,
29
+ "Mul" : 24,
30
+ "Transpose" : 49,
31
+ "Gelu" : 14,
32
+ "Reshape" : 48
33
+ },
34
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
35
+ "isUpdatable" : "0",
36
+ "availability" : {
37
+ "macOS" : "12.0",
38
+ "tvOS" : "15.0",
39
+ "watchOS" : "8.0",
40
+ "iOS" : "15.0",
41
+ "macCatalyst" : "15.0"
42
+ },
43
+ "modelType" : {
44
+ "name" : "MLModelType_mlProgram"
45
+ },
46
+ "userDefinedMetadata" : {
47
+
48
+ },
49
+ "inputSchema" : [
50
+ {
51
+ "hasShapeFlexibility" : "0",
52
+ "isOptional" : "0",
53
+ "dataType" : "Float32",
54
+ "formattedType" : "MultiArray (Float32 1 × 80 × 3000)",
55
+ "shortDescription" : "",
56
+ "shape" : "[1, 80, 3000]",
57
+ "name" : "logmel_data",
58
+ "type" : "MultiArray"
59
+ }
60
+ ],
61
+ "generatedClassName" : "coreml_encoder_small",
62
+ "method" : "predict"
63
+ }
64
+ ]
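The mlProgramOperationTypeHistogram entries in these metadata files differ only through encoder depth: the counts work out to 32 blocks for the large variants, 24 for medium, and 12 for small. The sketch below is a small consistency check; the per-block counts are read off the block structure visible in the base MIL earlier, while the attribution of the leftover ops (conv stem, positional-embedding add, final ln_post, fp16/fp32 casts) is an assumption.

```python
# Consistency check: derive each histogram above from the encoder depth alone.
def expected_histogram(n_blocks: int) -> dict:
    return {
        "Linear":    6 * n_blocks,       # q, k, v, attn out, mlp up, mlp down
        "Matmul":    2 * n_blocks,       # q@k and attn@v
        "Softmax":   1 * n_blocks,
        "Mul":       2 * n_blocks,       # q and k scaling
        "Add":       2 * n_blocks + 1,   # two residuals per block (+1 assumed: pos. embedding)
        "LayerNorm": 2 * n_blocks + 1,   # attn_ln + mlp_ln per block, plus ln_post
        "Transpose": 4 * n_blocks + 1,   # +1 assumed: stem/output plumbing
        "Reshape":   4 * n_blocks,
        "Gelu":      1 * n_blocks + 2,   # +2 assumed: the two conv-stem activations
        "Conv":      2,
        "Cast":      2,                  # fp32 input -> fp16, fp16 output -> fp32
    }

# Matches the histograms published above.
assert expected_histogram(12)["Linear"] == 72     # small / small.en
assert expected_histogram(24)["Linear"] == 144    # medium / medium.en
assert expected_histogram(32)["Linear"] == 192    # large v1 / v2 / v3
```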
ggml-small-encoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
ggml-small-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87eed4ae76f11a2d4a50786bc7423d4b45c2d0d9ca05577a3bd2557452072eaf
3
+ size 176339456
ggml-small.en-encoder.mlmodelc.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b2ef1c506378b825b4b4341979a93e1656b5d6c129f17114cfb8fb78aabc2f89
3
  size 162952446
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:677fad005c458eba9a1d69a8e4a0bef1b129b208585ca90a4dd8ba4f8610eaed
3
  size 162952446