ggbetz committed
Commit e9f5ad0
1 Parent(s): a5f082c

Upload results for model google/gemma-2b

data/google/gemma-2b/orig/results_24-03-17-01:14:20.json ADDED
@@ -0,0 +1,192 @@
+ {
+   "results": {
+     "logiqa2_base": {
+       "acc,none": 0.23091603053435114,
+       "acc_stderr,none": 0.01063226588725422,
+       "alias": "logiqa2_base"
+     },
+     "logiqa_base": {
+       "acc,none": 0.2476038338658147,
+       "acc_stderr,none": 0.017264816260627345,
+       "alias": "logiqa_base"
+     },
+     "lsat-ar_base": {
+       "acc,none": 0.21304347826086956,
+       "acc_stderr,none": 0.027057754389936205,
+       "alias": "lsat-ar_base"
+     },
+     "lsat-lr_base": {
+       "acc,none": 0.19215686274509805,
+       "acc_stderr,none": 0.017463551875159446,
+       "alias": "lsat-lr_base"
+     },
+     "lsat-rc_base": {
+       "acc,none": 0.137546468401487,
+       "acc_stderr,none": 0.021039004061731873,
+       "alias": "lsat-rc_base"
+     }
+   },
+   "configs": {
+     "logiqa2_base": {
+       "task": "logiqa2_base",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/logikon-bench",
+       "dataset_name": "logiqa2",
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "logiqa_base": {
+       "task": "logiqa_base",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/logikon-bench",
+       "dataset_name": "logiqa",
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "lsat-ar_base": {
+       "task": "lsat-ar_base",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/logikon-bench",
+       "dataset_name": "lsat-ar",
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "lsat-lr_base": {
+       "task": "lsat-lr_base",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/logikon-bench",
+       "dataset_name": "lsat-lr",
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "lsat-rc_base": {
+       "task": "lsat-rc_base",
+       "group": "logikon-bench",
+       "dataset_path": "logikon/logikon-bench",
+       "dataset_name": "lsat-rc",
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     }
+   },
+   "versions": {
+     "logiqa2_base": 0.0,
+     "logiqa_base": 0.0,
+     "lsat-ar_base": 0.0,
+     "lsat-lr_base": 0.0,
+     "lsat-rc_base": 0.0
+   },
+   "n-shot": {
+     "logiqa2_base": 0,
+     "logiqa_base": 0,
+     "lsat-ar_base": 0,
+     "lsat-lr_base": 0,
+     "lsat-rc_base": 0
+   },
+   "config": {
+     "model": "vllm",
+     "model_args": "pretrained=google/gemma-2b,revision=main,dtype=bfloat16,tensor_parallel_size=1,gpu_memory_utilization=0.5,trust_remote_code=true,max_length=2048",
+     "batch_size": "auto",
+     "batch_sizes": [],
+     "device": null,
+     "use_cache": null,
+     "limit": null,
+     "bootstrap_iters": 100000,
+     "gen_kwargs": null
+   },
+   "git_hash": "f4fd67a"
+ }
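
For reference, a minimal sketch (not part of the upload) of how these results could be read back, assuming a local copy of the file at the path shown in the diff header:

import json

# Assumed local copy of the uploaded results file.
path = "data/google/gemma-2b/orig/results_24-03-17-01:14:20.json"

with open(path) as f:
    results = json.load(f)

# Print zero-shot accuracy (with standard error) for each logikon-bench task.
for task, metrics in results["results"].items():
    acc = metrics["acc,none"]
    stderr = metrics["acc_stderr,none"]
    print(f"{task}: acc = {acc:.3f} +/- {stderr:.3f}")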