Modalities: Text
Formats: json
Languages: English
Libraries: Datasets, Dask
fxmeng committed
Commit 2fa9216 · verified · 1 parent: 3878b22

Upload create.ipynb

Files changed (1):
  1. create.ipynb  +473 -0
create.ipynb ADDED
@@ -0,0 +1,473 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from datasets import load_dataset, load_from_disk"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "boolq = load_dataset(\"google/boolq\")\n",
+ "boolq"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "commonsense_boolq = []\n",
+ "for data in boolq['train']:\n",
+ "    question = data['question']\n",
+ "    answer = {True:'true', False:'false'}[data['answer']]\n",
+ "    commonsense_boolq.append(\n",
+ "        {\n",
+ "            'instruction': f\"Please answer the following question with true or false, question: {question}?\\n\\nAnswer format: true/false\",\n",
+ "            'answer': answer,\n",
+ "            'input': '',\n",
+ "            'output': f'the correct answer is {answer}'\n",
+ "        }\n",
+ "    )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "piqa = load_dataset(\"skrishna/piqa_preop\")\n",
+ "piqa"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "commonsense_piqa = []\n",
+ "for data in piqa['train']:\n",
+ "    goal = data['goal']\n",
+ "    sol1 = data['sol1']\n",
+ "    sol2 = data['sol2']\n",
+ "    label = data['label']+1\n",
+ "    commonsense_piqa.append(\n",
+ "        {\n",
+ "            'instruction': f\"Please choose the correct solution to the question: {goal}\\n\\nSolution1: {sol1}\\n\\nSolution2: {sol2}\\n\\nAnswer format: solution1/solution2\",\n",
+ "            'answer': f\"solution{label}\",\n",
+ "            'input': '',\n",
+ "            'output': f'the correct answer is solution{label}'\n",
+ "        }\n",
+ "    )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "siqa = load_dataset(\"lighteval/siqa\")\n",
+ "siqa"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "commonsense_siqa = []\n",
+ "for data in siqa['train']:\n",
+ "    context = data['context']\n",
+ "    question = data['question']\n",
+ "    answerA = data['answerA']\n",
+ "    answerB = data['answerB']\n",
+ "    answerC = data['answerC']\n",
+ "    label = data['label']\n",
+ "    commonsense_siqa.append(\n",
+ "        {\n",
+ "            'instruction': f\"Please choose the correct answer to the question: {context} {question}\\n\\nAnswer1: {answerA} Answer2: {answerB} Answer3: {answerC}\\n\\nAnswer format: answer1/answer2/answer3\",\n",
+ "            'answer': f\"answer{label}\",\n",
+ "            'input': '',\n",
+ "            'output': f'the correct answer is answer{label}'\n",
+ "        }\n",
+ "    )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "hellaswag = load_dataset(\"/Users/mengfanxu/hellaswag/hellaswag_train\")\n",
+ "hellaswag"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "commonsense_hellaswag = []\n",
+ "for data in hellaswag['train']:\n",
+ "    activity_label = data['activity_label']\n",
+ "    ctx = data['ctx']\n",
+ "    ed1 = data['endings'][0]\n",
+ "    ed2 = data['endings'][1]\n",
+ "    ed3 = data['endings'][2]\n",
+ "    ed4 = data['endings'][3]\n",
+ "    label = int(data['label'])+1  # labels may be stored as strings; cast before adding 1\n",
+ "    commonsense_hellaswag.append(\n",
+ "        {\n",
+ "            'instruction': f\"Please choose the correct ending to complete the given sentence: {activity_label}: {ctx}\\n\\nEnding1: {ed1} Ending2: {ed2} Ending3: {ed3} Ending4: {ed4}\\n\\nAnswer format: ending1/ending2/ending3/ending4\",\n",
+ "            'answer': f\"ending{label}\",\n",
+ "            'input': '',\n",
+ "            'output': f'the correct answer is ending{label}'\n",
+ "        }\n",
+ "    )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "openbookqa = load_from_disk(\"/Users/mengfanxu/openbookqa\")\n",
+ "openbookqa"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "commonsense_openbookqa = []\n",
+ "for data in openbookqa['train']:\n",
+ "    question_stem = data['question_stem']\n",
+ "    ed1 = data['choices']['text'][0]\n",
+ "    ed2 = data['choices']['text'][1]\n",
+ "    ed3 = data['choices']['text'][2]\n",
+ "    ed4 = data['choices']['text'][3]\n",
+ "    label = {\"A\":1,\"B\":2,\"C\":3,\"D\":4}[data['answerKey']]\n",
+ "    commonsense_openbookqa.append(\n",
+ "        {\n",
+ "            'instruction': f\"Please choose the correct answer to the question: {question_stem}\\n\\nAnswer1: {ed1} Answer2: {ed2} Answer3: {ed3} Answer4: {ed4}\\n\\nAnswer format: answer1/answer2/answer3/answer4\",\n",
+ "            'answer': f\"answer{label}\",\n",
+ "            'input': '',\n",
+ "            'output': f'the correct answer is answer{label}'\n",
+ "        }\n",
+ "    )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "arc_c = load_dataset(\"allenai/ai2_arc\", \"ARC-Challenge\")\n",
+ "arc_c"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "commonsense_arc_c = []\n",
+ "for data in arc_c['train']:\n",
+ "    question = data['question']\n",
+ "    ed1 = data['choices']['text'][0]\n",
+ "    ed2 = data['choices']['text'][1]\n",
+ "    ed3 = data['choices']['text'][2]\n",
+ "    instruction = f\"Please choose the correct answer to the question: {question}\\n\\nAnswer1: {ed1} Answer2: {ed2} Answer3: {ed3}\\n\\nAnswer format: answer1/answer2/answer3\"\n",
+ "    if len(data['choices']['text'])>=4:\n",
+ "        ed4 = data['choices']['text'][3]\n",
+ "        instruction = f\"Please choose the correct answer to the question: {question}\\n\\nAnswer1: {ed1} Answer2: {ed2} Answer3: {ed3} Answer4: {ed4}\\n\\nAnswer format: answer1/answer2/answer3/answer4\"\n",
+ "    if len(data['choices']['text'])>=5:\n",
+ "        ed5 = data['choices']['text'][4]\n",
+ "        instruction = f\"Please choose the correct answer to the question: {question}\\n\\nAnswer1: {ed1} Answer2: {ed2} Answer3: {ed3} Answer4: {ed4} Answer5: {ed5}\\n\\nAnswer format: answer1/answer2/answer3/answer4/answer5\"\n",
+ "    label = {\"A\":1,\"B\":2,\"C\":3,\"D\":4,\"E\":5, \"2\":2, \"4\":4, \"1\":1, \"3\":3}[data['answerKey']]\n",
+ "\n",
+ "    commonsense_arc_c.append(\n",
+ "        {\n",
+ "            'instruction': instruction,\n",
+ "            'answer': f\"answer{label}\",\n",
+ "            'input': '',\n",
+ "            'output': f'the correct answer is answer{label}'\n",
+ "        }\n",
+ "    )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "arc_e = load_dataset(\"allenai/ai2_arc\", \"ARC-Easy\")\n",
+ "arc_e"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "commonsense_arc_e = []\n",
+ "for data in arc_e['train']:\n",
+ "    question = data['question']\n",
+ "    ed1 = data['choices']['text'][0]\n",
+ "    ed2 = data['choices']['text'][1]\n",
+ "    ed3 = data['choices']['text'][2]\n",
+ "    instruction = f\"Please choose the correct answer to the question: {question}\\n\\nAnswer1: {ed1} Answer2: {ed2} Answer3: {ed3}\\n\\nAnswer format: answer1/answer2/answer3\"\n",
+ "    if len(data['choices']['text'])>=4:\n",
+ "        ed4 = data['choices']['text'][3]\n",
+ "        instruction = f\"Please choose the correct answer to the question: {question}\\n\\nAnswer1: {ed1} Answer2: {ed2} Answer3: {ed3} Answer4: {ed4}\\n\\nAnswer format: answer1/answer2/answer3/answer4\"\n",
+ "    if len(data['choices']['text'])>=5:\n",
+ "        ed5 = data['choices']['text'][4]\n",
+ "        instruction = f\"Please choose the correct answer to the question: {question}\\n\\nAnswer1: {ed1} Answer2: {ed2} Answer3: {ed3} Answer4: {ed4} Answer5: {ed5}\\n\\nAnswer format: answer1/answer2/answer3/answer4/answer5\"\n",
+ "    label = {\"A\":1,\"B\":2,\"C\":3,\"D\":4,\"E\":5, \"2\":2, \"4\":4, \"1\":1, \"3\":3}[data['answerKey']]\n",
+ "\n",
+ "    commonsense_arc_e.append(\n",
+ "        {\n",
+ "            'instruction': instruction,\n",
+ "            'answer': f\"answer{label}\",\n",
+ "            'input': '',\n",
+ "            'output': f'the correct answer is answer{label}'\n",
+ "        }\n",
+ "    )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "winogrande = load_dataset(\"/Users/mengfanxu/Downloads/winogrande_1.1/train\")\n",
+ "winogrande"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "commonsense_winogrande = []\n",
+ "for data in winogrande['train']:\n",
+ "    sentence = data['sentence']\n",
+ "    option1 = data['option1']\n",
+ "    option2 = data['option2']\n",
+ "    answer = data['answer']\n",
+ "\n",
+ "    commonsense_winogrande.append(\n",
+ "        {\n",
+ "            'instruction': f\"Please choose the correct answer to fill in the blank to complete the given sentence: {sentence}\\n\\nOption1: {option1} Option2: {option2} Answer format: option1/option2\",\n",
+ "            'answer': f\"option{answer}\",\n",
+ "            'input': '',\n",
+ "            'output': f'the correct answer is option{answer}'\n",
+ "        }\n",
+ "    )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "eval_boolq = load_from_disk(\"/Users/mengfanxu/Downloads/winogrande_1.1/PiSSA/inference/data/eval_boolq\")\n",
+ "eval_piqa = load_from_disk(\"/Users/mengfanxu/Downloads/winogrande_1.1/PiSSA/inference/data/eval_piqa\")\n",
+ "eval_social_interaction_qa = load_from_disk(\"/Users/mengfanxu/Downloads/winogrande_1.1/PiSSA/inference/data/eval_social_interaction_qa\")\n",
+ "eval_hellaswag = load_from_disk(\"/Users/mengfanxu/Downloads/winogrande_1.1/PiSSA/inference/data/eval_hellaswag\")\n",
+ "eval_winogrande = load_from_disk(\"/Users/mengfanxu/Downloads/winogrande_1.1/PiSSA/inference/data/eval_winogrande\")\n",
+ "eval_arc_challenge = load_from_disk(\"/Users/mengfanxu/Downloads/winogrande_1.1/PiSSA/inference/data/eval_arc_challenge\")\n",
+ "eval_arc_easy = load_from_disk(\"/Users/mengfanxu/Downloads/winogrande_1.1/PiSSA/inference/data/eval_arc_easy\")\n",
+ "eval_openbookqa = load_from_disk(\"/Users/mengfanxu/Downloads/winogrande_1.1/PiSSA/inference/data/eval_openbookqa\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "with open(\"commonsense_filtered/boolq/train.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in commonsense_boolq:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")\n",
+ "\n",
+ "with open(\"commonsense_filtered/boolq/test.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in eval_boolq:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "with open(\"commonsense_filtered/piqa/train.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in commonsense_piqa:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")\n",
+ "\n",
+ "with open(\"commonsense_filtered/piqa/test.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in eval_piqa:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "with open(\"commonsense_filtered/siqa/train.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in commonsense_siqa:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")\n",
+ "\n",
+ "with open(\"commonsense_filtered/siqa/test.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in eval_social_interaction_qa:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "with open(\"commonsense_filtered/hellaswag/train.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in commonsense_hellaswag:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")\n",
+ "\n",
+ "with open(\"commonsense_filtered/hellaswag/test.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in eval_hellaswag:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "with open(\"commonsense_filtered/winogrande/train.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in commonsense_winogrande:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")\n",
+ "\n",
+ "with open(\"commonsense_filtered/winogrande/test.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in eval_winogrande:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "with open(\"commonsense_filtered/arc_challenge/train.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in commonsense_arc_c:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")\n",
+ "\n",
+ "with open(\"commonsense_filtered/arc_challenge/test.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in eval_arc_challenge:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "with open(\"commonsense_filtered/arc_easy/train.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in commonsense_arc_e:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")\n",
+ "\n",
+ "with open(\"commonsense_filtered/arc_easy/test.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in eval_arc_easy:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "\n",
+ "with open(\"commonsense_filtered/openbookqa/train.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in commonsense_openbookqa:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")\n",
+ "\n",
+ "with open(\"commonsense_filtered/openbookqa/test.json\", \"w\", encoding=\"utf-8\") as f:\n",
+ "    for item in eval_openbookqa:\n",
+ "        f.write(json.dumps(item, ensure_ascii=False) + \"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "base",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
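
Note: each train.json and test.json written above is in JSON Lines format (one JSON object per line); the training records carry instruction, answer, input, and output fields, and the write cells assume the commonsense_filtered/<task>/ directories already exist. Below is a minimal sketch, not part of the committed notebook, for loading one of the saved splits back with the datasets JSON loader; the boolq paths simply mirror the write cells and should be adjusted to your own output location.

from datasets import load_dataset

# The "json" loader accepts one-JSON-object-per-line files directly.
boolq_sft = load_dataset(
    "json",
    data_files={
        "train": "commonsense_filtered/boolq/train.json",
        "test": "commonsense_filtered/boolq/test.json",
    },
)

print(boolq_sft)
print(boolq_sft["train"][0])  # expect instruction / answer / input / output keys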