Update README.md
README.md CHANGED
@@ -90,9 +90,11 @@ configs:
 ---
 
 # Overview
+We are delighted to release Multi-SWE-bench.
+Multi-SWE-bench aims to build a multi-language benchmark dataset of real software engineering scenarios for evaluating the ability of LLMs to solve real-world software engineering problems.
+The dataset currently covers seven languages: C, C++, Java, JavaScript, TypeScript, Rust, and Go.
 
-#
-## Data Instances Structure
+# Data Instances Structure
 An example of a Multi-SWE-bench datum is as follows:
 ```
 org: (str) - Organization name identifier from Github.
@@ -116,4 +118,98 @@ fix_patch_result: (dict) - The result after all the patches were applied.
 instance_id: (str) - A formatted instance identifier, usually as org__repo_PR-number.
 ```
 
-
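+For illustration, the `instance_id` format described above can be reconstructed from its parts as follows (a minimal sketch with hypothetical placeholder values; the real fields come from a loaded datum):
+```python
+# Hypothetical placeholder values for a datum's org/repo/number fields
+org, repo, number = "example-org", "example-repo", 123
+instance_id = f"{org}__{repo}_PR-{number}"
+print(instance_id)  # example-org__example-repo_PR-123
+```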
+# Usage
+Because Hugging Face's `datasets` library does not support complex nested structures,
+the nested structures within the following fields were serialized in the original Hugging Face dataset,
+and you will have to deserialize them before using this dataset:
+```python
+SERIALIZATION_FIELDS = [
+    'base', 'fixed tests', 'p2p_tests', 'f2p_tests',
+    's2p_tests', 'n2p_tests', 'run_result',
+    'test_patch_result', 'fix_patch_result'
+]
+```
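+For example, a single field of one record can be restored like this (a minimal sketch; it assumes the serialized values are JSON strings and reuses the `Hagon/test2` split from the sample below):
+```python
+import json
+from datasets import load_dataset
+
+ds = load_dataset("Hagon/test2", split="cpp")
+raw = ds[0]["base"]                      # serialized JSON string
+base = json.loads(raw) if raw else None  # nested dict restored
+print(type(base))
+```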
+
+## Sample
+```python
+from datasets import load_dataset, config
+import os
+import json
+
+# Nested structures within these fields were serialized in the original
+# dataset and need to be deserialized before use
+SERIALIZATION_FIELDS = [
+    'base', 'fixed tests', 'p2p_tests', 'f2p_tests',
+    's2p_tests', 'n2p_tests', 'run_result',
+    'test_patch_result', 'fix_patch_result'
+]
+CACHE_DIR = 'D:/huggingface_cache'  # adjust to your local cache path
+
+def safe_deserialize(value):
+    """Safely deserialize a JSON string, returning the raw value on failure"""
+    try:
+        if value in (None, ''):
+            return None
+        return json.loads(value)
+    except (TypeError, json.JSONDecodeError) as e:
+        print(f"Deserialization failed: {str(e)}")
+        return value
+
+def load_hf_dataset():
+    """Load the HuggingFace dataset"""
+    os.environ['HF_HOME'] = CACHE_DIR
+    config.HF_DATASETS_CACHE = CACHE_DIR
+    return load_dataset("Hagon/test2", split='cpp')
+
+def analyze_dataset_structure(dataset):
+    """Analyze and print the dataset structure"""
+    print(f"Dataset size: {len(dataset)}")
+    print("\nDataset structure analysis: " + "-" * 50)
+    print("Field names and types:")
+    for name, dtype in dataset.features.items():
+        print(f"  {name}: {str(dtype)}")
+
+def print_data_types(dataset, sample_count=3):
+    """Print the data types of the first few samples"""
+    print(f"\nData types of the first {sample_count} samples:")
+    for i in range(min(sample_count, len(dataset))):
+        print(f"\nSample {i}:")
+        for key, value in dataset[i].items():
+            print(f"  {key}: {type(value).__name__} ({len(str(value))} chars)")
+
+def analyze_serialization(dataset, sample_count=3):
+    """Analyze the deserialization results of the serialized fields"""
+    print("\nDeserialization result analysis: " + "-" * 50)
+    for i in range(min(sample_count, len(dataset))):
+        print(f"\nSample {i}:")
+        item = dataset[i]
+        for key in SERIALIZATION_FIELDS:
+            # Feature names use underscores, e.g. 'fixed tests' is stored as 'fixed_tests'
+            safe_key = key.replace(' ', '_')
+            raw_value = item.get(safe_key)
+            deserialized = safe_deserialize(raw_value)
+
+            print(f"Field [{key}]:")
+            print(f"  Original type: {type(raw_value).__name__}")
+            print(f"  Deserialized type: {type(deserialized).__name__ if deserialized else 'None'}")
+
+            if isinstance(deserialized, dict):
+                sample = dict(list(deserialized.items())[:2])
+                print(f"  Sample content: {str(sample)[:200]}...")
+            elif deserialized:
+                print(f"  Content preview: {str(deserialized)[:200]}...")
+            else:
+                print("  Empty/Invalid data")
+
+def main():
+    """Main entry point"""
+    dataset = load_hf_dataset()
+    # analyze_dataset_structure(dataset)
+    # print_data_types(dataset)
+    analyze_serialization(dataset)
+
+if __name__ == "__main__":
+    main()
+```
+
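+Note that `safe_deserialize` returns the raw value instead of raising on malformed input, so one bad field does not abort the analysis of the remaining fields and samples.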