Upload 6 files
- data-00000-of-00001.arrow +3 -0
- dataset_info.json +32 -0
- nv-blog-jp.py +407 -0
- nvidia_ja_jp_en_us_dev_blog_dataset.jsonl +0 -0
- nvidia_ja_jp_en_us_dev_blog_dataset_partial_10.jsonl +0 -0
- state.json +13 -0
data-00000-of-00001.arrow
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:57a06170ee87d1e7ddd2d0dd6a70e2025450dcd889806b127a1a2d84917b56a3
size 555568
dataset_info.json
ADDED
@@ -0,0 +1,32 @@
{
  "citation": "",
  "description": "",
  "features": {
    "en_url": {
      "dtype": "string",
      "_type": "Value"
    },
    "en_title": {
      "dtype": "string",
      "_type": "Value"
    },
    "en_content": {
      "dtype": "string",
      "_type": "Value"
    },
    "jp_url": {
      "dtype": "string",
      "_type": "Value"
    },
    "jp_title": {
      "dtype": "string",
      "_type": "Value"
    },
    "jp_content": {
      "dtype": "string",
      "_type": "Value"
    }
  },
  "homepage": "",
  "license": ""
}
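For reference, the features block above is a flat six-column string schema: URL, title, and content for each language. A minimal sketch of the equivalent declaration with the datasets library (an illustration of the schema, not part of the uploaded files):

from datasets import Features, Value

# Flat schema matching dataset_info.json: one URL/title/content triple per language.
features = Features({
    "en_url": Value("string"),
    "en_title": Value("string"),
    "en_content": Value("string"),
    "jp_url": Value("string"),
    "jp_title": Value("string"),
    "jp_content": Value("string"),
})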
nv-blog-jp.py
ADDED
@@ -0,0 +1,407 @@
import requests
from bs4 import BeautifulSoup
import time
import json
from datasets import Dataset
import re

# Base URLs for English and Japanese versions
BASE_EN = "https://developer.nvidia.com/blog"
BASE_JP = "https://developer.nvidia.com/ja-jp/blog"
JP_RECENT_POSTS = "https://developer.nvidia.com/ja-jp/blog/recent-posts/"

# Archive URLs by year
JP_ARCHIVES = {
    "2025": "https://developer.nvidia.com/ja-jp/blog/2025/",
    "2024": "https://developer.nvidia.com/ja-jp/blog/2024/",
    "2023": "https://developer.nvidia.com/ja-jp/blog/2023/",
    "2022": "https://developer.nvidia.com/ja-jp/blog/2022/",
    "2021": "https://developer.nvidia.com/ja-jp/blog/2021/",
    # Add more years if needed
}

# Custom headers (helps to mimic a browser)
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept-Language': 'en-US,en;q=0.9,ja;q=0.8',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
    'Cache-Control': 'max-age=0'
}

def get_article_content(url):
    """
    Fetches the content of an article given its URL.
    Returns a tuple: (title, content).
    """
    try:
        print(f"Fetching content from: {url}")
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            print(f"Failed to fetch {url} (Status code: {response.status_code})")
            return None, None

        soup = BeautifulSoup(response.text, 'html.parser')

        # Find the title
        title_tag = soup.find("h1")
        title = title_tag.get_text(strip=True) if title_tag else "No title found"

        # Find the main content - try different possible content containers
        content_selectors = [
            "div.entry-content",
            "div.post-content",
            "article.post",
            "div.blog-post-content"
        ]

        content = ""
        for selector in content_selectors:
            content_div = soup.select_one(selector)
            if content_div:
                # Remove any script, style elements, and navigation
                for element in content_div.find_all(['script', 'style', 'nav', 'footer']):
                    element.decompose()
                content = content_div.get_text(separator="\n", strip=True)
                break

        if not content:
            # Fallback: get the main content area
            main_content = soup.find("main") or soup.find("article") or soup.find("div", id="content")
            if main_content:
                for element in main_content.find_all(['script', 'style', 'nav', 'footer', 'header']):
                    element.decompose()
                content = main_content.get_text(separator="\n", strip=True)
            else:
                content = "No content found"

        return title, content

    except Exception as e:
        print(f"Exception occurred while fetching {url}: {e}")
        return None, None

def extract_article_links_from_page(soup):
    """
    Extract article links from a BeautifulSoup object
    """
    links = []
    links_found = False

    # Method 1: Look for article headings (h3 tags with links)
    article_headings = soup.find_all(["h2", "h3", "h4"])
    for heading in article_headings:
        link = heading.find("a", href=True)
        if link and link.get('href'):
            href = link.get('href')
            if '/blog/' in href:
                if href.startswith('/'):
                    href = f"https://developer.nvidia.com{href}"
                if href not in links:
                    links.append(href)
                    print(f"Found article from heading: {href}")
                    links_found = True

    # Method 2: Look for "投稿を見る" links
    view_post_links = soup.find_all("a", string=["投稿を見る", "記事を読む", "続きを読む"])
    for link in view_post_links:
        href = link.get('href')
        if href and '/blog/' in href:
            if href.startswith('/'):
                href = f"https://developer.nvidia.com{href}"
            if href not in links:
                links.append(href)
                print(f"Found article from view post link: {href}")
                links_found = True

    # Method 3: Look for all article links
    if not links_found:
        # Find all links that might be to articles
        all_post_links = soup.find_all("a", href=True)
        for link in all_post_links:
            href = link.get('href')
            if href and '/ja-jp/blog/' in href and not '/category/' in href and not '/recent-posts/' in href:
                # Skip archive links
                if any(f"/ja-jp/blog/{year}" in href for year in JP_ARCHIVES.keys()):
                    if href.count('/') > 5:  # This is likely an article, not just a year archive
                        if href.startswith('/'):
                            href = f"https://developer.nvidia.com{href}"
                        if href not in links:
                            links.append(href)
                            print(f"Found article: {href}")
                            links_found = True
                else:
                    if href.startswith('/'):
                        href = f"https://developer.nvidia.com{href}"
                    if href not in links:
                        links.append(href)
                        print(f"Found article: {href}")
                        links_found = True

    return links

def get_articles_from_archive(archive_url, num_articles=100):
    """
    Gets articles from a specific archive URL (like a year archive)
    """
    all_links = []
    page = 1

    while len(all_links) < num_articles:
        try:
            if page == 1:
                url = archive_url
            else:
                url = f"{archive_url}page/{page}/"

            print(f"Fetching archive page {page}: {url}")
            response = requests.get(url, headers=headers)

            if response.status_code != 200:
                print(f"Failed to fetch page {page}: {response.status_code}")
                break

            soup = BeautifulSoup(response.text, 'html.parser')

            # Extract article links from the page
            page_links = extract_article_links_from_page(soup)

            if not page_links:
                print(f"No articles found on page {page}")
                break

            # Add new links to our collection
            for link in page_links:
                if link not in all_links:
                    all_links.append(link)
                    if len(all_links) >= num_articles:
                        break

            if len(all_links) >= num_articles:
                break

            # Check if there's a next page
            next_page = soup.select_one("a.next, .pagination .next a, .nav-links .next")
            if not next_page:
                print("No next page found")
                break

            page += 1
            time.sleep(2)  # Be polite

        except Exception as e:
            print(f"Error on page {page}: {e}")
            break

    return all_links[:num_articles]

def get_japanese_articles(num_articles=1000):
    """
    Gets Japanese articles from all available sources
    """
    all_links = []

    # First, get articles from the recent posts page
    print("Getting articles from recent posts page...")
    recent_links = get_articles_from_archive(JP_RECENT_POSTS, num_articles)
    all_links.extend(recent_links)
    print(f"Found {len(recent_links)} articles from recent posts")

    # If we need more articles, go through the yearly archives
    if len(all_links) < num_articles:
        for year, url in JP_ARCHIVES.items():
            if len(all_links) >= num_articles:
                break

            print(f"\nGetting articles from {year} archive...")
            year_links = get_articles_from_archive(url, num_articles - len(all_links))

            # Add new links to our collection
            for link in year_links:
                if link not in all_links:
                    all_links.append(link)
                    if len(all_links) >= num_articles:
                        break

            print(f"Found {len(year_links)} articles from {year}")
            time.sleep(2)  # Be polite between years

    return all_links[:num_articles]

def get_blog_posts(lang='ja-jp', num_articles=100):
    """
    Gets blog posts using a different approach - directly searching for post links
    """
    if lang == 'ja-jp':
        # For Japanese, use our specialized function
        return get_japanese_articles(num_articles)
    else:
        # For English, use the regular approach
        base_url = BASE_EN
        all_links = []
        page = 1

        while len(all_links) < num_articles:
            try:
                if page == 1:
                    url = base_url
                else:
                    url = f"{base_url}/page/{page}"

                print(f"Fetching blog listing page {page}: {url}")
                response = requests.get(url, headers=headers)

                if response.status_code != 200:
                    print(f"Failed to fetch page {page}: {response.status_code}")
                    break

                soup = BeautifulSoup(response.text, 'html.parser')

                # Look for blog post links - try different selectors
                links_found = False

                # Method 1: Look for article elements
                articles = soup.find_all("article")
                if articles:
                    for article in articles:
                        links = article.find_all("a", href=True)
                        for link in links:
                            href = link['href']
                            if '/blog/' in href:
                                if href.startswith('/'):
                                    href = f"https://developer.nvidia.com{href}"
                                if href not in all_links:
                                    all_links.append(href)
                                    print(f"Found article: {href}")
                                    links_found = True

                # Method 2: Look for blog post cards or listings
                if not links_found:
                    post_links = soup.select("a.blog-post-link, a.post-link, .post-title a, .entry-title a")
                    for link in post_links:
                        href = link.get('href')
                        if href and '/blog/' in href:
                            if href.startswith('/'):
                                href = f"https://developer.nvidia.com{href}"
                            if href not in all_links:
                                all_links.append(href)
                                print(f"Found article: {href}")
                                links_found = True

                # Method 3: Find all links that might be blog posts
                if not links_found:
                    all_post_links = soup.find_all("a", href=True)
                    for link in all_post_links:
                        href = link.get('href')
                        if href and '/blog/' in href and not href.endswith('/page/') and not '/category/' in href:
                            if href.startswith('/'):
                                href = f"https://developer.nvidia.com{href}"
                            if href not in all_links:
                                all_links.append(href)
                                print(f"Found article: {href}")
                                links_found = True

                if not links_found:
                    print(f"No articles found on page {page}")
                    break

                if len(all_links) >= num_articles:
                    break

                # Check if there's a next page
                next_page = soup.select_one("a.next, .pagination .next a, .nav-links .next")
                if not next_page:
                    print("No next page found")
                    break

                page += 1
                time.sleep(2)  # Be polite

            except Exception as e:
                print(f"Error on page {page}: {e}")
                break

        return all_links[:num_articles]

def get_corresponding_url(url, from_lang='ja-jp', to_lang='en-us'):
    """
    Convert URL between languages
    """
    if from_lang == 'ja-jp' and '/ja-jp/' in url:
        return url.replace('/ja-jp/', '/')
    elif from_lang == 'en-us' and '/blog/' in url:
        return url.replace('/blog/', '/ja-jp/blog/')
    return url

if __name__ == '__main__':
    num_articles = 1000  # Adjust as needed

    # Try a different approach - get Japanese articles first
    print("Getting Japanese blog posts...")
    jp_links = get_blog_posts(lang='ja-jp', num_articles=num_articles)
    print(f"Found {len(jp_links)} Japanese articles")

    article_pairs = []

    for jp_link in jp_links:
        try:
            print(f"\nProcessing Japanese article: {jp_link}")

            # Get corresponding English URL
            en_link = get_corresponding_url(jp_link, from_lang='ja-jp', to_lang='en-us')
            print(f"Corresponding English URL: {en_link}")

            # Get contents from both versions
            jp_title, jp_content = get_article_content(jp_link)

            if not jp_title or not jp_content:
                print("Skipping due to missing Japanese content")
                continue

            en_title, en_content = get_article_content(en_link)

            if not en_title or not en_content:
                print("Skipping due to missing English content")
                continue

            # If both pages were fetched successfully, save the pair
            article_pairs.append({
                'en_url': en_link,
                'en_title': en_title,
                'en_content': en_content,
                'jp_url': jp_link,
                'jp_title': jp_title,
                'jp_content': jp_content,
            })
            print(f"Successfully paired: {jp_title}")

            # Save progress periodically
            if len(article_pairs) % 10 == 0:
                print(f"Saving progress with {len(article_pairs)} pairs...")
                temp_dataset = Dataset.from_list(article_pairs)
                temp_dataset.to_json(f"nvidia_ja_jp_en_us_dev_blog_dataset_partial_{len(article_pairs)}.jsonl",
                                     orient="records", force_ascii=False)

        except Exception as e:
            print(f"Error processing article pair: {e}")

        # Be polite to the server
        time.sleep(2)

    print(f"\nCollected {len(article_pairs)} article pairs")

    # Create and save dataset
    if article_pairs:
        hf_dataset = Dataset.from_list(article_pairs)

        # Save as Hugging Face dataset
        hf_dataset.save_to_disk("nvidia_ja_jp_dev_blog_dataset")

        # Save as JSONL
        hf_dataset.to_json("nvidia_ja_jp_en_us_dev_blog_dataset.jsonl", orient="records", force_ascii=False)

        print("Dataset saved successfully")
    else:
        print("No article pairs collected")
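Because to_json writes JSON Lines, the paired articles the script produces can be reloaded directly with the datasets JSON loader. A minimal sketch, assuming the JSONL file written above sits in the working directory (illustration only, not part of the upload):

from datasets import load_dataset

# Reload the EN/JP article pairs written by nv-blog-jp.py.
pairs = load_dataset(
    "json",
    data_files="nvidia_ja_jp_en_us_dev_blog_dataset.jsonl",
    split="train",
)
print(pairs[0]["jp_title"], "<->", pairs[0]["en_title"])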
nvidia_ja_jp_en_us_dev_blog_dataset.jsonl
ADDED
The diff for this file is too large to render.
nvidia_ja_jp_en_us_dev_blog_dataset_partial_10.jsonl
ADDED
The diff for this file is too large to render.
state.json
ADDED
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "8da7ad6c58f088a8",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": null
}
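Together, data-00000-of-00001.arrow, dataset_info.json, and state.json are the on-disk layout produced by the save_to_disk() call in nv-blog-jp.py. A minimal sketch of reloading that Arrow-backed copy, assuming the three files sit in one directory (the script writes them to "nvidia_ja_jp_dev_blog_dataset"; illustration only):

from datasets import Dataset

# Reload the Arrow-backed dataset written by save_to_disk().
ds = Dataset.load_from_disk("nvidia_ja_jp_dev_blog_dataset")
print(ds.num_rows)
print(ds.features)  # six string columns, as declared in dataset_info.json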