Update README.md
Browse files
README.md
CHANGED
@@ -132,13 +132,23 @@ Contains the raw multi-turn dialogue and metadata.
|
|
132 |
|
133 |
**Install dependencies**
|
134 |
|
135 |
-
```
|
136 |
pip install pandas pyarrow datasets
|
137 |
```
|
138 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
139 |
**Load a single Parquet split**
|
140 |
|
141 |
-
```
|
142 |
import pandas as pd
|
143 |
|
144 |
df = pd.read_parquet("data/bittensor-conversational-tags-and-embeddings-part-0000.parquet")
|
@@ -147,7 +157,7 @@ print(df.head())
|
|
147 |
|
148 |
**Load all tag splits**
|
149 |
|
150 |
-
```
|
151 |
import pandas as pd
|
152 |
import glob
|
153 |
|
@@ -159,21 +169,21 @@ print(f"Loaded {len(df_tags)} tag records.")
|
|
159 |
|
160 |
**Load tag dictionary**
|
161 |
|
162 |
-
```
|
163 |
tag_dict = pd.read_parquet("tag_to_id.parquet")
|
164 |
print(tag_dict.head())
|
165 |
```
|
166 |
|
167 |
**Load conversation to tags mapping**
|
168 |
|
169 |
-
```
|
170 |
df_mapping = pd.read_parquet("conversations_to_tags.parquet")
|
171 |
print(df_mapping.head())
|
172 |
```
|
173 |
|
174 |
**Load full conversations dialogue and metadata**
|
175 |
|
176 |
-
```
|
177 |
df_conversations = pd.read_parquet("conversations_train.parquet")
|
178 |
print(df_conversations.head())
|
179 |
```
|
@@ -182,7 +192,7 @@ print(df_conversations.head())
|
|
182 |
|
183 |
## 🔥 Example: Reconstruct Tags for a Conversation
|
184 |
|
185 |
-
```
|
186 |
# Build tag lookup
|
187 |
tag_lookup = dict(zip(tag_dict['tag_id'], tag_dict['tag']))
|
188 |
|
@@ -209,7 +219,7 @@ print(f"Conversation {c_guid} has tags: {tags}")
|
|
209 |
|
210 |
**Example (streaming with Hugging Face `datasets`)**
|
211 |
|
212 |
-
```
|
213 |
from datasets import load_dataset
|
214 |
|
215 |
dataset = load_dataset(
|
|
|
132 |
|
133 |
**Install dependencies**
|
134 |
|
135 |
+
```bash
|
136 |
pip install pandas pyarrow datasets
|
137 |
```
|
138 |
|
139 |
+
**Download the dataset**
|
140 |
+
```python
|
141 |
+
import datasets
|
142 |
+
|
143 |
+
path = "ReadyAi/5000-podcast-conversations-with-metadata-and-embedding-dataset"
|
144 |
+
dataset = datasets.load_dataset(path)
|
145 |
+
|
146 |
+
print(dataset['train'].column_names)
|
147 |
+
```
|
148 |
+
|
149 |
**Load a single Parquet split**
|
150 |
|
151 |
+
```python
|
152 |
import pandas as pd
|
153 |
|
154 |
df = pd.read_parquet("data/bittensor-conversational-tags-and-embeddings-part-0000.parquet")
|
|
|
157 |
|
158 |
**Load all tag splits**
|
159 |
|
160 |
+
```python
|
161 |
import pandas as pd
|
162 |
import glob
|
163 |
|
|
|
169 |
|
170 |
**Load tag dictionary**
|
171 |
|
172 |
+
```python
|
173 |
tag_dict = pd.read_parquet("tag_to_id.parquet")
|
174 |
print(tag_dict.head())
|
175 |
```
|
176 |
|
177 |
**Load conversation to tags mapping**
|
178 |
|
179 |
+
```python
|
180 |
df_mapping = pd.read_parquet("conversations_to_tags.parquet")
|
181 |
print(df_mapping.head())
|
182 |
```
|
183 |
|
184 |
**Load full conversations dialogue and metadata**
|
185 |
|
186 |
+
```python
|
187 |
df_conversations = pd.read_parquet("conversations_train.parquet")
|
188 |
print(df_conversations.head())
|
189 |
```
|
|
|
192 |
|
193 |
## 🔥 Example: Reconstruct Tags for a Conversation
|
194 |
|
195 |
+
```python
|
196 |
# Build tag lookup
|
197 |
tag_lookup = dict(zip(tag_dict['tag_id'], tag_dict['tag']))
|
198 |
|
|
|
219 |
|
220 |
**Example (streaming with Hugging Face `datasets`)**
|
221 |
|
222 |
+
```python
|
223 |
from datasets import load_dataset
|
224 |
|
225 |
dataset = load_dataset(
|