Improve language tag #1
by lbourdois - opened
Files changed (1)
  1. README.md +84 -72
README.md CHANGED
@@ -1,73 +1,85 @@
- ---
- library_name: transformers
- tags:
- - code
- - NextJS
- language:
- - en
- base_model:
- - Qwen/Qwen2.5-1.5B-Instruct
- base_model_relation: finetune
- pipeline_tag: text-generation
- ---
-
- # Model Information
- Qwen2.5-1.5B-NextJs-code is a quantized, fine-tuned version of the Qwen2.5-1.5B-Instruct model designed specifically for generating NextJS code.
-
- - **Base model:** Qwen/Qwen2.5-1.5B-Instruct
-
-
- # How to use
- With transformers version 4.44.0 and later, you can run conversational inference using the Transformers pipeline.
-
- Make sure to update your transformers installation via `pip install --upgrade transformers`.
-
- ```python
- import torch
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
- ```
-
- ```python
- def get_pipeline():
-     model_name = "nirusanan/Qwen2.5-1.5B-NextJs-code"
-
-     tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
-     tokenizer.pad_token = tokenizer.eos_token
-
-     model = AutoModelForCausalLM.from_pretrained(
-         model_name,
-         torch_dtype=torch.float16,
-         device_map="cuda:0",
-         trust_remote_code=True
-     )
-
-     pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=3500)
-
-     return pipe
-
- pipe = get_pipeline()
- ```
-
- ```python
- def generate_prompt(project_title, description):
-     prompt = f"""Below is an instruction that describes a project. Write Nextjs 14 code to accomplish the project described below.
-
- ### Instruction:
- Project:
- {project_title}
-
- Project Description:
- {description}
-
- ### Response:
- """
-     return prompt
- ```
-
-
- ```python
- prompt = generate_prompt(project_title="Your NextJs project", description="Your NextJs project description")
- result = pipe(prompt)
- generated_text = result[0]['generated_text']
- print(generated_text.split("### End")[0])
  ```
 
+ ---
+ library_name: transformers
+ tags:
+ - code
+ - NextJS
+ language:
+ - zho
+ - eng
+ - fra
+ - spa
+ - por
+ - deu
+ - ita
+ - rus
+ - jpn
+ - kor
+ - vie
+ - tha
+ - ara
+ base_model:
+ - Qwen/Qwen2.5-1.5B-Instruct
+ base_model_relation: finetune
+ pipeline_tag: text-generation
+ ---
+
+ # Model Information
+ Qwen2.5-1.5B-NextJs-code is a quantized, fine-tuned version of the Qwen2.5-1.5B-Instruct model designed specifically for generating NextJS code.
+
+ - **Base model:** Qwen/Qwen2.5-1.5B-Instruct
+
+
+ # How to use
+ With transformers version 4.44.0 and later, you can run conversational inference using the Transformers pipeline.
+
+ Make sure to update your transformers installation via `pip install --upgrade transformers`.
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+ ```
+
+ ```python
+ def get_pipeline():
+     model_name = "nirusanan/Qwen2.5-1.5B-NextJs-code"
+
+     tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+     tokenizer.pad_token = tokenizer.eos_token
+
+     model = AutoModelForCausalLM.from_pretrained(
+         model_name,
+         torch_dtype=torch.float16,
+         device_map="cuda:0",
+         trust_remote_code=True
+     )
+
+     pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=3500)
+
+     return pipe
+
+ pipe = get_pipeline()
+ ```
+
+ ```python
+ def generate_prompt(project_title, description):
+     prompt = f"""Below is an instruction that describes a project. Write Nextjs 14 code to accomplish the project described below.
+
+ ### Instruction:
+ Project:
+ {project_title}
+
+ Project Description:
+ {description}
+
+ ### Response:
+ """
+     return prompt
+ ```
+
+
+ ```python
+ prompt = generate_prompt(project_title="Your NextJs project", description="Your NextJs project description")
+ result = pipe(prompt)
+ generated_text = result[0]['generated_text']
+ print(generated_text.split("### End")[0])
  ```
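
The card describes the model as quantized, yet the README's `get_pipeline` snippet loads the weights in plain float16. If GPU memory is tight, loading in 4-bit is a reasonable alternative; the sketch below is an illustration using bitsandbytes through `BitsAndBytesConfig`, not something the model card prescribes, and the config values are illustrative defaults.

```python
# A minimal sketch, assuming bitsandbytes is installed (pip install bitsandbytes).
# Not from the model card: loads the checkpoint with 4-bit weight quantization.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # quantize weights to 4-bit at load time
    bnb_4bit_compute_dtype=torch.float16,  # run matmuls in float16
)

model = AutoModelForCausalLM.from_pretrained(
    "nirusanan/Qwen2.5-1.5B-NextJs-code",
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)
```

The resulting model can then be handed to `pipeline(...)` exactly as in the README snippet.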
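
The final snippet trims everything after "### End", which suggests the fine-tune emits that marker when a response is complete, but generation still runs until `max_length` is reached. A hedged sketch that stops decoding at the marker instead, using the standard transformers `StoppingCriteria` API; the `StopOnMarker` helper and the 8-token decode window are assumptions for illustration, and `pipe`/`prompt` come from the snippets above.

```python
# A sketch under the assumption that the fine-tune reliably emits "### End":
# stop generation at the marker rather than always decoding up to max_length.
from transformers import StoppingCriteria, StoppingCriteriaList

class StopOnMarker(StoppingCriteria):  # hypothetical helper, not part of the model card
    def __init__(self, tokenizer, marker="### End"):
        self.tokenizer = tokenizer
        self.marker = marker

    def __call__(self, input_ids, scores, **kwargs):
        # Decode only a short tail of the sequence so the check stays cheap.
        tail = self.tokenizer.decode(input_ids[0][-8:], skip_special_tokens=True)
        return self.marker in tail

stop = StoppingCriteriaList([StopOnMarker(pipe.tokenizer)])
result = pipe(prompt, stopping_criteria=stop)
print(result[0]['generated_text'].split("### End")[0])
```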