NextGenC committed
Commit 4ccf135 · verified · 1 Parent(s): fbc2cb2

Upload 9 files

LICENSE.md ADDED
@@ -0,0 +1,29 @@
+ Erynn AI - License v1.0 (Modified MIT-Based)
+ Copyright (c) 2025 Erynn Project
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated resources (collectively, the “Software”), to use the Software without restriction, including the rights to use, copy, modify, publish, distribute, sublicense, and/or sell content or systems derived from it, subject to the following conditions:
+
+ 1. AI-Generated Content
+ Outputs generated using this Software are owned by the user. You may use them freely, including for commercial purposes. However, when publishing such outputs publicly, you must:
+
+ Clearly disclose that AI-assisted generation was involved.
+
+ Not misrepresent the content as entirely human-created.
+
+ 2. Licensing Obligations
+ This license text and the copyright notice must remain intact in all substantial Software distributions.
+
+ Attribution is not required for content generated using the Software.
+
+ Underlying model architecture and training methods are intentionally undisclosed; usage assumes awareness of applicable third-party and upstream licenses.
+
+ 3. Legal Disclaimer
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGE, OR LIABILITY ARISING FROM THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ 4. Usage Conditions
+ Use the Software responsibly and ethically.
+
+ Do not attempt to circumvent or disable built-in safety mechanisms.
+
+ Ensure compliance with any applicable laws or regulatory policies in your jurisdiction.
+
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "C:\\Users\\j\\Desktop\\Erynn\\gpt2-large",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "c_attn",
+     "c_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
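This is a standard PEFT LoRA configuration: rank r=8 adapters with lora_alpha=32 (effective scaling alpha/r = 4) and 0.1 dropout, injected into GPT-2's fused attention projection c_attn and output projection c_proj. A minimal sketch for inspecting it with peft, assuming the adapter directory matches the ADAPTER_PATH hard-coded in inference_final.py below:

from peft import PeftConfig

# Reads adapter_config.json from the adapter directory (local path from this repo's scripts)
config = PeftConfig.from_pretrained(r"C:\Users\j\Desktop\Erynn\erynn_adapter")
print(config.base_model_name_or_path)  # local gpt2-large base model path
print(config.r, config.lora_alpha)     # 8, 32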
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72276d3267744af8dcf155788827f1418daf258216d8c06146126957a2ac01bb
+ size 16247848
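Note: this is a Git LFS pointer file, not the weights themselves; the adapter tensors (~16 MB, per the size field) live in LFS storage and are fetched with git lfs pull after cloning, or resolved automatically when downloading through the Hugging Face Hub.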
inference_final.py ADDED
@@ -0,0 +1,87 @@
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from peft import PeftModel
+
+ # Paths to model and adapter
+ MODEL_PATH = r"C:\Users\j\Desktop\Erynn\gpt2-large"
+ ADAPTER_PATH = r"C:\Users\j\Desktop\Erynn\erynn_adapter"
+
+ def load_model():
+     """Load the model and tokenizer."""
+     # Load model with low memory usage
+     model = AutoModelForCausalLM.from_pretrained(
+         MODEL_PATH,
+         device_map="auto",
+         torch_dtype=torch.float16
+     )
+     # Add LoRA adapter
+     model = PeftModel.from_pretrained(model, ADAPTER_PATH)
+     # Load tokenizer
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+     tokenizer.pad_token = tokenizer.eos_token
+     return model, tokenizer
+
+ def get_response(model, tokenizer, instruction, context=None):
+     """
+     Generate a response for the given instruction and optional context.
+     Example: get_response(model, tokenizer, "Write an ad for a phone")
+     """
+     # Build simple prompt
+     prompt = f"Instruction: {instruction}\n"
+     if context and context.strip():
+         prompt += f"Context: {context}\n"
+     prompt += "Response: "
+
+     # Tokenize input
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+     # Generate response
+     with torch.no_grad():
+         output = model.generate(
+             input_ids=inputs["input_ids"],
+             attention_mask=inputs["attention_mask"],
+             max_new_tokens=100,  # Short and focused responses
+             temperature=0.7,
+             top_p=0.9,
+             repetition_penalty=1.2,
+             do_sample=True,  # Enable sampling so temperature/top_p take effect
+             pad_token_id=tokenizer.eos_token_id
+         )
+
+     # Extract the text after the "Response: " marker
+     response = tokenizer.decode(output[0], skip_special_tokens=True)
+     response_start = response.find("Response: ") + len("Response: ")
+     return response[response_start:].strip()
+
+ def main():
+     """Run example instructions to test the model."""
+     print("Erynn is ready! Testing some examples...\n")
+
+     # Load model and tokenizer
+     model, tokenizer = load_model()
+
+     # Test 1: Short explanation
+     print("Test 1: Explain AI briefly")
+     response = get_response(model, tokenizer, "Explain artificial intelligence in 50 words or less.")
+     print(response, "\n" + "-"*40)
+
+     # Test 2: Summarization
+     print("\nTest 2: Summarize this text")
+     context = "Deep learning is a key AI technology. It excels in computer vision and natural language processing, driving advances in image recognition and speech synthesis."
+     response = get_response(model, tokenizer, "Summarize this text in 30 words or less.", context)
+     print(response, "\n" + "-"*40)
+
+     # Test 3: Advertisement
+     print("\nTest 3: Write a smartwatch ad")
+     response = get_response(model, tokenizer, "Write a short advertisement for a smartwatch in 40 words.")
+     print(response, "\n" + "-"*40)
+
+     # Test 4: List
+     print("\nTest 4: List Python advantages")
+     response = get_response(model, tokenizer, "List three advantages of Python programming.")
+     print(response)
+
+     print("\nTry your own instruction: get_response(model, tokenizer, 'Your instruction here')")
+
+ if __name__ == "__main__":
+     main()
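Beyond the built-in tests, load_model and get_response can be reused for ad-hoc prompts; a minimal sketch, assuming the script is importable under its filename as module inference_final:

from inference_final import load_model, get_response

# Load the base model plus LoRA adapter once, then prompt interactively
model, tokenizer = load_model()
print(get_response(model, tokenizer, "Your instruction here"))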
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 1024,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
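As is standard for GPT-2, all four special-token roles (bos, eos, pad, unk) map to the single <|endoftext|> token, id 50256, which is why inference_final.py can safely set tokenizer.pad_token = tokenizer.eos_token and pass pad_token_id=tokenizer.eos_token_id to generate().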
vocab.json ADDED
The diff for this file is too large to render. See raw diff