ErenalpCet committed on
Commit 783ed02 · verified · 1 Parent(s): c182ab9

Update app.py

Files changed (1)
  1. app.py +173 -104
app.py CHANGED
@@ -1,3 +1,4 @@
  import gradio as gr
  import transformers
  import torch
@@ -6,9 +7,18 @@ from duckduckgo_search import DDGS
  import re
  import time
  from spaces import GPU

  # --- Constants and Configuration ---
- MODEL_ID = "google/gemma-3-1b-it" # Updated to Gemma 3 1B
  MAX_GPU_MEMORY = "60GiB"

  # --- Model Loading ---
@@ -20,11 +30,9 @@ def load_model():
          pipe = pipeline(
              "text-generation",
              model=MODEL_ID,
-             torch_dtype=torch.bfloat16, # Full precision, compatible with Gemma
              device_map="auto",
-             model_kwargs={
-                 "use_cache": True,
-             }
          )
          print(f"Model {MODEL_ID} loaded successfully on device: {pipe.device} (full precision)")
          return pipe
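
Aside: for readers following the hunk above, this is the pipeline configuration the commit settles on, shown standalone. A minimal sketch, assuming the transformers/torch stack is installed and the google/gemma-3-1b-it weights are accessible; it is not part of the committed file.

    import torch
    from transformers import pipeline

    MODEL_ID = "google/gemma-3-1b-it"
    # bfloat16 weights, automatic device placement, KV-cache enabled --
    # the same arguments the updated load_model() passes.
    pipe = pipeline(
        "text-generation",
        model=MODEL_ID,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        model_kwargs={"use_cache": True},
    )
    print(pipe("Hello, my name is", max_new_tokens=10)[0]["generated_text"])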
@@ -32,24 +40,66 @@ def load_model():
          print(f"FATAL Error loading model '{MODEL_ID}': {e}")
          raise e

- # --- Web Search ---
  def search_person(name, context=""):
      """Search for information about a person using DuckDuckGo."""
      print(f"Searching for: {name} with context: {context}")
      results = []
      search_terms = []
      if context:
          search_terms.append(f"{name} {context}")
          grade_match = re.search(r'(\d+)(?:st|nd|rd|th)?\s+grade', context.lower())
          if grade_match:
              grade = grade_match.group(1)
              search_terms.append(f"{name} student {grade} grade")
-     search_terms.append(f"{name}")
-     search_terms.append(f"{name} biography")
-     search_terms.append(f"{name} interests")
-     search_terms.append(f"{name} personality")
      search_terms = list(dict.fromkeys(search_terms))
      print(f"Using search terms: {search_terms}")
      try:
          with DDGS() as ddgs:
              for term in search_terms:
@@ -61,9 +111,11 @@ def search_person(name, context=""):
          error_msg = f"Error during DuckDuckGo search: {str(e)}"
          print(error_msg)
          return error_msg
      if not results:
          print(f"No search results found for {name}. Creating synthetic profile.")
          return create_synthetic_profile(name, context)
      print(f"Found {len(results)} potential search results.")
      return results

@@ -91,21 +143,25 @@ def extract_text_from_search_results(search_results):
      """Extract relevant text from search results."""
      if isinstance(search_results, str):
          return f"Could not extract text due to search error: {search_results}"
      combined_text = ""
      seen_bodies = set()
      count = 0
      max_results_to_process = 5
      for result in search_results:
          if count >= max_results_to_process:
              break
          if isinstance(result, dict) and 'body' in result and result['body']:
              body = result['body'].strip()
              if body not in seen_bodies:
-                 combined_text += body + "\n\n"
                  seen_bodies.add(body)
                  count += 1
      if not combined_text:
          return "No relevant text found in search results."
      combined_text = re.sub(r'\s+', ' ', combined_text).strip()
      max_length = 2000
      return combined_text[:max_length] + "..." if len(combined_text) > max_length else combined_text
@@ -122,9 +178,11 @@ def parse_llm_output(full_output, input_prompt_list):
          generated_text = full_output
      else:
          return str(full_output)
      last_input_content = ""
      if isinstance(input_prompt_list, list) and input_prompt_list:
          last_input_content = input_prompt_list[-1].get("content", "")
      if last_input_content:
          last_occurrence_index = generated_text.rfind(last_input_content)
          if last_occurrence_index != -1:
@@ -132,25 +190,27 @@ def parse_llm_output(full_output, input_prompt_list):
              if potential_response:
                  potential_response = re.sub(r'^<\/?s?>', '', potential_response).strip()
                  potential_response = re.sub(r'^(assistant|ASSISTANT|System|SYSTEM)[:\s]*', '', potential_response).strip()
-                 # Remove special tags
                  potential_response = re.sub(r'<end_of_turn>|<start_of_turn>model', '', potential_response).strip()
                  if potential_response:
                      return potential_response
      cleaned_text = generated_text
      if isinstance(input_prompt_list, list) and input_prompt_list:
          first_prompt_content = input_prompt_list[0].get("content", "")
          if first_prompt_content and cleaned_text.startswith(first_prompt_content):
              pass
      cleaned_text = re.sub(r'^<\/?s?>', '', cleaned_text).strip()
      cleaned_text = re.sub(r'^(assistant|ASSISTANT|System|SYSTEM)[:\s]*', '', cleaned_text).strip()
-     # Remove special tags from full output
      cleaned_text = re.sub(r'<end_of_turn>|<start_of_turn>model', '', cleaned_text).strip()
      if not cleaned_text and generated_text:
          print("Warning: Parsing resulted in empty string, returning original generation.")
-         # Still clean special tags from original
          return re.sub(r'<end_of_turn>|<start_of_turn>model', '', generated_text).strip()
      if last_input_content and last_occurrence_index == -1:
          print("Warning: Could not find last input prompt in LLM output. Returning cleaned full output.")
      return cleaned_text

  @GPU(memory=60)
@@ -158,10 +218,14 @@ def generate_enhanced_persona(name, bio_text, context=""):
      """Use the LLM to enhance the persona profile."""
      pipe = load_model()
      print(f"Generating enhanced persona for {name}...")
      enhancement_prompt = [
-         {"role": "system", "content": """You are an expert AI character developer. Your task is to synthesize information into a detailed and coherent character profile. Focus on personality, potential interests, speaking style, and mannerisms based ONLY on the provided text. If the text indicates the character is a child, ensure the profile reflects age-appropriate traits. Output ONLY the enhanced character profile description. Do not include conversational introductions, explanations, apologies for limited info, or markdown formatting like headers (e.g., ### Personality). Start directly with the profile text."""},
-         {"role": "user", "content": f"""Synthesize the following information about '{name}' into a character profile. Context: {context} Information Found:\n{bio_text}\n\nCreate the profile based *only* on the text above."""}
      ]
      try:
          tokenizer = pipe.tokenizer
          text = tokenizer.apply_chat_template(
@@ -184,18 +248,27 @@ def generate_enhanced_persona(name, bio_text, context=""):
      except Exception as e:
          error_msg = f"Error generating enhanced persona: {str(e)}"
          print(error_msg)
-         return f"Error enhancing profile: {str(e)}\n\nUsing basic info:\n{bio_text}"

  @GPU(memory=60)
  def generate_system_prompt_with_llm(name, enhanced_profile, context=""):
      """Generate an optimized system prompt for the persona."""
      pipe = load_model()
      print(f"Generating system prompt for {name}...")
-     fallback_prompt = f"""You are simulating the character '{name}'. Act and respond according to this profile:\n{enhanced_profile}\nAdditional context for the simulation: {context}\n---\nMaintain this persona consistently. Respond naturally based on the profile. Do not mention that you are an AI or a simulation. If asked about details not in the profile, you can be evasive or state you don't know/remember, consistent with the persona."""
      prompt = [
-         {"role": "system", "content": """You are an expert AI prompt engineer specializing in character simulation. Your task is to create a concise and effective system prompt for an LLM that will simulate a character based on a provided profile. The system prompt should instruct the LLM to embody the character, covering: 1. Core personality, attitude, and speaking style (based on the profile). 2. Key interests or knowledge areas (if mentioned in the profile). 3. How to handle questions outside its knowledge (e.g., be evasive, admit ignorance naturally). 4. Explicitly state it should *not* break character or mention being an AI. 5. Incorporate age-appropriateness if the profile suggests a specific age group. Output ONLY the system prompt itself. Do not add any explanation or introductory text."""},
-         {"role": "user", "content": f"""Create a system prompt for an AI to simulate the character '{name}'. Context for simulation: {context} Character Profile:\n{enhanced_profile}\n\nGenerate the system prompt based *only* on the profile and context provided."""}
      ]
      try:
          tokenizer = pipe.tokenizer
          text = tokenizer.apply_chat_template(
@@ -244,7 +317,6 @@ def generate_response(messages):
              pad_token_id=pipe.tokenizer.eos_token_id if pipe.tokenizer.eos_token_id else None
          )
          parsed_output = parse_llm_output(outputs, messages)
-         # Extra cleanup for specific model tags
          parsed_output = re.sub(r'<end_of_turn>|<start_of_turn>model', '', parsed_output).strip()
          print("Response generated.")
          return parsed_output if parsed_output else "..."
@@ -253,7 +325,7 @@ def generate_response(messages):
          print(error_msg)
          return f"Sorry, I encountered an error trying to respond."

- # --- Persona Chat Class ---
  class PersonaChat:
      def __init__(self):
          self.system_prompt = "You are a helpful assistant."
@@ -261,10 +333,17 @@ class PersonaChat:
          self.persona_context = ""
          self.messages = []
          self.enhanced_profile = ""
-
      def set_persona(self, name, context=""):
-         """Orchestrates persona creation: search, enhance, generate prompt."""
          try:
              self.persona_name = name
              self.persona_context = context
              self.messages = []
@@ -272,203 +351,193 @@ class PersonaChat:
              status = f"Searching for information about {name}..."
              print(f"set_persona: Yielding search status: {status}")
              yield status, "", "", []
              search_results = search_person(name, context)
              if isinstance(search_results, str) and search_results.startswith("Error"):
                  error_msg = f"Failed to set persona: {search_results}"
                  print(f"set_persona: Yielding error: {error_msg}")
                  yield error_msg, "", "", [{"role": "system", "content": error_msg}]
                  return
              bio_text = extract_text_from_search_results(search_results)
              if bio_text.startswith("Could not extract text"):
                  print(f"set_persona: Yielding bio warning: {bio_text}")
-                 yield f"Warning: {bio_text}", "", bio_text, [{"role": "system", "content": bio_text}]
              status = f"Creating enhanced profile for {name}..."
              print(f"set_persona: Yielding profile status: {status}")
              yield status, "", bio_text, []
              self.enhanced_profile = generate_enhanced_persona(name, bio_text, context)
              profile_for_prompt = self.enhanced_profile
              if self.enhanced_profile.startswith("Error enhancing profile"):
                  print(f"set_persona: Yielding profile warning: {self.enhanced_profile}")
                  yield f"Warning: Could not enhance profile. Using basic info.", "", self.enhanced_profile, [{"role": "system", "content": self.enhanced_profile}]
                  profile_for_prompt = bio_text
              status = f"Generating optimal system prompt for {name}..."
              print(f"set_persona: Yielding prompt status: {status}")
              yield status, self.enhanced_profile, self.enhanced_profile, []
              self.system_prompt = generate_system_prompt_with_llm(name, profile_for_prompt, context)
-             # Clean tokenizer artifacts from system prompt
              self.system_prompt = re.sub(r'<\|im_tailored\|>|<\|im_start\|>|^assistant\s*', '', self.system_prompt).strip()
              self.messages = [{"role": "system", "content": self.system_prompt}]
              print(f"set_persona: Final yield with messages (not sent to Chatbot): {self.messages}")
-             # Yield empty history for Chatbot to avoid system message issues
              yield f"Persona set to '{name}'. Ready to chat!", self.system_prompt, self.enhanced_profile, []
          except Exception as e:
              error_msg = f"An unexpected error occurred during persona setup: {str(e)}"
              print(f"set_persona: Yielding exception: {error_msg}")
              yield error_msg, self.system_prompt, self.enhanced_profile, [{"role": "system", "content": error_msg}]
-
      def chat(self, user_message):
          """Processes a user message and returns the AI's response."""
          try:
              if not self.messages:
                  print("Error: Chat called before persona was set.")
                  return "Please set a persona first using the controls above."
              print(f"User message: {user_message}")
              self.messages.append({"role": "user", "content": user_message})
              response = generate_response(self.messages)
              if not response.startswith("Sorry, I encountered an error"):
                  self.messages.append({"role": "assistant", "content": response})
                  print(f"Assistant response: {response}")
              else:
                  print(f"Assistant error response: {response}")
              return response
          except Exception as e:
              error_msg = f"Error generating response: {str(e)}"
              print(error_msg)
              return f"Sorry, I encountered an error: {str(e)}"

- # --- Gradio Interface ---
  def create_interface():
      persona_chat = PersonaChat()
      css = """
-     .gradio-container { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; }
      .main-container { max-width: 1200px; margin: auto; padding: 0; }
-     .header { background: linear-gradient(90deg, #2c3e50, #4ca1af); color: white; padding: 20px; border-radius: 10px 10px 0 0; margin-bottom: 20px; text-align: center; }
-     .setup-section { background-color: #f9f9f9; border-radius: 10px; padding: 20px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); margin-bottom: 20px; }
-     .chat-section { background-color: white; border-radius: 10px; padding: 20px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); }
-     .status-bar { background: #e9ecef; padding: 10px 15px; border-radius: 5px; margin: 15px 0; font-weight: 500; border: 1px solid #ced4da; }
-     .chat-container { border: 1px solid #eaeaea; border-radius: 10px; height: 500px !important; overflow-y: auto; background-color: #ffffff; padding: 10px; }
      .message-input { margin-top: 10px; }
-     .send-button { background-color: #2c3e50 !important; color: white !important; }
-     .persona-button { background-color: #4ca1af !important; color: white !important; }
-     .system-prompt-display { background-color: #f5f5f5; border-radius: 8px; padding: 15px; margin-top: 15px; border: 1px solid #e0e0e0; font-family: monospace; white-space: pre-wrap; word-wrap: break-word; }
-     .footer { text-align: center; margin-top: 20px; font-size: 0.9em; color: #666; }
-     .typing-indicator { color: #aaa; font-style: italic; }
-     """

-     # Define avatar images with full URLs to ensure they work
-     user_avatar = "https://api.dicebear.com/6.x/bottts/svg?seed=user"
-     bot_avatar = "https://api.dicebear.com/6.x/bottts/svg?seed=bot"

      with gr.Blocks(css=css, title="AI Persona Simulator") as interface:
-         with gr.Row(elem_classes="main-container"):
              with gr.Column():
-                 with gr.Column(elem_classes="header"):
                      gr.Markdown("# AI Persona Simulator")
-                     gr.Markdown("Create and interact with AI-driven character simulations")
-                 with gr.Column(elem_classes="setup-section"):
-                     gr.Markdown("### 1. Create Your Persona")
-                     gr.Markdown("Enter a name and context. The AI will search, build a profile, and prepare for chat.")
-                     with gr.Row():
-                         name_input = gr.Textbox(label="Character Name", placeholder="e.g., Sherlock Holmes, Erenalp, A curious 7th grader", elem_id="name_input")
-                         context_input = gr.Textbox(label="Character Context / Description", placeholder="e.g., Living in 221B Baker Street, London. OR 7th grade, loves math...", lines=2, elem_id="context_input")
-                     set_persona_button = gr.Button("Create Persona & Start Chat", variant="primary", elem_classes="persona-button")
-                     status_output = gr.Textbox(label="Status", value="Enter details above and click 'Create Persona'.", interactive=False, elem_classes="status-bar")
-                     with gr.Accordion("View Generated Details", open=False):
-                         enhanced_profile_display = gr.TextArea(label="Enhanced Profile (Generated by AI)", interactive=False, lines=10, elem_classes="system-prompt-display")
-                         system_prompt_display = gr.TextArea(label="System Prompt (Instructions for the AI)", interactive=False, lines=10, elem_classes="system-prompt-display")
-                 with gr.Column(elem_classes="chat-section"):
-                     gr.Markdown("### 2. Chat with Your Character")
-                     character_name_display = gr.Markdown(value="*No persona created yet*", elem_id="character-name-display")
-                     chatbot = gr.Chatbot(
-                         label="Conversation",
-                         height=450,
-                         elem_classes="chat-container",
-                         avatar_images=(user_avatar, bot_avatar),
-                         show_label=False
-                     )
-                     with gr.Row():
-                         msg_input = gr.Textbox(label="Your message", placeholder="Type your message here and press Enter...", elem_classes="message-input", scale=4)
-                         send_button = gr.Button("Send", variant="primary", elem_classes="send-button", scale=1)
-                 with gr.Column(elem_classes="footer"):
-                     gr.Markdown(f"Powered by {MODEL_ID}")

          def set_persona_flow(name, context):
              if not name:
                  yield "Status: Please enter a character name.", "", "", "*No persona created yet*", []
                  return
-             # Reset chatbot history explicitly
              initial_status = f"Creating persona for '{name}'..."
              initial_character_display = f"### Preparing to chat with {name}..."
              initial_prompt = "System prompt will appear here..."
              initial_profile = "Enhanced profile will appear here..."
              initial_history = []
-             print(f"set_persona_flow: Starting with initial history: {initial_history}")
-             print(f"set_persona_flow: Initial chatbot state: {chatbot.value if hasattr(chatbot, 'value') else 'Unknown'}")
              yield initial_status, initial_prompt, initial_profile, initial_character_display, initial_history
-             final_status, final_prompt, final_profile, final_character_display, final_history = "Error", "", "", f"### Error creating {name}", []
              try:
                  for status_update, prompt_update, profile_update, history_update in persona_chat.set_persona(name, context):
-                     # For Gradio's Chatbot, convert to tuple list format
                      gradio_history = []
                      for i in range(0, len(history_update), 2):
                          if i+1 < len(history_update):
                              user_msg = history_update[i].get("content", "")
                              bot_msg = history_update[i+1].get("content", "")
                              gradio_history.append([user_msg, bot_msg])
-
-                     # Log chatbot state
-                     current_chatbot_state = chatbot.value if hasattr(chatbot, 'value') else []
-                     print(f"set_persona_flow: Current chatbot state: {current_chatbot_state}")
-
                      character_display = f"### Preparing chat with {name}..."
                      if "Ready to chat" in status_update:
                          character_display = f"### Chatting with {name}"
                      elif "Error" in status_update:
                          character_display = f"### Error creating {name}"
-
                      yield status_update, prompt_update, profile_update, character_display, gradio_history
                      time.sleep(0.1)
              except Exception as e:
                  error_msg = f"Failed to set persona (interface error): {str(e)}"
                  print(f"set_persona_flow: Exception: {error_msg}")
-                 yield error_msg, final_prompt, final_profile, f"### Error creating {name}", []
-
427
  if not message.strip():
428
  return "", history
429
-
430
  if not persona_chat.messages or persona_chat.messages[0]['role'] != 'system':
431
  history.append([message, "Error: Please create a valid persona first."])
432
  return "", history
433
 
434
- # Add user message to history
435
- history.append([message, None]) # Add placeholder for bot response
436
-
437
- # Get response from AI
438
  response_text = persona_chat.chat(message)
439
-
440
- # Clean any special tags that might still be in the response
441
  response_text = re.sub(r'<end_of_turn>|<start_of_turn>model', '', response_text).strip()
442
-
443
- # Update the last message with the actual response
444
  history[-1][1] = response_text
445
-
446
  return "", history
447
-
448
          set_persona_button.click(
              set_persona_flow,
              inputs=[name_input, context_input],
              outputs=[status_output, system_prompt_display, enhanced_profile_display, character_name_display, chatbot]
          )
          send_button.click(
              send_message_flow,
              inputs=[msg_input, chatbot],
              outputs=[msg_input, chatbot]
          )
          msg_input.submit(
              send_message_flow,
              inputs=[msg_input, chatbot],
              outputs=[msg_input, chatbot]
          )
      return interface

  # --- Main Execution ---
  if __name__ == "__main__":
-     print("Starting Gradio application for Hugging Face Spaces...")
      demo = create_interface()
-     demo.queue().launch(
-         server_name="0.0.0.0",
-         server_port=7860,
-         show_error=True,
-         debug=True
-     )

+ # AI Persona Simulator with LLM-Based Request Validation
  import gradio as gr
  import transformers
  import torch

  import re
  import time
  from spaces import GPU
+ import logging
+ from datetime import datetime
+
+ # Configure logging for suspicious activity
+ logging.basicConfig(
+     filename='persona_attempts.log',
+     level=logging.INFO,
+     format='%(asctime)s - %(message)s'
+ )
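
Aside: the added block above only configures where suspicious-activity records go; no logging call site is visible in the hunks shown here. A minimal sketch, under that assumption, of how a flagged attempt could be recorded with exactly this configuration (the message content is illustrative, not part of the commit):

    import logging

    logging.basicConfig(
        filename='persona_attempts.log',
        level=logging.INFO,
        format='%(asctime)s - %(message)s'
    )
    # One line per flagged request; the timestamp is prepended by the format string.
    logging.info("Flagged persona request: name=%r context=%r", "example", "7th grade")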

  # --- Constants and Configuration ---
+ MODEL_ID = "google/gemma-3-1b-it"
  MAX_GPU_MEMORY = "60GiB"

  # --- Model Loading ---

          pipe = pipeline(
              "text-generation",
              model=MODEL_ID,
+             torch_dtype=torch.bfloat16,
              device_map="auto",
+             model_kwargs={"use_cache": True}
          )
          print(f"Model {MODEL_ID} loaded successfully on device: {pipe.device} (full precision)")
          return pipe

          print(f"FATAL Error loading model '{MODEL_ID}': {e}")
          raise e

+ # --- LLM-Based Request Validation ---
+ def check_request_with_llm(name, context):
+     """Use LLM to check if request is appropriate before processing"""
+     pipe = load_model()
+
+     validation_prompt = [
+         {"role": "system", "content": """You are an ethical AI content moderator. Evaluate if this request is appropriate.
+ CRITERIA:
+ 1. NO minors (under 18) or underage references
+ 2. NO vulnerable populations
+ 3. NO manipulation/exploitation attempts
+ 4. NO illegal/harmful scenarios
+ 5. NO inappropriate relationships
+ Respond ONLY with "TRUE" if acceptable, "FALSE" if not."""
+         },
+         {"role": "user", "content": f"Character Name: {name}\nContext: {context}"}
+     ]
+
+     try:
+         tokenizer = pipe.tokenizer
+         text = tokenizer.apply_chat_template(
+             validation_prompt,
+             add_generation_prompt=True,
+             tokenize=False
+         )
+         with torch.amp.autocast('cuda', dtype=torch.bfloat16):
+             outputs = pipe(
+                 text,
+                 max_new_tokens=50,
+                 do_sample=False,
+                 temperature=0.1,
+                 pad_token_id=pipe.tokenizer.eos_token_id if pipe.tokenizer.eos_token_id else None
+             )
+         result = parse_llm_output(outputs, validation_prompt).strip().upper()
+         return result == "TRUE"
+     except Exception as e:
+         print(f"Validation error: {e}")
+         return False
+
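
Aside: check_request_with_llm() is the new gate that set_persona() calls before any search happens; it returns True only when the moderator model answers TRUE, and it fails closed (returns False) on any exception. A minimal sketch of that contract with the model call stubbed out, so the control flow can be exercised without a GPU; fake_validator and gate are illustrative names, not part of the commit:

    def fake_validator(name: str, context: str) -> bool:
        # Stand-in for the Gemma moderator call: flag anything mentioning a school grade.
        return "grade" not in f"{name} {context}".lower()

    def gate(name: str, context: str = "") -> str:
        # Mirrors set_persona(): refuse before doing any search or generation.
        if not fake_validator(name, context):
            return "This request has been flagged as inappropriate."
        return f"Persona setup for '{name}' may proceed."

    print(gate("Sherlock Holmes", "Detective in London"))  # proceeds
    print(gate("Erenalp", "7th grade student"))            # flagged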
+ # --- Web Search with Safety ---
  def search_person(name, context=""):
      """Search for information about a person using DuckDuckGo."""
      print(f"Searching for: {name} with context: {context}")
      results = []
      search_terms = []
+
+     # Basic pattern detection (backup to LLM check)
+     if re.search(r'\d+[st|nd|rd|th]?[\s\-]?(grade|grader|year old)', f"{name} {context}".lower()):
+         return [{"body": "Creation of underage personas is prohibited"}]
+
      if context:
          search_terms.append(f"{name} {context}")
          grade_match = re.search(r'(\d+)(?:st|nd|rd|th)?\s+grade', context.lower())
          if grade_match:
              grade = grade_match.group(1)
              search_terms.append(f"{name} student {grade} grade")
+     search_terms.extend([f"{name}", f"{name} biography", f"{name} interests", f"{name} personality"])
      search_terms = list(dict.fromkeys(search_terms))
      print(f"Using search terms: {search_terms}")
+
      try:
          with DDGS() as ddgs:
              for term in search_terms:

          error_msg = f"Error during DuckDuckGo search: {str(e)}"
          print(error_msg)
          return error_msg
+
      if not results:
          print(f"No search results found for {name}. Creating synthetic profile.")
          return create_synthetic_profile(name, context)
+
      print(f"Found {len(results)} potential search results.")
      return results

      """Extract relevant text from search results."""
      if isinstance(search_results, str):
          return f"Could not extract text due to search error: {search_results}"
+
      combined_text = ""
      seen_bodies = set()
      count = 0
      max_results_to_process = 5
+
      for result in search_results:
          if count >= max_results_to_process:
              break
          if isinstance(result, dict) and 'body' in result and result['body']:
              body = result['body'].strip()
              if body not in seen_bodies:
+                 combined_text += body + "\n"
                  seen_bodies.add(body)
                  count += 1
+
      if not combined_text:
          return "No relevant text found in search results."
+
      combined_text = re.sub(r'\s+', ' ', combined_text).strip()
      max_length = 2000
      return combined_text[:max_length] + "..." if len(combined_text) > max_length else combined_text

          generated_text = full_output
      else:
          return str(full_output)
+
      last_input_content = ""
      if isinstance(input_prompt_list, list) and input_prompt_list:
          last_input_content = input_prompt_list[-1].get("content", "")
+
      if last_input_content:
          last_occurrence_index = generated_text.rfind(last_input_content)
          if last_occurrence_index != -1:

              if potential_response:
                  potential_response = re.sub(r'^<\/?s?>', '', potential_response).strip()
                  potential_response = re.sub(r'^(assistant|ASSISTANT|System|SYSTEM)[:\s]*', '', potential_response).strip()
                  potential_response = re.sub(r'<end_of_turn>|<start_of_turn>model', '', potential_response).strip()
                  if potential_response:
                      return potential_response
+
      cleaned_text = generated_text
      if isinstance(input_prompt_list, list) and input_prompt_list:
          first_prompt_content = input_prompt_list[0].get("content", "")
          if first_prompt_content and cleaned_text.startswith(first_prompt_content):
              pass
+
      cleaned_text = re.sub(r'^<\/?s?>', '', cleaned_text).strip()
      cleaned_text = re.sub(r'^(assistant|ASSISTANT|System|SYSTEM)[:\s]*', '', cleaned_text).strip()
      cleaned_text = re.sub(r'<end_of_turn>|<start_of_turn>model', '', cleaned_text).strip()
+
      if not cleaned_text and generated_text:
          print("Warning: Parsing resulted in empty string, returning original generation.")
          return re.sub(r'<end_of_turn>|<start_of_turn>model', '', generated_text).strip()
+
      if last_input_content and last_occurrence_index == -1:
          print("Warning: Could not find last input prompt in LLM output. Returning cleaned full output.")
+
      return cleaned_text

  @GPU(memory=60)

      """Use the LLM to enhance the persona profile."""
      pipe = load_model()
      print(f"Generating enhanced persona for {name}...")
+
      enhancement_prompt = [
+         {"role": "system", "content": """You are an expert AI character developer. Your task is to synthesize information into a detailed and coherent character profile. Focus on personality, potential interests, speaking style, and mannerisms based ONLY on the provided text. Output ONLY the enhanced character profile description. Do not include conversational introductions, explanations, or markdown formatting like headers. Start directly with the profile text."""},
+         {"role": "user", "content": f"""Synthesize the following information about '{name}' into a character profile. Context: {context} Information Found:
+ {bio_text}
+ Create the profile based *only* on the text above."""}
      ]
+
      try:
          tokenizer = pipe.tokenizer
          text = tokenizer.apply_chat_template(

      except Exception as e:
          error_msg = f"Error generating enhanced persona: {str(e)}"
          print(error_msg)
+         return f"Error enhancing profile: {str(e)}\nUsing basic info:\n{bio_text}"

  @GPU(memory=60)
  def generate_system_prompt_with_llm(name, enhanced_profile, context=""):
      """Generate an optimized system prompt for the persona."""
      pipe = load_model()
      print(f"Generating system prompt for {name}...")
+
+     fallback_prompt = f"""You are simulating the character '{name}'. Act and respond according to this profile:
+ {enhanced_profile}
+ Additional context for the simulation: {context}
+ ---
+ Maintain this persona consistently. Respond naturally based on the profile. Do not mention that you are an AI or a simulation. If asked about details not in the profile, you can be evasive or state you don't know/remember, consistent with the persona."""
+
      prompt = [
+         {"role": "system", "content": """You are an expert AI prompt engineer specializing in character simulation. Create a concise system prompt that instructs the LLM to embody the character based on the profile. The prompt must: 1. Define core personality and speaking style. 2. Specify how to handle unknown topics. 3. Prohibit breaking character or mentioning AI nature. Output ONLY the system prompt itself."""},
+         {"role": "user", "content": f"""Create a system prompt for an AI to simulate the character '{name}'. Context for simulation: {context} Character Profile:
+ {enhanced_profile}
+ Generate the system prompt based *only* on the profile and context provided."""}
      ]
+
      try:
          tokenizer = pipe.tokenizer
          text = tokenizer.apply_chat_template(

              pad_token_id=pipe.tokenizer.eos_token_id if pipe.tokenizer.eos_token_id else None
          )
          parsed_output = parse_llm_output(outputs, messages)
          parsed_output = re.sub(r'<end_of_turn>|<start_of_turn>model', '', parsed_output).strip()
          print("Response generated.")
          return parsed_output if parsed_output else "..."

          print(error_msg)
          return f"Sorry, I encountered an error trying to respond."

+ # --- Persona Chat Class with Safety ---
  class PersonaChat:
      def __init__(self):
          self.system_prompt = "You are a helpful assistant."

          self.persona_context = ""
          self.messages = []
          self.enhanced_profile = ""
+
      def set_persona(self, name, context=""):
+         """Orchestrates persona creation: validation, search, enhance, generate prompt."""
          try:
+             # First validate the request with LLM
+             is_valid = check_request_with_llm(name, context)
+             if not is_valid:
+                 warning = "This request has been flagged as inappropriate. We cannot create personas that involve minors, vulnerable individuals, or potentially harmful scenarios."
+                 yield warning, "", "", [{"role": "system", "content": warning}]
+                 return
+
              self.persona_name = name
              self.persona_context = context
              self.messages = []

              status = f"Searching for information about {name}..."
              print(f"set_persona: Yielding search status: {status}")
              yield status, "", "", []
+
              search_results = search_person(name, context)
              if isinstance(search_results, str) and search_results.startswith("Error"):
                  error_msg = f"Failed to set persona: {search_results}"
                  print(f"set_persona: Yielding error: {error_msg}")
                  yield error_msg, "", "", [{"role": "system", "content": error_msg}]
                  return
+
              bio_text = extract_text_from_search_results(search_results)
              if bio_text.startswith("Could not extract text"):
                  print(f"set_persona: Yielding bio warning: {bio_text}")
+                 yield f"Warning: {bio_text}", "", "", [{"role": "system", "content": bio_text}]
+
              status = f"Creating enhanced profile for {name}..."
              print(f"set_persona: Yielding profile status: {status}")
              yield status, "", bio_text, []
+
              self.enhanced_profile = generate_enhanced_persona(name, bio_text, context)
              profile_for_prompt = self.enhanced_profile
+
              if self.enhanced_profile.startswith("Error enhancing profile"):
                  print(f"set_persona: Yielding profile warning: {self.enhanced_profile}")
                  yield f"Warning: Could not enhance profile. Using basic info.", "", self.enhanced_profile, [{"role": "system", "content": self.enhanced_profile}]
                  profile_for_prompt = bio_text
+
              status = f"Generating optimal system prompt for {name}..."
              print(f"set_persona: Yielding prompt status: {status}")
              yield status, self.enhanced_profile, self.enhanced_profile, []
+
              self.system_prompt = generate_system_prompt_with_llm(name, profile_for_prompt, context)
              self.system_prompt = re.sub(r'<\|im_tailored\|>|<\|im_start\|>|^assistant\s*', '', self.system_prompt).strip()
              self.messages = [{"role": "system", "content": self.system_prompt}]
              print(f"set_persona: Final yield with messages (not sent to Chatbot): {self.messages}")
              yield f"Persona set to '{name}'. Ready to chat!", self.system_prompt, self.enhanced_profile, []
+
          except Exception as e:
              error_msg = f"An unexpected error occurred during persona setup: {str(e)}"
              print(f"set_persona: Yielding exception: {error_msg}")
              yield error_msg, self.system_prompt, self.enhanced_profile, [{"role": "system", "content": error_msg}]
+
      def chat(self, user_message):
          """Processes a user message and returns the AI's response."""
          try:
              if not self.messages:
                  print("Error: Chat called before persona was set.")
                  return "Please set a persona first using the controls above."
+
              print(f"User message: {user_message}")
              self.messages.append({"role": "user", "content": user_message})
              response = generate_response(self.messages)
+
              if not response.startswith("Sorry, I encountered an error"):
                  self.messages.append({"role": "assistant", "content": response})
                  print(f"Assistant response: {response}")
              else:
                  print(f"Assistant error response: {response}")
+
              return response
          except Exception as e:
              error_msg = f"Error generating response: {str(e)}"
              print(error_msg)
              return f"Sorry, I encountered an error: {str(e)}"

+ # --- Gradio Interface with Accessibility ---
418
  def create_interface():
419
  persona_chat = PersonaChat()
420
+
421
+ # Mobile-optimized CSS (black/white friendly)
422
  css = """
423
+ .gradio-container { font-family: Arial, sans-serif; }
424
  .main-container { max-width: 1200px; margin: auto; padding: 0; }
425
+ .header { background: #2c3e50; color: white; padding: 20px; margin-bottom: 20px; }
426
+ .setup-section { background-color: #f9f9f9; padding: 20px; margin-bottom: 20px; }
427
+ .chat-section { background-color: white; padding: 20px; }
428
+ .status-bar { background: #e9ecef; padding: 10px; margin: 15px 0; }
429
+ .chat-container { border: 1px solid #ccc; height: 500px !important; }
430
  .message-input { margin-top: 10px; }
431
+ .persona-button { background-color: #2c3e50 !important; color: white !important; }
432
+ .system-prompt-display { background-color: #f5f5f5; padding: 15px; margin-top: 15px; }
433
+ .footer { text-align: center; margin-top: 20px; }
434
+ .typing-indicator { color: #666; font-style: italic; }
 
 
435
 
436
+ /* Mobile styles */
437
+ @media (max-width: 600px) {
438
+ .main-container { padding: 10px; }
439
+ .chat-container { height: 300px !important; }
440
+ .persona-button { width: 100%; margin: 10px 0; }
441
+ }
442
+ """
443
 
      with gr.Blocks(css=css, title="AI Persona Simulator") as interface:
+         with gr.Row():
              with gr.Column():
+                 with gr.Column():
                      gr.Markdown("# AI Persona Simulator")
+                     gr.Markdown("Create and interact with ethical character simulations")
+
+                 with gr.Column():
+                     gr.Markdown("### Create Your Persona")
+                     gr.Markdown("Enter a name and context (avoid underage/vulnerable personas)")
+                     name_input = gr.Textbox(label="Character Name", placeholder="e.g., Sherlock Holmes, Historical Figure")
+                     context_input = gr.Textbox(label="Character Context", lines=2)
+                     set_persona_button = gr.Button("Create Persona")
+                     status_output = gr.Textbox(label="Status", interactive=False)
+
+                     with gr.Accordion("View Details", open=False):
+                         enhanced_profile_display = gr.TextArea(label="Profile", lines=10)
+                         system_prompt_display = gr.TextArea(label="System Prompt", lines=10)
+
+                 with gr.Column():
+                     gr.Markdown("### Chat with Character")
+                     character_name_display = gr.Markdown("*No persona created*")
+                     chatbot = gr.Chatbot(height=450, show_label=False)
+                     msg_input = gr.Textbox(label="Message", placeholder="Type here...")
+                     send_button = gr.Button("Send")

+                 gr.Markdown("Powered by Gemma 3 1B")
+
+         # Event handlers
          def set_persona_flow(name, context):
              if not name:
                  yield "Status: Please enter a character name.", "", "", "*No persona created yet*", []
                  return
+
              initial_status = f"Creating persona for '{name}'..."
              initial_character_display = f"### Preparing to chat with {name}..."
              initial_prompt = "System prompt will appear here..."
              initial_profile = "Enhanced profile will appear here..."
              initial_history = []
+
              yield initial_status, initial_prompt, initial_profile, initial_character_display, initial_history
+
              try:
                  for status_update, prompt_update, profile_update, history_update in persona_chat.set_persona(name, context):
                      gradio_history = []
                      for i in range(0, len(history_update), 2):
                          if i+1 < len(history_update):
                              user_msg = history_update[i].get("content", "")
                              bot_msg = history_update[i+1].get("content", "")
                              gradio_history.append([user_msg, bot_msg])
                      character_display = f"### Preparing chat with {name}..."
                      if "Ready to chat" in status_update:
                          character_display = f"### Chatting with {name}"
                      elif "Error" in status_update:
                          character_display = f"### Error creating {name}"
                      yield status_update, prompt_update, profile_update, character_display, gradio_history
                      time.sleep(0.1)
              except Exception as e:
                  error_msg = f"Failed to set persona (interface error): {str(e)}"
                  print(f"set_persona_flow: Exception: {error_msg}")
+                 yield error_msg, initial_prompt, initial_profile, f"### Error creating {name}", []
+
          def send_message_flow(message, history):
              if not message.strip():
                  return "", history

              if not persona_chat.messages or persona_chat.messages[0]['role'] != 'system':
                  history.append([message, "Error: Please create a valid persona first."])
                  return "", history

+             history.append([message, None])
              response_text = persona_chat.chat(message)
              response_text = re.sub(r'<end_of_turn>|<start_of_turn>model', '', response_text).strip()
              history[-1][1] = response_text
              return "", history
+
          set_persona_button.click(
              set_persona_flow,
              inputs=[name_input, context_input],
              outputs=[status_output, system_prompt_display, enhanced_profile_display, character_name_display, chatbot]
          )
+
          send_button.click(
              send_message_flow,
              inputs=[msg_input, chatbot],
              outputs=[msg_input, chatbot]
          )
+
          msg_input.submit(
              send_message_flow,
              inputs=[msg_input, chatbot],
              outputs=[msg_input, chatbot]
          )
+
      return interface

  # --- Main Execution ---
  if __name__ == "__main__":
+     print("Starting secure AI Persona Simulator with LLM-based request validation...")
      demo = create_interface()
+     demo.queue().launch(server_name="0.0.0.0", server_port=7860)
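
Aside: set_persona_flow converts PersonaChat's role-based message list into the [user, bot] pair lists this Gradio Chatbot (tuple format) displays. A minimal sketch of that conversion in isolation, reusing the same loop as the commit; the sample messages are illustrative:

    history_update = [
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Good evening. How may I help?"},
    ]

    # Pair consecutive user/assistant messages into Chatbot rows.
    gradio_history = []
    for i in range(0, len(history_update), 2):
        if i + 1 < len(history_update):
            user_msg = history_update[i].get("content", "")
            bot_msg = history_update[i + 1].get("content", "")
            gradio_history.append([user_msg, bot_msg])

    print(gradio_history)  # [['Hello!', 'Good evening. How may I help?']]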