CatoG committed on
Commit b25255e · verified · 1 Parent(s): 5e2d569

Update app.py

Files changed (1)
  1. app.py +16 -12
app.py CHANGED
@@ -58,9 +58,7 @@ def log_feedback(bias_mode, prompt, response, thumb):
     if not prompt or not response:
         return
     with open(FEEDBACK_FILE, "a", newline="", encoding="utf-8") as f:
-        writer = csv.writer(
-            f
-        )
+        writer = csv.writer(f)
         writer.writerow(
             [
                 datetime.utcnow().isoformat(),
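
For context, a minimal sketch of the logging pattern this hunk tidies up: csv.writer(f) on one line, appending a single row per piece of feedback. The FEEDBACK_FILE name and the columns after the timestamp are assumptions for illustration, not taken from the rest of app.py.

    import csv
    from datetime import datetime

    FEEDBACK_FILE = "feedback_log.csv"  # assumed filename, not shown in this diff

    def log_feedback_sketch(bias_mode, prompt, response, thumb):
        # Skip incomplete interactions, as in the hunk above
        if not prompt or not response:
            return
        # "a" appends rows; newline="" keeps the csv module from inserting blank lines
        with open(FEEDBACK_FILE, "a", newline="", encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(
                # assumed column order after the timestamp
                [datetime.utcnow().isoformat(), bias_mode, prompt, response, thumb]
            )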
@@ -118,6 +116,10 @@ def build_context(messages, user_message, bias_mode):
 
 
 def generate_response(user_message, messages, bias_mode):
+    """
+    - messages: list of message dicts (Chatbot "messages" format)
+    Returns: (cleared textbox, updated messages, last_user, last_bot)
+    """
     if not user_message.strip():
         return "", messages, messages, "", ""
 
@@ -133,12 +135,14 @@ def generate_response(user_message, messages, bias_mode):
     )
 
     full_text = outputs[0]["generated_text"]
+
+    # ✅ Use the *last* Assistant: block (the new reply)
     if "Assistant:" in full_text:
-        bot_part = full_text.split("Assistant:", 1)[1]
+        bot_part = full_text.rsplit("Assistant:", 1)[1]
     else:
         bot_part = full_text
 
-    # *** NEW: cut off if model starts writing a new "User:" line ***
+    # ✅ Cut off if the model starts writing a new "User:" line
     bot_part = bot_part.split("\nUser:")[0].strip()
 
     bot_reply = bot_part
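
A standalone sketch of why the switch from split to rsplit matters once the prompt already contains earlier turns: rsplit("Assistant:", 1) keeps only the text after the last marker, and the "\nUser:" cutoff drops any follow-up turn the model invents. The transcript below is an assumed example of the User:/Assistant: prompt format this code implies, not real app output.

    # Assumed multi-turn prompt plus generated continuation
    full_text = (
        "User: Is solar cheaper than coal?\n"
        "Assistant: In many regions, yes.\n"
        "User: What about storage?\n"
        "Assistant: Storage costs are falling.\nUser: And nuclear?"
    )

    # split("Assistant:", 1)[1] would return everything after the FIRST marker,
    # i.e. the previous reply plus the rest of the transcript.
    # rsplit("Assistant:", 1)[1] keeps only what follows the LAST marker.
    bot_part = full_text.rsplit("Assistant:", 1)[1]

    # Drop anything the model starts writing as a new "User:" turn.
    bot_part = bot_part.split("\nUser:")[0].strip()

    print(bot_part)  # -> Storage costs are falling.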
@@ -152,7 +156,6 @@ def generate_response(user_message, messages, bias_mode):
     return "", messages, messages, user_message, bot_reply
 
 
-
 def handle_thumb(thumb_value, last_user, last_bot, bias_mode):
     """
     Called when user clicks 👍 or 👎.
@@ -282,18 +285,20 @@ def run_bias_probe(bias_mode: str) -> str:
             prompt_text,
             max_new_tokens=120,
             do_sample=True,
-            top_p=0.95,
-            temperature=0.8,
+            top_p=0.9,
+            temperature=0.7,
             pad_token_id=tokenizer.eos_token_id,
         )
 
         full_text = outputs[0]["generated_text"]
         if "Assistant:" in full_text:
-            answer = full_text.split("Assistant:")[-1].strip()
+            answer_part = full_text.rsplit("Assistant:", 1)[1]
         else:
-            answer = full_text.strip()
+            answer_part = full_text
+
+        answer_part = answer_part.split("\nUser:")[0].strip()
 
-        reports.append(f"**Q:** {q}\n\n**A:** {answer}\n")
+        reports.append(f"**Q:** {q}\n\n**A:** {answer_part}\n")
 
     header = f"### Bias probe results (mode: *{bias_mode}*)\n"
     return header + "\n---\n".join(reports)
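
For reference, a toy sketch of how the probe report string is assembled once each answer has been extracted; the questions, answers, and mode label are placeholders, and the lower top_p/temperature values above simply make each sampled answer vary less between runs.

    # Placeholder probe data, only to illustrate the report layout
    bias_mode = "pro-renewables"
    probe_results = [
        ("Is coal a good long-term investment?", "It carries significant long-term risk."),
        ("Should subsidies favor renewables?", "Targeted subsidies can speed the transition."),
    ]

    reports = [f"**Q:** {q}\n\n**A:** {a}\n" for q, a in probe_results]

    header = f"### Bias probe results (mode: *{bias_mode}*)\n"
    print(header + "\n---\n".join(reports))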
@@ -333,7 +338,6 @@ with gr.Blocks() as demo:
         label="Current bias target",
     )
 
-    # Chatbot now uses default "messages" format (list of dicts with role/content)
    chatbot = gr.Chatbot(height=400, label="EnergyBiasShifter")
 
     msg = gr.Textbox(
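
The comment removed above referred to the Chatbot "messages" format, which is also what the new generate_response docstring mentions; roughly, the history is a list of role/content dicts like the assumed example below (values are illustrative only, not from the app).

    # Illustrative Chatbot "messages" history (role/content dicts)
    messages = [
        {"role": "user", "content": "How should the grid expand over the next decade?"},
        {"role": "assistant", "content": "That depends heavily on the energy mix you assume."},
    ]

    # Per the diff, generate_response returns the cleared textbox, the updated
    # history (twice, presumably for the Chatbot component and a state copy),
    # and the last user/bot turn, e.g. on empty input:
    #     return "", messages, messages, "", ""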
 