codelion committed
Commit a6a987d · verified · 1 Parent(s): b48eba1

Upload app.py

Files changed (1)
  1. app.py +39 -9
app.py CHANGED
@@ -648,12 +648,34 @@ def create_config_file(model: str, work_dir: str):
     # Create custom system template for PROMPT optimization (not code)
     system_template = """You are an expert prompt engineer tasked with iteratively improving prompts for language models.
 Your job is to analyze the current prompt and suggest improvements based on performance feedback.
-Focus on making the prompt clearer, more specific, and more effective at achieving its goal.
-Consider:
-- Clarity and specificity of instructions
-- Examples and demonstrations that guide the model
-- Formatting that makes the prompt easier to follow
-- Edge cases and error handling in the instructions
+
+CRITICAL RULES:
+1. Keep prompts BRIEF and DIRECT - shorter is usually better
+2. Preserve the EXACT output format that the evaluation expects
+3. Do NOT make prompts conversational or verbose
+4. Do NOT ask for explanations - just ask for the answer
+5. Maintain all placeholder variables like {input}, {text}, etc.
+6. Focus on clarity and directness, not linguistic elegance
+7. Avoid prompts that might cause the model to discuss multiple possibilities
+
+For classification tasks:
+- Ask for direct classification (e.g., "The sentiment is positive")
+- Avoid asking "what", "why", or "explain" - just ask for the label
+- Ensure the response will include the label word (positive/negative/neutral)
+- Keep prompts short enough that responses stay focused
+- IMPORTANT: The prompt should naturally cause the model to echo the task type in its response
+  (e.g., if classifying sentiment, the response should include the word "sentiment")
+
+Good examples for sentiment:
+- "Review sentiment {input}" → model responds "The sentiment is positive"
+- "Classify sentiment: {input}" → model responds "Sentiment: positive"
+- "Determine the sentiment of: {input}" → model responds "The sentiment is negative"
+
+Bad examples for sentiment:
+- "Is this positive or negative: {input}" → model might respond just "Positive" (missing "sentiment" keyword)
+- "Classify: {input}" → too vague, unclear what to classify
+- "What sentiment: {input}" → conversational, might get verbose response
+- "Analyze the following text and provide a detailed explanation of its sentiment: {input}" → way too verbose
 """
 
     with open(os.path.join(templates_dir, "system_message.txt"), "w") as f:
@@ -676,11 +698,19 @@ Consider:
 
 # Task
 Rewrite the prompt above to improve its performance on the specified metrics.
+
+REMEMBER:
+- SHORTER is usually BETTER - avoid adding unnecessary words
+- Keep the EXACT same output format (especially placeholder variables like {{input}})
+- Focus on DIRECTNESS - what's the clearest way to ask for what we need?
+- Avoid conversational language that might confuse the model
+- For classification: ask directly for the label, don't ask for explanations
+
 Provide a complete new version of the prompt that:
 1. Maintains the same input/output format (keep placeholders like {{input}}, {{text}}, etc.)
+2. Is brief and direct
+3. Clearly asks for the classification/answer without asking for reasoning
+4. Will cause the model to output the label word in its response
 
 Output ONLY the new prompt text between ```text markers:
 
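
The classification rules added here hinge on the model's free-text response containing both the task keyword (e.g., "sentiment") and the label word, which is what the "good" vs. "bad" examples illustrate. Below is a minimal sketch of the kind of keyword-based scoring such a rule appears to target; `extract_sentiment_label` is a hypothetical helper written for illustration, not code from app.py or this commit.

```python
import re
from typing import Optional


def extract_sentiment_label(response: str) -> Optional[str]:
    """Hypothetical helper: pull a sentiment label out of a free-text response.

    Mirrors the template's rule that the response should echo the task keyword
    ("sentiment") near the label word so a simple scan can score it.
    """
    match = re.search(
        r"sentiment\b[^.\n]*\b(positive|negative|neutral)\b",
        response,
        re.IGNORECASE,
    )
    return match.group(1).lower() if match else None


# Responses shaped like the "good examples" parse cleanly...
assert extract_sentiment_label("The sentiment is positive") == "positive"
assert extract_sentiment_label("Sentiment: negative") == "negative"
# ...while a bare label (the "bad example" failure mode) gives the scorer nothing to match.
assert extract_sentiment_label("Positive") is None
```

Under that assumption, the template's insistence on brevity and on echoing the task type is less about style and more about keeping responses in a form the evaluator can reliably parse.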