from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from transformers import pipeline  # For local NLP analysis

from Gradio_UI import GradioUI

# The template's placeholder tool ("a tool that does nothing") has been replaced by the custom tool below.

# Tools and model already present in the environment
search_tool = DuckDuckGoSearchTool()

# Set up local NLP pipelines with Hugging Face for text analysis
sentiment_analyzer = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
topic_classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
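
# For reference, the pipelines above return structures like the following (scores are illustrative):
#   sentiment_analyzer("I love building agents")
#     -> [{'label': 'POSITIVE', 'score': 0.99}]
#   topic_classifier("A thread about rockets", ["tech", "politics"])
#     -> {'sequence': '...', 'labels': ['tech', 'politics'], 'scores': [0.9, 0.1]}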

@tool
def my_custom_tool(x_username: str, days_in_past: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that creates a fictional psychological portrait based on an X user's recent activity using Hugging Face tools.
    Args:
        x_username: The X username to analyze (e.g., 'elonmusk')
        days_in_past: Number of days in the past to analyze (max 30)
    """
    # Check if the number of days is within acceptable range
    if days_in_past < 1 or days_in_past > 30:
        return "Please choose a number of days between 1 and 30."

    # Calculate the time range
    current_date = datetime.datetime.now()
    start_date = current_date - datetime.timedelta(days=days_in_past)
    date_range = f"from {start_date.strftime('%Y-%m-%d')} to {current_date.strftime('%Y-%m-%d')}"

    # Analyze available data using adapted tools
    posts_data = analyze_x_activity_with_hf(x_username, days_in_past)
    if not posts_data or not posts_data.get("content"):
        return f"No recent activity data found for @{x_username} in the last {days_in_past} days."

    # Generate the psychological portrait
    portrait = craft_psychological_portrait(x_username, posts_data, date_range)
    return portrait
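
# Illustrative usage (not executed here; the username is a placeholder):
#   my_custom_tool("some_username", 7)
# returns a short fictional portrait string, or an explanatory message when no
# public activity can be found.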

def analyze_x_activity_with_hf(username: str, days: int) -> dict:
    """Use Hugging Face-compatible tools to analyze X activity."""
    # Search via DuckDuckGo to simulate posts (no direct X access)
    query = f"site:x.com {username} -inurl:(signup | login)"
    try:
        search_results = search_tool(query)
        # DuckDuckGoSearchTool typically returns a formatted string of results;
        # also handle a list of result dicts defensively.
        if isinstance(search_results, str):
            content = search_results
        else:
            content = " ".join(r["snippet"] for r in search_results[:5] if r.get("snippet"))
    except Exception:
        content = f"Recent activity by {username}"  # Fallback if the search fails

    # Return empty dict if no content is found
    if not content:
        return {}

    # Analyze tone with DistilBERT
    sentiment = sentiment_analyzer(content[:512])[0]  # Truncate the input (characters) to stay within the model's limits
    tone = "positive" if sentiment["label"] == "POSITIVE" else "negative" if sentiment["label"] == "NEGATIVE" else "neutral"

    # Extract themes with zero-shot classification
    candidate_labels = ["tech", "politics", "humor", "science", "personal", "nature", "philosophy"]
    theme_result = topic_classifier(content[:512], candidate_labels, multi_label=False)
    top_themes = [label for label, score in zip(theme_result["labels"], theme_result["scores"]) if score > 0.5][:2]
    if not top_themes:
        top_themes = [theme_result["labels"][0]]  # Take the most probable if nothing above 0.5

    # Count words
    word_count = len(content.split())

    return {
        "content": content,
        "tone": tone,
        "themes": top_themes,
        "word_count": word_count
    }
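
# Shape of the dict returned above (values are illustrative):
#   {"content": "...", "tone": "positive", "themes": ["tech", "science"], "word_count": 42}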

def craft_psychological_portrait(username: str, posts_data: dict, date_range: str) -> str:
    """Helper function to craft a fictional psychological portrait."""
    tone = posts_data["tone"]
    themes = " and ".join(posts_data["themes"])
    word_count = posts_data["word_count"]

    # Generate a creative description based on tone
    if tone == "positive":
        intro = f"@{username}, over {date_range}, emerges as a radiant soul, gazing at the world with unyielding hope."
        trait = f"Your words weave {themes} into a tapestry of possibility, each of your {word_count} words a spark of light."
    elif tone == "negative":
        intro = f"@{username}, across {date_range}, walks a quiet path, shadowed by gentle sorrow."
        trait = f"In {themes}, your {word_count} words murmur like rain, painting a world both tender and lost."
    else:  # neutral or other
        intro = f"@{username}, within {date_range}, stands as an explorer of the unknown, eyes wide with wonder."
        trait = f"Your {word_count} words chase {themes}, each a question unfurling toward the infinite."

    return f"{intro} {trait}"

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"


final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded; use another model or the following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'

model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # it is possible that this model may be overloaded
    custom_role_conversions=None,
)


# Import tool from Hub (note: it is loaded here, but it must be added to the agent's tools list below to be usable)
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
    
agent = CodeAgent(
    model=model,
    tools=[final_answer, my_custom_tool, search_tool],  # Add compatible HF tools
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)
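
# The agent can also be exercised without the web UI (illustrative):
#   agent.run("What time is it right now in Asia/Tokyo?")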


GradioUI(agent).launch()