Files changed (1)
  1. app.py +65 -45
app.py CHANGED
@@ -1,69 +1,89 @@
-from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
-import datetime
-import requests
-import pytz
-import yaml
+from smolagents import CodeAgent, HfApiModel, load_tool, tool
 from tools.final_answer import FinalAnswerTool
-
 from Gradio_UI import GradioUI
 
-# Below is an example of a tool that does nothing. Amaze us with your creativity !
-@tool
-def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
-    #Keep this format for the description / args / args description but feel free to modify the tool
-    """A tool that does nothing yet
-    Args:
-        arg1: the first argument
-        arg2: the second argument
-    """
-    return "What magic will you build ?"
+import requests
+import yaml
+
 
+# ----------------------------
+# 1️⃣ TOOL: WEATHER IN PARIS
+# ----------------------------
 @tool
-def get_current_time_in_timezone(timezone: str) -> str:
-    """A tool that fetches the current local time in a specified timezone.
-    Args:
-        timezone: A string representing a valid timezone (e.g., 'America/New_York').
-    """
+def get_weather_paris() -> str:
+    """Returns the current weather in Paris (temperature + description)."""
     try:
-        # Create timezone object
-        tz = pytz.timezone(timezone)
-        # Get current time in that timezone
-        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
-        return f"The current local time in {timezone} is: {local_time}"
+        url = "https://api.open-meteo.com/v1/forecast?latitude=48.8566&longitude=2.3522&current_weather=true"
+        response = requests.get(url).json()
+
+        weather = response["current_weather"]
+        temperature = weather["temperature"]
+        weather_code = weather["weathercode"]
+
+        descriptions = {
+            0: "clear sky",
+            1: "mainly clear",
+            2: "partly cloudy",
+            3: "overcast",
+            45: "fog",
+            48: "freezing fog",
+            51: "light drizzle",
+            53: "drizzle",
+            55: "heavy drizzle",
+            61: "light rain",
+            63: "rain",
+            65: "heavy rain",
+            71: "light snow",
+            73: "snow",
+            75: "heavy snow",
+        }
+
+        description = descriptions.get(weather_code, "unknown weather conditions")
+
+        return f"In Paris, it is {temperature}°C with {description}."
+
     except Exception as e:
-        return f"Error fetching time for timezone '{timezone}': {str(e)}"
 
 
-final_answer = FinalAnswerTool()
+        return f"Error while fetching the weather: {str(e)}"
+
+
+# ---------------------------------------------------
+# 2️⃣ TOOL: IMAGE GENERATION
+# ---------------------------------------------------
+image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+
+
+# ---------------------------------
+# 3️⃣ LOAD THE SYSTEM PROMPT
+# ---------------------------------
+with open("prompts.yaml", "r") as stream:
+    prompt_templates = yaml.safe_load(stream)
 
-# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
-# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
 
+# -------------------------
+# 4️⃣ MODEL
+# -------------------------
 model = HfApiModel(
-max_tokens=2096,
-temperature=0.5,
-model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
-custom_role_conversions=None,
+    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
+    max_tokens=2048,
+    temperature=0.5,
 )
 
 
-# Import tool from Hub
-image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+# -------------------------
+# 5️⃣ AGENT CREATION
+# -------------------------
+final_answer = FinalAnswerTool()
 
-with open("prompts.yaml", 'r') as stream:
-    prompt_templates = yaml.safe_load(stream)
-
 agent = CodeAgent(
     model=model,
-    tools=[final_answer], ## add your tools here (don't remove final answer)
+    tools=[final_answer, get_weather_paris, image_generation_tool],
     max_steps=6,
     verbosity_level=1,
-    grammar=None,
-    planning_interval=None,
-    name=None,
-    description=None,
     prompt_templates=prompt_templates
 )
 
 
-GradioUI(agent).launch()
+# -------------------------
+# 6️⃣ GRADIO LAUNCH
+# -------------------------
+GradioUI(agent).launch()
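
The new `get_weather_paris` tool only relies on two fields of the Open-Meteo response, `current_weather.temperature` and `current_weather.weathercode`. Below is a minimal standalone sketch (not part of the diff; the script layout and the `timeout` value are my own choices) for sanity-checking those assumptions before deploying the Space:

```python
# Standalone sanity check (not part of app.py): queries the same Open-Meteo
# endpoint that get_weather_paris uses and prints the two fields the tool reads.
import requests

URL = (
    "https://api.open-meteo.com/v1/forecast"
    "?latitude=48.8566&longitude=2.3522&current_weather=true"
)

def main() -> None:
    response = requests.get(URL, timeout=10)  # timeout added here as a precaution
    response.raise_for_status()
    current = response.json()["current_weather"]
    print(f"temperature: {current['temperature']} °C")
    print(f"weathercode: {current['weathercode']}")

if __name__ == "__main__":
    main()
```

If the printed `weathercode` is not one of the keys in the `descriptions` dict, the tool falls back to "unknown weather conditions", so the agent still gets a usable answer.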