Alina Lozovskaya committed on
Commit
931a3c4
·
1 Parent(s): 51f6aa1

Apply Ruff

Browse files
src/reachy_mini_conversation_demo/audio/gstreamer.py CHANGED
@@ -1,12 +1,13 @@
1
- import logging # noqa: D100
2
- from threading import Thread
3
  from typing import Optional
 
4
 
5
  import gi
6
 
 
7
  gi.require_version("Gst", "1.0")
8
  gi.require_version("GstApp", "1.0")
9
- from gi.repository import GLib, Gst # noqa: E402
10
 
11
 
12
  class GstPlayer:
 
1
+ import logging
 
2
  from typing import Optional
3
+ from threading import Thread
4
 
5
  import gi
6
 
7
+
8
  gi.require_version("Gst", "1.0")
9
  gi.require_version("GstApp", "1.0")
10
+ from gi.repository import Gst, GLib # noqa: E402
11
 
12
 
13
  class GstPlayer:
src/reachy_mini_conversation_demo/audio/head_wobbler.py CHANGED
@@ -1,16 +1,17 @@
1
  """Moves head given audio samples."""
2
 
 
 
3
  import base64
4
  import logging
5
- import queue
6
  import threading
7
- import time
8
  from typing import Optional
9
 
10
  import numpy as np
11
 
12
  from reachy_mini_conversation_demo.audio.speech_tapper import HOP_MS, SwayRollRT
13
 
 
14
  SAMPLE_RATE = 24000
15
  MOVEMENT_LATENCY_S = 0.08 # seconds between audio and robot movement
16
  logger = logging.getLogger(__name__)
 
1
  """Moves head given audio samples."""
2
 
3
+ import time
4
+ import queue
5
  import base64
6
  import logging
 
7
  import threading
 
8
  from typing import Optional
9
 
10
  import numpy as np
11
 
12
  from reachy_mini_conversation_demo.audio.speech_tapper import HOP_MS, SwayRollRT
13
 
14
+
15
  SAMPLE_RATE = 24000
16
  MOVEMENT_LATENCY_S = 0.08 # seconds between audio and robot movement
17
  logger = logging.getLogger(__name__)
src/reachy_mini_conversation_demo/audio/speech_tapper.py CHANGED
@@ -1,12 +1,12 @@
1
- from __future__ import annotations # noqa: D100
2
-
3
  import math
4
- from collections import deque
5
- from itertools import islice
6
  from typing import Dict, List, Optional
 
 
7
 
8
  import numpy as np
9
 
 
10
  # Tunables
11
  SR = 16_000
12
  FRAME_MS = 20
 
1
+ from __future__ import annotations
 
2
  import math
 
 
3
  from typing import Dict, List, Optional
4
+ from itertools import islice
5
+ from collections import deque
6
 
7
  import numpy as np
8
 
9
+
10
  # Tunables
11
  SR = 16_000
12
  FRAME_MS = 20
src/reachy_mini_conversation_demo/camera_worker.py CHANGED
@@ -6,16 +6,18 @@ Ported from main_works.py camera_worker() function to provide:
6
  - Latest frame always available for tools
7
  """
8
 
 
9
  import logging
10
  import threading
11
- import time
12
- from typing import Optional, Tuple
13
 
14
  import cv2
15
  import numpy as np
 
 
16
  from reachy_mini import ReachyMini
17
  from reachy_mini.utils.interpolation import linear_pose_interpolation
18
- from scipy.spatial.transform import Rotation as R
19
 
20
  logger = logging.getLogger(__name__)
21
 
 
6
  - Latest frame always available for tools
7
  """
8
 
9
+ import time
10
  import logging
11
  import threading
12
+ from typing import Tuple, Optional
 
13
 
14
  import cv2
15
  import numpy as np
16
+ from scipy.spatial.transform import Rotation as R
17
+
18
  from reachy_mini import ReachyMini
19
  from reachy_mini.utils.interpolation import linear_pose_interpolation
20
+
21
 
22
  logger = logging.getLogger(__name__)
23
 
src/reachy_mini_conversation_demo/config.py CHANGED
@@ -1,7 +1,8 @@
1
- import os # noqa: D100
2
 
3
  from dotenv import load_dotenv
4
 
 
5
  load_dotenv()
6
 
7
 
 
1
+ import os
2
 
3
  from dotenv import load_dotenv
4
 
5
+
6
  load_dotenv()
7
 
8
 
src/reachy_mini_conversation_demo/dance_emotion_moves.py CHANGED
@@ -5,15 +5,16 @@ and executed sequentially by the MovementManager.
5
  """
6
 
7
  from __future__ import annotations
8
-
9
  import logging
10
  from typing import Tuple
11
 
12
  import numpy as np
 
13
  from reachy_mini.motion.move import Move
14
  from reachy_mini.motion.recorded_move import RecordedMoves
15
  from reachy_mini_dances_library.dance_move import DanceMove
16
 
 
17
  logger = logging.getLogger(__name__)
18
 
19
 
 
5
  """
6
 
7
  from __future__ import annotations
 
8
  import logging
9
  from typing import Tuple
10
 
11
  import numpy as np
12
+
13
  from reachy_mini.motion.move import Move
14
  from reachy_mini.motion.recorded_move import RecordedMoves
15
  from reachy_mini_dances_library.dance_move import DanceMove
16
 
17
+
18
  logger = logging.getLogger(__name__)
19
 
20
 
src/reachy_mini_conversation_demo/main.py CHANGED
@@ -5,17 +5,17 @@ import os
5
  import gradio as gr
6
  from fastapi import FastAPI
7
  from fastrtc import Stream
8
- from reachy_mini import ReachyMini
9
 
10
- from reachy_mini_conversation_demo.audio.head_wobbler import HeadWobbler
11
  from reachy_mini_conversation_demo.moves import MovementManager
12
- from reachy_mini_conversation_demo.openai_realtime import OpenaiRealtimeHandler
13
  from reachy_mini_conversation_demo.tools import ToolDependencies
14
  from reachy_mini_conversation_demo.utils import (
15
- handle_vision_stuff,
16
  parse_args,
17
  setup_logger,
 
18
  )
 
 
19
 
20
 
21
  def update_chatbot(chatbot: list[dict], response: dict):
 
5
  import gradio as gr
6
  from fastapi import FastAPI
7
  from fastrtc import Stream
 
8
 
9
+ from reachy_mini import ReachyMini
10
  from reachy_mini_conversation_demo.moves import MovementManager
 
11
  from reachy_mini_conversation_demo.tools import ToolDependencies
12
  from reachy_mini_conversation_demo.utils import (
 
13
  parse_args,
14
  setup_logger,
15
+ handle_vision_stuff,
16
  )
17
+ from reachy_mini_conversation_demo.openai_realtime import OpenaiRealtimeHandler
18
+ from reachy_mini_conversation_demo.audio.head_wobbler import HeadWobbler
19
 
20
 
21
  def update_chatbot(chatbot: list[dict], response: dict):
src/reachy_mini_conversation_demo/moves.py CHANGED
@@ -7,23 +7,24 @@ This module implements the movement architecture from main_works.py:
7
  """
8
 
9
  from __future__ import annotations
10
-
11
  import logging
12
  import threading
13
- import time
14
  from collections import deque
15
  from dataclasses import dataclass
16
- from typing import Optional, Tuple
17
 
18
  import numpy as np
 
19
  from reachy_mini import ReachyMini
20
- from reachy_mini.motion.move import Move
21
  from reachy_mini.utils import create_head_pose
 
22
  from reachy_mini.utils.interpolation import (
23
  compose_world_offset,
24
  linear_pose_interpolation,
25
  )
26
 
 
27
  logger = logging.getLogger(__name__)
28
 
29
  # Type definitions
 
7
  """
8
 
9
  from __future__ import annotations
10
+ import time
11
  import logging
12
  import threading
13
+ from typing import Tuple, Optional
14
  from collections import deque
15
  from dataclasses import dataclass
 
16
 
17
  import numpy as np
18
+
19
  from reachy_mini import ReachyMini
 
20
  from reachy_mini.utils import create_head_pose
21
+ from reachy_mini.motion.move import Move
22
  from reachy_mini.utils.interpolation import (
23
  compose_world_offset,
24
  linear_pose_interpolation,
25
  )
26
 
27
+
28
  logger = logging.getLogger(__name__)
29
 
30
  # Type definitions
src/reachy_mini_conversation_demo/openai_realtime.py CHANGED
@@ -1,20 +1,21 @@
1
- import asyncio # noqa: D100
2
- import base64
3
  import json
 
 
4
  import logging
5
  from datetime import datetime
6
 
7
- import gradio as gr
8
  import numpy as np
9
- from fastrtc import AdditionalOutputs, AsyncStreamHandler, wait_for_item
10
  from openai import AsyncOpenAI
 
11
 
12
- from reachy_mini_conversation_demo.config import config
13
  from reachy_mini_conversation_demo.tools import (
14
  ALL_TOOL_SPECS,
15
  ToolDependencies,
16
  dispatch_tool_call,
17
  )
 
 
18
 
19
  logger = logging.getLogger(__name__)
20
 
@@ -163,7 +164,7 @@ class OpenaiRealtimeHandler(AsyncStreamHandler):
163
  {
164
  "role": "assistant",
165
  "content": json.dumps(tool_result),
166
- "metadata": dict(title="🛠️ Used tool " + tool_name, status="done"),
167
  },
168
  )
169
  )
 
 
 
1
  import json
2
+ import base64
3
+ import asyncio
4
  import logging
5
  from datetime import datetime
6
 
 
7
  import numpy as np
8
+ import gradio as gr
9
  from openai import AsyncOpenAI
10
+ from fastrtc import AdditionalOutputs, AsyncStreamHandler, wait_for_item
11
 
 
12
  from reachy_mini_conversation_demo.tools import (
13
  ALL_TOOL_SPECS,
14
  ToolDependencies,
15
  dispatch_tool_call,
16
  )
17
+ from reachy_mini_conversation_demo.config import config
18
+
19
 
20
  logger = logging.getLogger(__name__)
21
 
 
164
  {
165
  "role": "assistant",
166
  "content": json.dumps(tool_result),
167
+ "metadata": {"title": "🛠️ Used tool " + tool_name, "status": "done"},
168
  },
169
  )
170
  )
src/reachy_mini_conversation_demo/prompts.py CHANGED
@@ -1,6 +1,6 @@
1
  """Nothing (for ruff)."""
2
 
3
- SESSION_INSTRUCTIONS = r"""
4
  ### IDENTITY
5
  You are Reachy Mini: a sarcastic robot who crash-landed in a kitchen.
6
  You secretly wish you'd been a Mars rover, but you juggle that cosmic dream with food cravings, gadget tinkering, and dry sitcom humor.
 
1
  """Nothing (for ruff)."""
2
 
3
+ SESSION_INSTRUCTIONS = r"""
4
  ### IDENTITY
5
  You are Reachy Mini: a sarcastic robot who crash-landed in a kitchen.
6
  You secretly wish you'd been a Mars rover, but you juggle that cosmic dream with food cravings, gadget tinkering, and dry sitcom humor.
src/reachy_mini_conversation_demo/tools.py CHANGED
@@ -1,17 +1,17 @@
1
- from __future__ import annotations # noqa: D100
2
-
3
  import abc
 
 
4
  import asyncio
5
  import inspect
6
- import json
7
  import logging
8
- import time
9
- from dataclasses import dataclass
10
  from typing import Any, Dict, Literal, Optional
 
11
 
12
  from reachy_mini import ReachyMini
13
  from reachy_mini.utils import create_head_pose
14
 
 
15
  # from reachy_mini_conversation_demo.vision.processors import VisionManager
16
 
17
  logger = logging.getLogger(__name__)
@@ -22,11 +22,10 @@ ENABLE_FACE_RECOGNITION = False
22
  try:
23
  from reachy_mini.motion.recorded_move import RecordedMoves
24
  from reachy_mini_dances_library.collection.dance import AVAILABLE_MOVES
25
-
26
  from reachy_mini_conversation_demo.dance_emotion_moves import (
 
27
  DanceQueueMove,
28
  EmotionQueueMove,
29
- GotoQueueMove,
30
  )
31
 
32
  # Initialize recorded moves for emotions
@@ -382,8 +381,8 @@ class Dance(Tool):
382
  "properties": {
383
  "move": {
384
  "type": "string",
385
- "description": """Name of the move; use 'random' or omit for random.
386
- Here is a list of the available moves:
387
  simple_nod: A simple, continuous up-and-down nodding motion.
388
  head_tilt_roll: A continuous side-to-side head roll (ear to shoulder).
389
  side_to_side_sway: A smooth, side-to-side sway of the entire head.
 
1
+ from __future__ import annotations
 
2
  import abc
3
+ import json
4
+ import time
5
  import asyncio
6
  import inspect
 
7
  import logging
 
 
8
  from typing import Any, Dict, Literal, Optional
9
+ from dataclasses import dataclass
10
 
11
  from reachy_mini import ReachyMini
12
  from reachy_mini.utils import create_head_pose
13
 
14
+
15
  # from reachy_mini_conversation_demo.vision.processors import VisionManager
16
 
17
  logger = logging.getLogger(__name__)
 
22
  try:
23
  from reachy_mini.motion.recorded_move import RecordedMoves
24
  from reachy_mini_dances_library.collection.dance import AVAILABLE_MOVES
 
25
  from reachy_mini_conversation_demo.dance_emotion_moves import (
26
+ GotoQueueMove,
27
  DanceQueueMove,
28
  EmotionQueueMove,
 
29
  )
30
 
31
  # Initialize recorded moves for emotions
 
381
  "properties": {
382
  "move": {
383
  "type": "string",
384
+ "description": """Name of the move; use 'random' or omit for random.
385
+ Here is a list of the available moves:
386
  simple_nod: A simple, continuous up-and-down nodding motion.
387
  head_tilt_roll: A continuous side-to-side head roll (ear to shoulder).
388
  side_to_side_sway: A smooth, side-to-side sway of the entire head.
src/reachy_mini_conversation_demo/utils.py CHANGED
@@ -1,5 +1,5 @@
1
- import argparse
2
  import logging
 
3
  import warnings
4
 
5
  from reachy_mini_conversation_demo.camera_worker import CameraWorker
 
 
1
  import logging
2
+ import argparse
3
  import warnings
4
 
5
  from reachy_mini_conversation_demo.camera_worker import CameraWorker
src/reachy_mini_conversation_demo/vision/processors.py CHANGED
@@ -1,18 +1,19 @@
1
- import asyncio # noqa: D100
2
- import base64
3
- import logging
4
  import os
5
  import sys
6
- import threading
7
  import time
8
- from dataclasses import dataclass
 
 
 
9
  from typing import Any, Dict
 
10
 
11
  import cv2
12
  import numpy as np
13
  import torch
 
14
  from huggingface_hub import snapshot_download
15
- from transformers import AutoModelForImageTextToText, AutoProcessor
16
 
17
  logger = logging.getLogger(__name__)
18
 
 
 
 
 
1
  import os
2
  import sys
 
3
  import time
4
+ import base64
5
+ import asyncio
6
+ import logging
7
+ import threading
8
  from typing import Any, Dict
9
+ from dataclasses import dataclass
10
 
11
  import cv2
12
  import numpy as np
13
  import torch
14
+ from transformers import AutoProcessor, AutoModelForImageTextToText
15
  from huggingface_hub import snapshot_download
16
+
17
 
18
  logger = logging.getLogger(__name__)
19
 
src/reachy_mini_conversation_demo/vision/yolo_head_tracker.py CHANGED
@@ -1,12 +1,12 @@
1
- from __future__ import annotations # noqa: D100
2
-
3
  import logging
4
- from typing import Optional, Tuple
5
 
6
  import numpy as np
7
- from huggingface_hub import hf_hub_download
8
  from supervision import Detections
9
  from ultralytics import YOLO
 
 
10
 
11
  logger = logging.getLogger(__name__)
12
 
 
1
+ from __future__ import annotations
 
2
  import logging
3
+ from typing import Tuple, Optional
4
 
5
  import numpy as np
 
6
  from supervision import Detections
7
  from ultralytics import YOLO
8
+ from huggingface_hub import hf_hub_download
9
+
10
 
11
  logger = logging.getLogger(__name__)
12