| """Pyannote speaker embedding model. | |
| - pip install pyannote.audio | |
| - feature dimension: 512 | |
| - source: https://huggingface.co/pyannote/embedding | |
| """ | |
from typing import Optional, Tuple, Union

import numpy as np
import torch
from pyannote.audio import Inference, Model
# Private helpers that Inference.__call__ uses internally.
from pyannote.audio.core.inference import fix_reproducibility, map_with_specifications


class PyannoteSE:
    """Thin wrapper around pyannote's Inference for whole-window speaker embeddings."""

    def __init__(self):
        # Gated model: requires accepting the user conditions on Hugging Face
        # and an authentication token (e.g. via `huggingface-cli login`).
        model = Model.from_pretrained("pyannote/embedding")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model.to(self.device)
        model.eval()
        # window="whole" pools one embedding over the entire input instead of
        # sliding a fixed-size window across it.
        self.inference = Inference(model, window="whole")

    def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
        """Return a 512-dim speaker embedding for a mono waveform.

        Mirrors the "whole" branch of Inference.__call__ on a raw numpy array.
        """
        # Reshape to the (channel, samples) layout pyannote expects.
        wav = torch.as_tensor(wav.reshape(1, -1), dtype=torch.float32).to(self.device)
        fix_reproducibility(self.inference.device)
        if self.inference.window == "sliding":
            # Unreachable with window="whole" (set in __init__); kept for
            # parity with Inference.__call__. Requires sampling_rate.
            return self.inference.slide(wav, sampling_rate, hook=None)
        # infer expects a (batch, channel, samples) tensor.
        outputs: Union[np.ndarray, Tuple[np.ndarray]] = self.inference.infer(wav[None])

        def __first_sample(outputs: np.ndarray, **kwargs) -> np.ndarray:
            return outputs[0]  # drop the batch dimension: (1, 512) -> (512,)

        return map_with_specifications(
            self.inference.model.specifications, __first_sample, outputs
        )
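

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It assumes a
# mono 16 kHz recording at the hypothetical path "speech.wav", loaded with
# the soundfile package (`pip install soundfile`).
if __name__ == "__main__":
    import soundfile as sf

    wav, sr = sf.read("speech.wav", dtype="float32")  # mono: shape (samples,)

    embedder = PyannoteSE()
    embedding = embedder.get_speaker_embedding(wav, sampling_rate=sr)
    print(embedding.shape)  # expected: (512,)

    # Embeddings are typically compared with cosine similarity, e.g. for
    # speaker verification between two recordings.
    other = embedder.get_speaker_embedding(wav, sampling_rate=sr)
    similarity = float(
        np.dot(embedding, other) / (np.linalg.norm(embedding) * np.linalg.norm(other))
    )
    print(f"cosine similarity: {similarity:.3f}")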