feat(hass): move whisper to nos for gpu
All checks were successful
Discord / discord commits (push) Has been skipped

This commit is contained in:
matt1432 2024-09-04 17:43:46 -04:00
parent 928bded3fd
commit f0f4db1be0
4 changed files with 32 additions and 24 deletions

View file

@ -15,6 +15,7 @@
enable = true;
extraComponents = [
"caldav"
"esphome"
"holiday"
"isal"
@ -71,20 +72,10 @@
uri = "tcp://127.0.0.1:10200";
# see https://github.com/rhasspy/rhasspy3/blob/master/programs/tts/piper/script/download.py
voice = "en-us-ryan-low";
voice = "en-us-ryan-low"; # using `hfc male (medium)` in GUI
speaker = 0;
};
faster-whisper.servers."en" = {
enable = true;
uri = "tcp://127.0.0.1:10300";
# see https://github.com/rhasspy/rhasspy3/blob/master/programs/asr/faster-whisper/script/download.py
model = "small-int8";
language = "en";
device = "cpu";
};
openwakeword-docker = {
enable = true;
uri = "127.0.0.1:10400";
@ -112,7 +103,7 @@
then "--socket /run/esphome/esphome.sock"
else "--address ${cfg.address} --port ${toString cfg.port}";
in {
environment.PLATFORMIO_CORE_DIR = mkForce "/var/lib/private/esphome/.platformio";
environment.PLATFORMIO_CORE_DIR = mkForce "${stateDir}/.platformio";
serviceConfig = {
ExecStart = mkForce "${cfg.package}/bin/esphome dashboard ${esphomeParams} ${stateDir}";

View file

@ -13,8 +13,8 @@ in {
./modules/docker
./modules/jellyfin
./modules/llm.nix
./modules/mergerfs.nix
./modules/ollama.nix
./modules/qbittorrent
./modules/snapraid.nix
./modules/subtitles

View file

@ -0,0 +1,28 @@
{self, ...}: let
  # Tailscale address this host listens on for the voice-pipeline services.
  nosAddr = "100.64.0.4";
in {
  imports = [self.nixosModules.wyoming-plus];

  # Speech-to-Text: faster-whisper on the GPU, reachable over the tailnet.
  services.wyoming.faster-whisper.servers."en" = {
    enable = true;
    uri = "tcp://${nosAddr}:10300";
    # see https://github.com/rhasspy/wyoming-faster-whisper/releases/tag/v2.0.0
    model = "medium.en";
    device = "cuda";
  };

  # Text-to-Intent: ollama serving the home-assistant intent model.
  services.ollama = {
    enable = true;
    acceleration = "cuda";
    host = nosAddr;
    port = 11434;
    loadModels = ["fixt/home-3b-v3"];
  };
}

View file

@ -1,11 +0,0 @@
{...}: {
  # Ollama LLM server with CUDA acceleration, bound to the tailnet address.
  services = {
    ollama = {
      enable = true;
      acceleration = "cuda";
      host = "100.64.0.4";
      port = 11434;
      loadModels = ["fixt/home-3b-v3"];
    };
  };
}