1
0
mirror of https://github.com/MarkParker5/STARK.git synced 2024-12-09 09:27:56 +02:00
STARK/majordom_va/VoiceAssistant.py

76 lines
2.3 KiB
Python
Raw Normal View History

import asyncio
import time

from VICore import CommandsContext, CommandsContextDelegate
from IO.SpeechRecognition import SpeechRecognizer, SpeechRecognizerDelegate
from IO import Text2Speech
class VoiceAssistant(SpeechRecognizerDelegate, CommandsContextDelegate):
    '''
    Glue between speech recognition and command processing: recognized
    phrases are forwarded to a CommandsContext, and responses coming back
    from it are printed and spoken via text-to-speech.
    '''

    speech_recognizer: SpeechRecognizer
    commands_context: CommandsContext

    # NOTE(review): class-level attribute — one TTS engine is shared by all
    # VoiceAssistant instances; confirm that is intentional.
    voice = Text2Speech.Engine()

    voids: int = 0              # consecutive empty recognition results
    last_clap_time: float = 0   # time.time() of the previous single clap
    double_clap: bool = False   # not read in this file; kept for compatibility

    # Control

    def __init__(self, commands_context: CommandsContext):
        self.speech_recognizer = SpeechRecognizer(delegate = self)
        self.commands_context = commands_context
        commands_context.delegate = self

    def start(self):
        '''Run the speech-recognition loop; blocks until listening stops.'''
        self.speech_recognizer.delegate = self
        print('Listen...')
        # asyncio.run() replaces the deprecated
        # asyncio.get_event_loop().run_until_complete(...) pattern.
        asyncio.run(self.speech_recognizer.startListening())

    def stop(self):
        '''Ask the recognizer to stop its listening loop.'''
        self.speech_recognizer.stopListening()

    # SpeechRecognizerDelegate

    def speech_recognizer_did_receive_final_result(self, result: str):
        '''Forward a fully recognized phrase to the commands context.'''
        self.voids = 0
        print(f'\rYou: {result}')
        self.commands_context.process_string(result)

    def speech_recognizer_did_receive_partial_result(self, result: str):
        # In-place (carriage-return, no newline) italicized preview of the
        # phrase while it is still being recognized.
        print(f'\rYou: \x1B[3m{result}\x1B[0m', end = '')

    def speech_recognizer_did_receive_empty_result(self):
        self.voids += 1

    # CommandsContextDelegate

    def commands_context_did_receive_response(self, response):
        '''Print the response text and speak its voice part, if any.'''
        if response.text:
            print(f'Archie: {response.text}')
        if response.voice:
            # Pause recognition while speaking so the assistant does not
            # hear (and try to process) its own voice.
            was_recognizing = self.speech_recognizer.is_recognizing
            self.speech_recognizer.is_recognizing = False
            self.voice.generate(response.voice).speak()
            self.speech_recognizer.is_recognizing = was_recognizing

    # check double clap from arduino microphone module
    def checkClap(self, channel):
        '''
        GPIO rising-edge callback: a second clap 0.1-0.6 s after the first
        enables recognition; otherwise the clap time is recorded as the
        start of a potential double clap.
        '''
        now = time.time()
        delta = now - self.last_clap_time

        if 0.1 < delta < 0.6:
            self.speech_recognizer.is_recognizing = True
        else:
            self.last_clap_time = now
# if config.double_clap_activation:
# import RPi.GPIO as GPIO
#
# GPIO.setmode(GPIO.BCM)
# GPIO.setup(12, GPIO.IN)
# GPIO.add_event_detect(12, GPIO.RISING, callback = VoiceAssistant().checkClap)