diff --git a/ArchieCore/Commands/CommandsContext.py b/ArchieCore/Commands/CommandsContext.py
new file mode 100644
index 0000000..a6ee7c7
--- /dev/null
+++ b/ArchieCore/Commands/CommandsContext.py
@@ -0,0 +1,88 @@
+from abc import ABC, abstractmethod
+import asyncio
+from typing import Any
+
+from ArchieCore import ACTime
+from .CommandsManager import CommandsManager
+from .Command import Command
+from .Response import Response, ResponseAction
+from .ThreadData import ThreadData
+
+class CommandsContextDelegate(ABC):
+    @abstractmethod
+    def commandsContextDidReceiveResponse(self, response: Response): pass
+
+class CommandsContext:
+
+    delegate: CommandsContextDelegate
+
+    commandsManager = CommandsManager()
+    lastInteractTime: ACTime = ACTime()
+    commandsContext: list[list[Command]] = [CommandsManager().allCommands,]
+    threads: list[ThreadData] = []
+    reports: list[Response] = []
+    memory: list[Response] = []
+    delaysReports: bool = False # if True, reports are delayed until the next interaction; if False, they are sent immediately
+
+    def __init__(self, delegate: CommandsContextDelegate):
+        self.delegate = delegate
+
+    def processString(self, string: str, data: dict[str, Any] = {}):
+        currentContext = self.commandsContext[0] if self.commandsContext else None
+
+        while self.commandsContext:
+            if searchResults := self.commandsManager.search(string = string, commands = currentContext):
+                for searchResult in searchResults:
+                    commandResponse = searchResult.command.start(params = searchResult.parameters)
+                    commandResponse.data = data
+
+                    match commandResponse.action:
+                        case ResponseAction.popContext:
+                            self.commandsContext.pop(0)
+                        case ResponseAction.popToRootContext:
+                            self.commandsContext = [self.commandsManager.allCommands,]
+                        case ResponseAction.sleep:
+                            self.speechRecognizer.isRecognizing = False
+                        case ResponseAction.repeatLastAnswer:
+                            if self.memory:
+                                previousResponse = self.memory[-1]
+                                self.delegate.commandsContextDidReceiveResponse(previousResponse)
+                        case ResponseAction.answerNotFound:
+                            continue
+
+                    self.parse(commandResponse)
+                break
+            else:
+                currentContext = self.commandsContext.pop(0)
+        else:
+            self.commandsContext.append(self.commandsManager.allCommands)
+
+    async def asyncCheckThreads(self):
+        while True:
+            await asyncio.sleep(5)
+            self.checkThreads()
+
+    def checkThreads(self):
+        for threadData in self.threads:
+            if not threadData.finishEvent.is_set(): continue
+
+            response = threadData.thread.join()
+            self.parse(response, delaysReports = self.delaysReports and ACTime() - self.lastInteractTime > 30)
+            threadData.finishEvent.clear()
+
+            del threadData
+
+    def parse(self, response, delaysReports: bool = False):
+        self.reports.insert(0, response)
+        if response.thread:
+            self.threads.append(response.thread)
+        if response.context:
+            self.commandsContext.insert(0, response.context)
+        if not delaysReports:
+            self.report()
+        self.memory.append(response)
+
+    def report(self):
+        for response in self.reports:
+            self.delegate.commandsContextDidReceiveResponse(response)
+        self.reports = []
diff --git a/ArchieCore/Commands/CommandsManager.py b/ArchieCore/Commands/CommandsManager.py
index f3a006f..4f5a971 100644
--- a/ArchieCore/Commands/CommandsManager.py
+++ b/ArchieCore/Commands/CommandsManager.py
@@ -44,7 +44,7 @@ class CommandsManager:
         if results: return results
         elif qa := self.QA: return [SearchResult(qa, {'string': acstring,}),]
-
+        return []
 
     def append(self, command):
diff --git a/ArchieCore/Commands/Response.py b/ArchieCore/Commands/Response.py
index e7fbe84..d4699ef 100644
--- a/ArchieCore/Commands/Response.py
+++ b/ArchieCore/Commands/Response.py
@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Optional, Any
 from enum import Enum, auto
 from .Command import Command
 from .ThreadData import ThreadData
@@ -8,6 +8,7 @@ class ResponseAction(Enum):
     popToRootContext = auto()
     sleep = auto()
     repeatLastAnswer = auto()
+    answerNotFound = auto()
 
 class Response:
     voice: str
@@ -15,6 +16,7 @@ class Response:
     context: list[Command]
     thread: Optional[ThreadData]
     action: Optional[ResponseAction]
+    data: dict[str, Any] = {}
 
     def __init__(self, voice, text, context = [], thread = None, action = None):
         self.voice = voice
diff --git a/ArchieCore/Commands/__init__.py b/ArchieCore/Commands/__init__.py
index c5e52fa..6a8e5e8 100644
--- a/ArchieCore/Commands/__init__.py
+++ b/ArchieCore/Commands/__init__.py
@@ -1,3 +1,4 @@
-from .Command import *
-from .Response import *
+from .Command import Command
+from .Response import Response, ResponseAction
 from .CommandsManager import CommandsManager, SearchResult
+from .CommandsContext import CommandsContext, CommandsContextDelegate
diff --git a/Controls/TelegramBot/TelegramBot.py b/Controls/TelegramBot/TelegramBot.py
index 5d56e08..2ad0fe6 100644
--- a/Controls/TelegramBot/TelegramBot.py
+++ b/Controls/TelegramBot/TelegramBot.py
@@ -3,68 +3,53 @@ import time
 import os
 import config
 
-from ArchieCore import Command
+from ArchieCore import Command, CommandsContext, CommandsContextDelegate
 from General import Text2Speech
 from ..Control import Control
 from .MyTeleBot import MyTeleBot
 
-class TelegramBot(Control):
+class TelegramBot(Control, CommandsContextDelegate):
     threads = []
     online = True
     voids = 0
     memory = []
     voice = Text2Speech.Engine()
     bot = MyTeleBot(config.telebot)
+    commandsContext: CommandsContext
+
+    # Control
 
     def __init__(self):
-        pass
+        self.commandsContext = CommandsContext(delegate = self)
+
+    def start(self):
+        while True:
+            try:
+                print("Start polling...")
+                self.bot.polling(callback = self.commandsContext.checkThreads)
+            except Exception as e:
+                print(e, "\nPolling failed")
+                time.sleep(10)
+
+    def stop(self):
+        raise NotImplementedError
+
+    def main(self, id, text):
+        self.commandsContext.processString(text.lower(), data = {'id': id})
+
+    # CommandsContextDelegate
+
+    def commandsContextDidReceiveResponse(self, response):
+        id = response.data.get('id')
+        if not id: return
 
-    def reply(self, id, response):
         if response.text:
             self.bot.send_message(id, response.text)
         if response.voice:
             if bytes := self.voice.generate(response.voice).getBytes():
                 self.bot.send_voice(id, bytes)
-        if response.thread: # add background thread to list
-            response.thread['id'] = id
-            self.threads.append(response.thread)
 
-    def check_threads(self, threads):
-        for thread in threads:
-            if thread['finish_event'].is_set():
-                response = thread['thread'].join()
-                self.reply(thread['id'], response)
-                thread['finish_event'].clear()
-                del thread
-
-    def main(self, id, text):
-        text = text.lower()
-        if Command.isRepeat(text):
-            self.reply(id, self.memory[0]['response']);
-            return
-        if self.memory:
-            response = self.memory[0]['response']
-            if response.callback:
-                if new_response := response.callback.answer(text):
-                    self.reply(id, new_response)
-                    memory.insert(0, {
-                        'cmd': response.callback,
-                        'params': None,
-                        'response': new_response,
-                    })
-                    return
-        try:
-            cmd, params = self.memory[0]['cmd'].checkContext(text).values()
-            if self.memory[0].get('params'): params = {**memory[0].get('params'), **params}
-        except:
-            cmd, params = Command.reg_find(text).values()
-        response = cmd.start(params)
-        self.reply(id, response)
-        self.memory.insert(0, {
-            'cmd': cmd,
-            'params': params,
-            'response': response,
-        })
+    # Telebot
 
     @bot.message_handler(commands=['vlc', 'queue', 'cmd'])
     def simple_commands(msg):
@@ -80,17 +65,4 @@ class TelegramBot(Control):
 
     @bot.message_handler(content_types = ['text'])
     def execute(msg):
-        TelegramBot().main(msg.chat.id, msg.text)
-
-    def start(self):
-        while True:
-            try:
-                print("Start polling...")
-                self.bot.polling(callback = self.check_threads, args = (self.threads,) )
-            except Exception as e:
-                print(e, "\nPolling failed")
-                time.sleep(10)
-
-
-if __name__ == '__main__':
-    TelegramBot().start()
+        self.commandsContext.processString(msg.text.lower(), data = {'id': msg.chat.id})
diff --git a/Controls/VoiceAssistant/VoiceAssistant.py b/Controls/VoiceAssistant/VoiceAssistant.py
index 614f761..e3a1e17 100644
--- a/Controls/VoiceAssistant/VoiceAssistant.py
+++ b/Controls/VoiceAssistant/VoiceAssistant.py
@@ -1,111 +1,58 @@
 #!/usr/local/bin/python3.8
 from typing import Optional
 import asyncio
-import os
 
 import config
 from ..Control import Control
-from General import SpeechRecognizer, Text2Speech
-from ArchieCore import CommandsManager, Command, Response, ResponseAction, ThreadData, ACTime
+from General import SpeechRecognizer, SpeechRecognizerDelegate, Text2Speech
+from ArchieCore import ACTime, CommandsContext, CommandsContextDelegate
 
-class VoiceAssistant(Control):
-    commandsManager = CommandsManager()
-    speechRecognizer = SpeechRecognizer()
+class VoiceAssistant(Control, SpeechRecognizerDelegate, CommandsContextDelegate):
+
+    speechRecognizer: SpeechRecognizer
+    commandsContext: CommandsContext
     voice = Text2Speech.Engine()
 
-    commandsContext: list[list[Command]] = []
-    threads: list[ThreadData] = []
-    reports: list[Response] = []
-    memory: list[Response] = []
     voids: int = 0
-    lastInteractTime: ACTime = ACTime()
 
     lastClapTime: float = 0
     doubleClap: bool = False
 
+    # Control
+
     def __init__(self):
-        pass
+        self.speechRecognizer = SpeechRecognizer(delegate = self)
+        self.commandsContext = CommandsContext(delegate = self)
 
     def start(self):
-        self.commandsContext = [self.commandsManager.allCommands,]
-        self.speechRecognizer.didReceivePartialResult = lambda string: self.speechRecognizerReceivePartialResult(string)
-        self.speechRecognizer.didReceiveFinalResult = lambda string: self.speechRecognizerReceiveFinalResult(string)
-        self.speechRecognizer.didReceiveEmptyResult = lambda: self.speechRecognizerReceiveEmptyResult()
-
+        self.speechRecognizer.delegate = self
+        print('Listen...')
         asyncio.get_event_loop().run_until_complete(
-            self.listenAndCheckThreads()
+            self.speechRecognizer.startListening()
         )
 
     def stop(self):
        self.speechRecognizer.stopListening()
 
-    def speechRecognizerReceiveEmptyResult(self):
-        self.voids += 1
+    # SpeechRecognizerDelegate
+
+    def speechRecognizerReceiveFinalResult(self, result: str):
+        self.voids = 0
+        self.commandsContext.lastInteractTime = ACTime()
+        print(f'\rYou: {result}')
+
+        self.commandsContext.processString(result)
 
     def speechRecognizerReceivePartialResult(self, result: str):
         print(f'\rYou: \x1B[3m{result}\x1B[0m', end = '')
 
-    def speechRecognizerReceiveFinalResult(self, result: str):
-        self.voids = 0
-        self.lastInteractTime = ACTime()
-        print(f'\rYou: {result}')
+    def speechRecognizerReceiveEmptyResult(self):
+        self.voids += 1
 
-        currentContext = self.commandsContext[0] if self.commandsContext else None
+    # CommandsContextDelegate
 
-        while self.commandsContext:
-            if searchResults := self.commandsManager.search(string = result, commands = currentContext):
-                for searchResult in searchResults:
-                    commandResponse = searchResult.command.start(params = searchResult.parameters)
-                    self.parse(commandResponse)
-
-                    match commandResponse.action:
-                        case ResponseAction.popContext:
-                            self.commandsContext.pop(0)
-                        case ResponseAction.popToRootContext:
-                            self.commandsContext = [self.commandsManager.allCommands,]
-                        case ResponseAction.sleep:
-                            self.speechRecognizer.isRecognizing = False
-                        case ResponseAction.repeatLastAnswer:
-                            if self.memory:
-                                previousResponse = self.memory[-1]
-                                self.reply(previousResponse)
-                break
-            else:
-                currentContext = self.commandsContext.pop(0)
-        else:
-            self.commandsContext.append(self.commandsManager.allCommands)
-
-    async def listenAndCheckThreads(self):
-        while True:
-            await self.speechRecognizer.startListening()
-
-            for threadData in self.threads:
-                if not threadData.finishEvent.is_set(): continue
-
-                response = threadData.thread.join()
-                self.parse(response, silent = ACTime() - self.lastInteractTime > 30)
-                threadData.finishEvent.clear()
-
-                del threadData
-
-    def parse(self, response, silent: bool = False):
-        self.reports.insert(0, response)
-        if not silent:
-            self.report()
-        if response.thread:
-            self.threads.append(response.thread)
-        if response.context:
-            self.commandsContext.insert(0, response.context)
-        self.memory.append(response)
-
-    def report(self):
-        for response in self.reports:
-            self.reply(response)
-        self.reports = []
-
-    def reply(self, response):
+    def commandsContextDidReceiveResponse(self, response):
         if response.text:
-            print(f'\nArchie: {response.text}')
+            print(f'Archie: {response.text}')
         if response.voice:
             wasRecognizing = self.speechRecognizer.isRecognizing
             self.speechRecognizer.isRecognizing = False
@@ -121,6 +68,8 @@ class VoiceAssistant:
         else:
             self.lastClapTime = now
 
+
+
 
 if config.double_clap_activation:
     import RPi.GPIO as GPIO
diff --git a/Features/QA/QA.py b/Features/QA/QA.py
index b8942ad..13744df 100644
--- a/Features/QA/QA.py
+++ b/Features/QA/QA.py
@@ -1,5 +1,5 @@
 from bs4 import BeautifulSoup as BS
-from ArchieCore import CommandsManager, Command, Response
+from ArchieCore import CommandsManager, Command, Response, ResponseAction
 import wikipedia as wiki
 import requests
 import random
@@ -57,7 +57,7 @@ class QAHelper():
 
 @Command.new()
 def qa_start(params):
-    query = params['string']
+    query = params['string'].value
     if 'вики' in query:
         query = query.replace('википедия', '').replace('вики', '').strip()
         try: search = QAHelper.googleSearch(query)
@@ -72,7 +72,9 @@ def qa_start(params):
     try: search = QAHelper.googleSearch(query)
     except: search = ''
     voice = text = search or random.choice(['Не совсем понимаю, о чём вы.', 'Вот эта последняя фраза мне не ясна.', 'А вот это не совсем понятно.', 'Можете сказать то же самое другими словами?', 'Вот сейчас я совсем вас не понимаю.', 'Попробуйте выразить свою мысль по-другому',])
-    return Response(text = text, voice = voice)
+
+    action = ResponseAction.answerNotFound if not text and not voice else None
+
+    return Response(text = text, voice = voice, action = action)
 
 CommandsManager().QA = qa_start
-print(CommandsManager().QA, 'CommandsManager Sets QA')
diff --git a/Features/__init__.py b/Features/__init__.py
index 4cae28b..5cc52e4 100644
--- a/Features/__init__.py
+++ b/Features/__init__.py
@@ -2,6 +2,7 @@
 from .SmallTalk import SmallTalk
 from .Raspi import Raspi
 from .Zieit import Zieit
+from .QA import QA
 
 try: from .SmartHome import SmartHome
 except: pass
diff --git a/General/SpeechRecognition/SpeechRecognition.py b/General/SpeechRecognition/SpeechRecognition.py
index 4620f3b..991d3b5 100644
--- a/General/SpeechRecognition/SpeechRecognition.py
+++ b/General/SpeechRecognition/SpeechRecognition.py
@@ -1,4 +1,5 @@
 from typing import Callable, Optional
+from abc import ABC, abstractmethod
 import asyncio
 import os, sys
 import queue
@@ -11,16 +12,26 @@ import config
 
 vosk.SetLogLevel(-1)
 
+class SpeechRecognizerDelegate(ABC):
+    @abstractmethod
+    def speechRecognizerReceiveFinalResult(self, result: str): pass
+
+    @abstractmethod
+    def speechRecognizerReceivePartialResult(self, result: str): pass
+
+    @abstractmethod
+    def speechRecognizerReceiveEmptyResult(self): pass
+
+
 class SpeechRecognizer:
-    didReceivePartialResult: Callable[[str], None] = lambda _: None
-    didReceiveFinalResult: Callable[[str], None] = lambda _: None
-    didReceiveEmptyResult: Callable[[], None] = lambda: None
+
+    delegate: SpeechRecognizerDelegate
 
     lastResult: Optional[str] = ""
     lastPartialResult: str = ""
-    _isListening = False
     isRecognizing = True
+    _isListening = False
 
     audioQueue = queue.Queue()
     model = vosk.Model(config.vosk_model)
@@ -31,14 +42,14 @@ class SpeechRecognizer:
     channels = 1
     kaldiRecognizer = vosk.KaldiRecognizer(model, samplerate)
 
-    def __init__(self):
-        callback = lambda indata, frames, time, status: self.audioInputCallback(indata, frames, time, status)
+    def __init__(self, delegate: SpeechRecognizerDelegate):
+        self.delegate = delegate
         self.parameters = {
             'samplerate': self.samplerate,
             'blocksize': self.blocksize,
             'dtype': self.dtype,
             'channels': self.channels,
-            'callback': callback
+            'callback': self.audioInputCallback
         }
 
     def audioInputCallback(self, indata, frames, time, status):
@@ -62,12 +73,12 @@ class SpeechRecognizer:
             result = json.loads(self.kaldiRecognizer.Result())
             if (string := result.get('text')) and string != self.lastResult:
                 self.lastResult = string
-                self.didReceiveFinalResult(string)
+                self.delegate.speechRecognizerReceiveFinalResult(string)
             else:
                 self.lastResult = None
-                self.didReceiveEmptyResult()
+                self.delegate.speechRecognizerReceiveEmptyResult()
         else:
             result = json.loads(self.kaldiRecognizer.PartialResult())
             if (string := result.get('partial')) and string != self.lastPartialResult:
                 self.lastPartialResult = string
-                self.didReceivePartialResult(result['partial'])
+                self.delegate.speechRecognizerReceivePartialResult(result['partial'])
diff --git a/General/SpeechRecognition/__init__.py b/General/SpeechRecognition/__init__.py
index d919a98..68685ec 100644
--- a/General/SpeechRecognition/__init__.py
+++ b/General/SpeechRecognition/__init__.py
@@ -1 +1 @@
-from .SpeechRecognition import SpeechRecognizer
+from .SpeechRecognition import SpeechRecognizer, SpeechRecognizerDelegate
diff --git a/start.py b/start.py
index 8fe8cf9..15ff718 100644
--- a/start.py
+++ b/start.py
@@ -7,7 +7,7 @@ import Controls
 def main():
     controls = [
         Controls.VoiceAssistant(),
-        #Controls.TelegramBot(),
+        Controls.TelegramBot(),
         #Controls.RemoteControl(),
         #Controls.Django(),
     ]
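
Usage note (not part of the diff): a minimal sketch of how a control adopts the CommandsContext / CommandsContextDelegate pair introduced above. The ConsoleControl class is hypothetical and exists only to illustrate the calls this change adds: CommandsContext(delegate = ...), processString(string, data), and the commandsContextDidReceiveResponse callback that receives every Response together with the data payload passed in.

from ArchieCore import CommandsContext, CommandsContextDelegate, Response

class ConsoleControl(CommandsContextDelegate):
    # hypothetical control, for illustration only

    def __init__(self):
        # the context searches commands, runs them and reports every Response back through the delegate
        self.commandsContext = CommandsContext(delegate = self)

    def run(self):
        while True:
            line = input('You: ')
            # the data dict travels through Response.data back into the delegate callback
            self.commandsContext.processString(line.lower(), data = {'id': 'console'})

    # CommandsContextDelegate
    def commandsContextDidReceiveResponse(self, response: Response):
        if response.text:
            print(f'Archie: {response.text}')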
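Likewise, a hedged sketch of the background-thread plumbing: checkThreads() has to be called periodically to join finished command threads and report their responses. TelegramBot passes it to bot.polling above, while asyncCheckThreads() is an asyncio wrapper that this diff does not schedule anywhere. Assuming SpeechRecognizer.startListening() stays pending while the microphone is open, one possible wiring is:

import asyncio

import Controls

# Not part of the diff: run the recognizer loop and drain finished command
# threads every 5 seconds side by side, instead of calling start() directly.
async def runAssistant(assistant):
    await asyncio.gather(
        assistant.speechRecognizer.startListening(),
        assistant.commandsContext.asyncCheckThreads(),
    )

asyncio.get_event_loop().run_until_complete(runAssistant(Controls.VoiceAssistant()))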