
@dzianisv
Last active June 4, 2023 12:16
Armbian aarch64 Microsoft Speech SDK
Traceback (most recent call last):
  File "/opt/AssistantPlato/./src/test.py", line 80, in <module>
    speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_output_config)
  File "/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/speech.py", line 2149, in __init__
    _call_hr_fn(fn=_sdk_lib.synthesizer_create_speech_synthesizer_from_config, *[
  File "/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/interop.py", line 62, in _call_hr_fn
    _raise_if_failed(hr)
  File "/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/interop.py", line 55, in _raise_if_failed
    __try_get_error(_spx_handle(hr))
  File "/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/interop.py", line 50, in __try_get_error
    raise RuntimeError(message)
RuntimeError: Exception with error code:
[CALL STACK BEGIN]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x1d764c) [0xffff80b6764c]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x1eb9a8) [0xffff80b7b9a8]
/lib/aarch64-linux-gnu/libc.so.6(+0x825d4) [0xffff817625d4]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x1ec920) [0xffff80b7c920]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x1ac2a4) [0xffff80b3c2a4]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x1abb78) [0xffff80b3bb78]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x1d9288) [0xffff80b69288]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x1c3644) [0xffff80b53644]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x1be070) [0xffff80b4e070]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0xeb7a8) [0xffff80a7b7a8]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x1d9288) [0xffff80b69288]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x1b759c) [0xffff80b4759c]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(+0x204814) [0xffff80b94814]
/root/.local/share/virtualenvs/AssistantPlato-qjxu2X5g/lib/python3.10/site-packages/azure/cognitiveservices/speech/libMicrosoft.CognitiveServices.Speech.core.so(synthesizer_create_speech_synthesizer_from_config+0x10c) [0xffff80a4a77c]
/lib/aarch64-linux-gnu/libffi.so.8(+0x6e10) [0xffff81506e10]
/lib/aarch64-linux-gnu/libffi.so.8(+0x3a94) [0xffff81503a94]
/usr/lib/python3.10/lib-dynload/_ctypes.cpython-310-aarch64-linux-gnu.so(+0x12b10) [0xffff81532b10]
[CALL STACK END]
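
The RuntimeError carries an empty error code, so the call stack alone does not say why synthesizer creation failed. A minimal diagnostic sketch, assuming the same AZURE_SPEECH_KEY/AZURE_REGION environment variables as the script below: enable the SDK's file logger before constructing the synthesizer (the log path here is arbitrary), and synthesize to a WAV file instead of the default speaker to check whether the crash is in default-device handling rather than the service connection.

#!/usr/bin/env python3
# Diagnostic sketch; the log and output paths are arbitrary placeholders.
import os
import azure.cognitiveservices.speech as speechsdk

speech_config = speechsdk.SpeechConfig(
    subscription=os.environ.get("AZURE_SPEECH_KEY"),
    region=os.environ.get("AZURE_REGION"))

# Write a detailed SDK trace to a file for post-mortem inspection.
speech_config.set_property(speechsdk.PropertyId.Speech_LogFilename, "/tmp/speechsdk.log")

# Synthesize to a WAV file instead of the default speaker to rule out
# ALSA/default-device problems on aarch64.
file_output = speechsdk.audio.AudioOutputConfig(filename="/tmp/test.wav")
synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=file_output)
result = synthesizer.speak_text_async("test").get()
print(result.reason)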
root@orangepi4-lts:/media/root# aplay -l
**** List of PLAYBACK Hardware Devices ****
card 0: rockchipes8316c [rockchip-es8316c], device 0: ff880000.i2s-ES8316 HiFi ES8316 HiFi-0 [ff880000.i2s-ES8316 HiFi ES8316 HiFi-0]
  Subdevices: 1/1
  Subdevice #0: subdevice #0
card 1: hdmisound [hdmi-sound], device 0: ff8a0000.i2s-i2s-hifi i2s-hifi-0 [ff8a0000.i2s-i2s-hifi i2s-hifi-0]
  Subdevices: 1/1
  Subdevice #0: subdevice #0
root@orangepi4-lts:/media/root# arecord -l
**** List of CAPTURE Hardware Devices ****
card 0: rockchipes8316c [rockchip-es8316c], device 0: ff880000.i2s-ES8316 HiFi ES8316 HiFi-0 [ff880000.i2s-ES8316 HiFi ES8316 HiFi-0]
  Subdevices: 1/1
  Subdevice #0: subdevice #0
card 1: hdmisound [hdmi-sound], device 0: ff8a0000.i2s-i2s-hifi i2s-hifi-0 [ff8a0000.i2s-i2s-hifi i2s-hifi-0]
  Subdevices: 1/1
  Subdevice #0: subdevice #0
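
Both cards are visible to ALSA, so if the SDK's default-device lookup is what fails, pinning it to an explicit device may help. Recent versions of the Python Speech SDK accept a device_name argument on AudioConfig and AudioOutputConfig; on Linux this is an ALSA device string. A sketch, with the caveat that device_name support in the installed SDK version and the exact ALSA string for this board are assumptions:

import azure.cognitiveservices.speech as speechsdk

# "hw:0,0" maps to card 0 (the es8316 codec) in the aplay/arecord listings
# above; the exact string that works on this board is an assumption.
audio_config = speechsdk.audio.AudioConfig(device_name="hw:0,0")
audio_output_config = speechsdk.audio.AudioOutputConfig(device_name="hw:0,0")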
#!/usr/bin/env python3
import os
import azure.cognitiveservices.speech as speechsdk
import openai

# Speech Services
speech_key = os.environ.get("AZURE_SPEECH_KEY")
speech_region = os.environ.get("AZURE_REGION")
language = ""  # "en-US"
voice = ""  # "en-US-JennyMultilingualNeural"

# OpenAI
openai.api_key = os.environ.get("OPENAI_KEY")

# Prompt
base_message = [{"role": "system", "content": "You are a senior expert voice assistant who can answer all related questions. You are friendly and concise. You only provide factual answers to queries, and do not provide answers that are not related to products or ."}]

#######################
###### Functions ######
#######################

def ask_openai(prompt):
    base_message.append({"role": "user", "content": prompt})
    response = openai.ChatCompletion.create(
        engine="gpt-35-turbo",
        messages=base_message,
        temperature=0.24,
        max_tokens=50,
        top_p=0.95,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None)
    text = response['choices'][0]['message']['content'].replace('\n', ' ').replace(' .', '.').strip()
    print('Azure OpenAI response: ' + text)
    base_message.append({"role": "assistant", "content": text})
    speech_synthesis_result = speech_synthesizer.speak_text_async(text).get()
    if speech_synthesis_result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
        print("Speech synthesized to speaker for text [{}]".format(text))
    elif speech_synthesis_result.reason == speechsdk.ResultReason.Canceled:
        cancellation_details = speech_synthesis_result.cancellation_details
        print("Speech synthesis canceled: {}".format(cancellation_details.reason))
        if cancellation_details.reason == speechsdk.CancellationReason.Error:
            print("Error details: {}".format(cancellation_details.error_details))

def chat_with_open_ai():
    global base_message  # the "Reset." branch reassigns it; without this the reset would only create a local
    while True:
        print("Azure OpenAI is listening. Say 'Stop' or press Ctrl-Z to end the conversation.")
        try:
            speech_recognition_result = speech_recognizer.recognize_once_async().get()
            if speech_recognition_result.reason == speechsdk.ResultReason.RecognizedSpeech:
                text = speech_recognition_result.text
                if text == "Stop.":
                    print("Conversation ended.")
                    break
                if text == "Reset.":
                    print("Reset")
                    base_message = [{"role": "system", "content": "You are an AI voice assistant that helps to answer questions."}]
                if "Hey" in text:  # crude wake-word gate
                    print("Recognized speech: {}".format(speech_recognition_result.text))
                    ask_openai(speech_recognition_result.text)
            elif speech_recognition_result.reason == speechsdk.ResultReason.NoMatch:
                print("No speech could be recognized: {}".format(speech_recognition_result.no_match_details))
                break
            elif speech_recognition_result.reason == speechsdk.ResultReason.Canceled:
                cancellation_details = speech_recognition_result.cancellation_details
                print("Speech Recognition canceled: {}".format(cancellation_details.reason))
                if cancellation_details.reason == speechsdk.CancellationReason.Error:
                    print("Error details: {}".format(cancellation_details.error_details))
        except EOFError:
            break

speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=speech_region)
audio_output_config = speechsdk.audio.AudioOutputConfig(use_default_speaker=True)
audio_config = speechsdk.audio.AudioConfig(use_default_microphone=True)
speech_config.speech_recognition_language = language
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
speech_config.speech_synthesis_voice_name = voice
speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_output_config)

try:
    chat_with_open_ai()
except Exception as err:
    print("Encountered exception. {}".format(err))