    def configure_dialogflow(self):
        """
        Set configuration of the audio that will be sent to DialogFlow (e.g. language, sample_rate).
        """
        '''
        def play():
            CHUNK = 1024
            wf = wave.open("out.wav", 'rb')
        '''
        audio_encoding = dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16  # Uncompressed 16-bit signed little-endian samples
        sample_rate_hertz = 16000
        # TODO: read this from the launch file instead of hardcoding the path.
        google_application_credentials = "/home/daniel/liu-home-wreckers/src/lhw_nlp/lhw_nlp/cred.json"  # rospy.get_param('~google_application_credentials'). Gather all get_param() calls in one function so they are kept together; it is easier to debug when you can see every parameter the ROS node uses.
        # Buffer where the callback stores audio samples coming from audio_topic.
        # Audio chunks are later removed from it and sent to Dialogflow.
        self.audio_chunk_queue = queue.Queue()
        # Signal the generator to terminate so that the client's
        # streaming_recognize method will not block the process termination.
        self.audio_chunk_queue.put(None)
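        # A minimal sketch of how the values above would typically become the self.audio_config
        # used by audio_stream_request_generator() below, assuming the old `dialogflow` v2 Python
        # client; the 'en' language code, the credentials export, and the SessionsClient setup
        # are assumptions, not taken from this file:
        #
        #   os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = google_application_credentials
        #   self.audio_config = dialogflow.types.InputAudioConfig(
        #       audio_encoding=audio_encoding,
        #       language_code='en',
        #       sample_rate_hertz=sample_rate_hertz)
        #   self.session_client = dialogflow.SessionsClient()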
"""
Continuosly stream audio chunks from audio_chunk_queue to Dialogflow. Request generator.
"""
defaudio_stream_request_generator(self):
# ----------- Trigg intents with audio (InputAudioConfig)
query_input=dialogflow.types.QueryInput(audio_config=self.audio_config)# input specification, it instructs the speech recognizer how to process the speech audio
#print("hejehejej")
# The first request sends up the configuration of the speech recognition. This request doesn't contain input audio.
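        # A minimal sketch of the rest of this generator, assuming the old `dialogflow` v2 client
        # and that self.session holds the session path; the attribute name and the loop below are
        # assumptions, not taken from this file:
        #
        #   # First request: configuration only, no audio.
        #   yield dialogflow.types.StreamingDetectIntentRequest(
        #       session=self.session, query_input=query_input)
        #   while True:
        #       chunk = self.audio_chunk_queue.get()
        #       if chunk is None:  # Sentinel pushed at shutdown ends the stream.
        #           return
        #       # Subsequent requests carry the raw audio bytes only.
        #       yield dialogflow.types.StreamingDetectIntentRequest(input_audio=chunk)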