/* * Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0/ * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /// @file SpeechSynthesizerIntegrationTest.cpp #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "Integration/ACLTestContext.h" #include "Integration/ClientMessageHandler.h" #include "Integration/ConnectionStatusObserver.h" #include "Integration/ObservableMessageRequest.h" #include "Integration/TestMessageSender.h" #include "Integration/TestSpeechSynthesizerObserver.h" #include "Integration/TestDirectiveHandler.h" #include "Integration/TestExceptionEncounteredSender.h" #ifdef GSTREAMER_MEDIA_PLAYER #include "MediaPlayer/MediaPlayer.h" #else #include "Integration/TestMediaPlayer.h" #endif namespace alexaClientSDK { namespace integration { namespace test { using namespace acl; using namespace adsl; using namespace afml; using namespace avsCommon; using namespace avsCommon::avs; using namespace avsCommon::avs::attachment; using namespace avsCommon::sdkInterfaces; using namespace avsCommon::utils::json; using namespace avsCommon::utils::mediaPlayer; using namespace avsCommon::utils::sds; using namespace capabilityAgents::speechSynthesizer; using namespace contextManager; using namespace sdkInterfaces; #ifdef GSTREAMER_MEDIA_PLAYER using namespace mediaPlayer; #endif /** * This string specifies a Recognize event using the specified profile. * * CLOSE_TALK performs end-of-speech detection on the client, so no directive is sent from AVS to stop recording. * NEAR_FIELD performs end-of-speech detection in AVS, so a directive is sent from AVS to stop recording. */ // clang-format off #define RECOGNIZE_EVENT_JSON(PROFILE, DIALOG_REQUEST_ID ) \ "{" \ "\"event\":{" \ "\"payload\":{" \ "\"format\":\"AUDIO_L16_RATE_16000_CHANNELS_1\"," \ "\"profile\":\"" #PROFILE "\"" \ "}," \ "\"header\":{" \ "\"dialogRequestId\":\"" DIALOG_REQUEST_ID "\"," \ "\"messageId\":\"messageId123\"," \ "\"name\":\"Recognize\"," \ "\"namespace\":\"SpeechRecognizer\"" \ "}" \ "}," \ "\"context\":[{" \ "\"payload\":{" \ "\"activeAlerts\":[]," \ "\"allAlerts\":[]" \ "}," \ "\"header\":{" \ "\"name\":\"AlertsState\"," \ "\"namespace\":\"Alerts\"" \ "}" \ "}," \ "{" \ "\"payload\":{" \ "\"playerActivity\":\"IDLE\"," \ "\"offsetInMilliseconds\":0," \ "\"token\":\"\"" \ "}," \ "\"header\":{" \ "\"name\":\"PlaybackState\"," \ "\"namespace\":\"AudioPlayer\"" \ "}" \ "}," \ "{" \ "\"payload\":{" \ "\"muted\":false," \ "\"volume\":0" \ "}," \ "\"header\":{" \ "\"name\":\"VolumeState\"," \ "\"namespace\":\"Speaker\"" \ "}" \ "}," \ "{" \ "\"payload\":{" \ "\"playerActivity\":\"FINISHED\"," \ "\"offsetInMilliseconds\":0," \ "\"token\":\"\"" \ "}," \ "\"header\":{" \ "\"name\":\"SpeechState\"," \ "\"namespace\":\"SpeechSynthesizer\"" \ "}" \ "}]" \ "}" // clang-format on // This is a 16 bit 16 kHz little endian linear PCM audio file of "Joke" to be recognized. 
static const std::string RECOGNIZE_JOKE_AUDIO_FILE_NAME = "/recognize_joke_test.wav"; // This is a 16 bit 16 kHz little endian linear PCM audio file of "Wikipedia" to be recognized. static const std::string RECOGNIZE_WIKI_AUDIO_FILE_NAME = "/recognize_wiki_test.wav"; // This is a 16 bit 16 kHz little endian linear PCM audio file of "Lions" to be recognized. static const std::string RECOGNIZE_LIONS_AUDIO_FILE_NAME = "/recognize_lions_test.wav"; // This is a 16 bit 16 kHz little endian linear PCM audio file of "Flashbriefing" to be recognized. static const std::string RECOGNIZE_FLASHBRIEFING_AUDIO_FILE_NAME = "/recognize_flashbriefing_test.wav"; // This is a 16 bit 16 kHz little endian linear PCM audio file of "What's up" to be recognized. static const std::string RECOGNIZE_WHATS_UP_AUDIO_FILE_NAME = "/recognize_whats_up_test.wav"; // This is a 16 bit 16 kHz little endian linear PCM audio file of "Volume up" to be recognized. static const std::string RECOGNIZE_VOLUME_UP_AUDIO_FILE_NAME = "/recognize_volume_up_test.wav"; // String to be used as a basic DialogRequestID. #define FIRST_DIALOG_REQUEST_ID "DialogRequestID123" // String to be used as a DialogRequestID when the first has already been used. #define SECOND_DIALOG_REQUEST_ID "DialogRequestID456" // This string specifies a Recognize event using the CLOSE_TALK profile and uses the first DialogRequestID. static const std::string CT_FIRST_RECOGNIZE_EVENT_JSON = RECOGNIZE_EVENT_JSON(CLOSE_TALK, FIRST_DIALOG_REQUEST_ID); // This string specifies a Recognize event using the NEAR_FIELD profile and uses the first DialogRequestID. static const std::string CT_FIRST_RECOGNIZE_EVENT_JSON_NEAR = RECOGNIZE_EVENT_JSON(NEAR_FIELD, FIRST_DIALOG_REQUEST_ID); // This string specifies a Recognize event using the CLOSE_TALK profile and uses the second DialogRequestID. static const std::string CT_SECOND_RECOGNIZE_EVENT_JSON = RECOGNIZE_EVENT_JSON(CLOSE_TALK, SECOND_DIALOG_REQUEST_ID); // This string to be used for Speak Directives which use the NAMESPACE_SPEECH_SYNTHESIZER namespace. static const std::string NAME_SPEAK = "Speak"; // This string to be used for Recognize events which use the NAMESPACE_SPEECH_RECOGNIZER namespace. static const std::string NAME_RECOGNIZE = "Recognize"; // This string to be used for AdjustVolume Directives which use the NAMESPACE_SPEAKER namespace. static const std::string NAME_ADJUST_VOLUME = "AdjustVolume"; // This string to be used for ExpectSpeech Directives which use the NAMESPACE_SPEECH_RECOGNIZER namespace. static const std::string NAME_EXPECT_SPEECH = "ExpectSpeech"; // This string to be used for SetMute Directives which use the NAMESPACE_SPEAKER namespace. static const std::string NAME_SET_MUTE = "SetMute"; // This string to be used for SpeechStarted events which use the NAMESPACE_SPEECH_SYNTHESIZER namespace. static const std::string NAME_SPEECH_STARTED = "SpeechStarted"; // This string to be used for SpeechFinished events which use the NAMESPACE_SPEECH_SYNTHESIZER namespace. static const std::string NAME_SPEECH_FINISHED = "SpeechFinished"; // This String to be used to register the SpeechRecognizer namespace to a DirectiveHandler. static const std::string NAMESPACE_SPEECH_RECOGNIZER = "SpeechRecognizer"; // This String to be used to register the SpeechSynthesizer namespace to a DirectiveHandler. static const std::string NAMESPACE_SPEECH_SYNTHESIZER = "SpeechSynthesizer"; // This String to be used to register the Speaker namespace to a DirectiveHandler.
static const std::string NAMESPACE_SPEAKER = "Speaker"; // This pair connects a Speak name and SpeechSynthesizer namespace for use in DirectiveHandler registration. static const NamespaceAndName SPEAK_PAIR = {NAMESPACE_SPEECH_SYNTHESIZER, NAME_SPEAK}; // This pair connects an ExpectSpeech name and SpeechRecognizer namespace for use in DirectiveHandler registration. static const NamespaceAndName EXPECT_SPEECH_PAIR = {NAMESPACE_SPEECH_RECOGNIZER, NAME_EXPECT_SPEECH}; // This pair connects a SetMute name and Speaker namespace for use in DirectiveHandler registration. static const NamespaceAndName SET_MUTE_PAIR = {NAMESPACE_SPEAKER, NAME_SET_MUTE}; // This pair connects an AdjustVolume name and Speaker namespace for use in DirectiveHandler registration. static const NamespaceAndName ADJUST_VOLUME_PAIR = {NAMESPACE_SPEAKER, NAME_ADJUST_VOLUME}; // Sample dialog activity id. static const std::string DIALOG_ACTIVITY_ID = "Dialog"; /// Sample alerts activity id. static const std::string ALERTS_ACTIVITY_ID = "Alerts"; // A timeout in seconds for a directive to reach the DirectiveHandler. static const std::chrono::seconds WAIT_FOR_TIMEOUT_DURATION(15); // A timeout in seconds for an event to be sent to @c AVS. static const std::chrono::seconds SEND_EVENT_TIMEOUT_DURATION(20); // A timeout in seconds for a directive to reach the DirectiveHandler. static const std::chrono::seconds DIRECTIVE_TIMEOUT_DURATION(7); // A short timeout in seconds to use when the wait is expected to time out because nothing further should occur. static const std::chrono::seconds WANTING_TIMEOUT_DURATION(1); // A timeout in seconds for the Media Player to finish playing. static const std::chrono::seconds WAIT_FOR_MEDIA_PLAYER_TIMEOUT_DURATION(60); // This integer specifies the number of Speak Directives to validate in the test handleMultipleConsecutiveSpeaks. // Although we anticipate four Speak Directives, we validate only three Speak Directives. // Validating three Speak Directives helps keep the test short. static const unsigned int NUMBER_OF_SPEAK_DIRECTIVES_TO_VALIDATE = 3; /// JSON key to get the event object of a message. static const std::string JSON_MESSAGE_EVENT_KEY = "event"; /// JSON key to get the directive object of a message. static const std::string JSON_MESSAGE_DIRECTIVE_KEY = "directive"; /// JSON key to get the header object of a message. static const std::string JSON_MESSAGE_HEADER_KEY = "header"; /// JSON key to get the namespace value of a header. static const std::string JSON_MESSAGE_NAMESPACE_KEY = "namespace"; /// JSON key to get the name value of a header. static const std::string JSON_MESSAGE_NAME_KEY = "name"; /// JSON key to get the messageId value of a header. static const std::string JSON_MESSAGE_MESSAGE_ID_KEY = "messageId"; /// JSON key to get the dialogRequestId value of a header. static const std::string JSON_MESSAGE_DIALOG_REQUEST_ID_KEY = "dialogRequestId"; /// JSON key to get the payload object of a message. static const std::string JSON_MESSAGE_PAYLOAD_KEY = "payload"; /// String to identify log entries originating from this file. static const std::string TAG("SpeechSynthesizerIntegrationTest"); /** * Create a LogEntry using this file's TAG and the specified event string. * * @param event The event string for this @c LogEntry. */ #define LX(event) alexaClientSDK::avsCommon::utils::logger::LogEntry(TAG, event) /// Path to the AlexaClientSDKConfig.json file (from command line arguments). static std::string g_configPath; /// Path to resources (e.g. audio files) for tests (from command line arguments).
static std::string g_inputPath; /// A test observer that mocks out the ChannelObserverInterface##onFocusChanged() call. class TestClient : public ChannelObserverInterface { public: /** * Constructor. */ TestClient() : m_focusState(FocusState::NONE) { } /** * Implementation of the ChannelObserverInterface##onFocusChanged() callback. * * @param focusState The new focus state of the Channel observer. */ void onFocusChanged(FocusState focusState) override { std::unique_lock lock(m_mutex); m_queue.push_back(focusState); m_focusState = focusState; m_wakeTrigger.notify_all(); } /** * Waits for the ChannelObserverInterface##onFocusChanged() callback. * * @param timeout The amount of time to wait for the callback. * @param focusChanged An output parameter that notifies the caller whether a callback occurred. * @return Returns @c true if the callback occured within the timeout period and @c false otherwise. */ FocusState waitForFocusChange(std::chrono::milliseconds timeout) { FocusState ret; std::unique_lock lock(m_mutex); if (!m_wakeTrigger.wait_for(lock, timeout, [this]() { return !m_queue.empty(); })) { ret = m_focusState; return ret; } ret = m_queue.front(); m_queue.pop_front(); return ret; } FocusState getCurrentFocusState() { return m_focusState; } private: /// The focus state of the observer. FocusState m_focusState; /// A lock to guard against focus state changes. std::mutex m_mutex; /// Trigger to wake up waitForNext calls. std::condition_variable m_wakeTrigger; /// Queue of received focus states that have not been waited on. std::deque m_queue; }; class SpeechSynthesizerTest : public ::testing::Test { protected: virtual void SetUp() override { m_context = ACLTestContext::create(g_configPath); ASSERT_TRUE(m_context); m_exceptionEncounteredSender = std::make_shared(); m_dialogUXStateAggregator = std::make_shared(); DirectiveHandlerConfiguration config; config[SET_MUTE_PAIR] = BlockingPolicy::BLOCKING; config[ADJUST_VOLUME_PAIR] = BlockingPolicy::BLOCKING; config[EXPECT_SPEECH_PAIR] = BlockingPolicy::BLOCKING; m_directiveHandler = std::make_shared(config); m_directiveSequencer = DirectiveSequencer::create(m_exceptionEncounteredSender); m_messageInterpreter = std::make_shared( m_exceptionEncounteredSender, m_directiveSequencer, m_context->getAttachmentManager()); // Set up connection and connect m_avsConnectionManager = std::make_shared( m_context->getMessageRouter(), false, m_context->getConnectionStatusObserver(), m_messageInterpreter); ASSERT_TRUE(m_avsConnectionManager); connect(); m_focusManager = std::make_shared(FocusManager::getDefaultAudioChannels()); m_testClient = std::make_shared(); ASSERT_TRUE( m_focusManager->acquireChannel(FocusManager::ALERTS_CHANNEL_NAME, m_testClient, ALERTS_ACTIVITY_ID)); ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::FOREGROUND); #ifdef GSTREAMER_MEDIA_PLAYER m_mediaPlayer = MediaPlayer::create(std::make_shared()); #else m_mediaPlayer = std::make_shared(); #endif // Create and register the SpeechSynthesizer. 
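    // Note on the arguments to SpeechSynthesizer::create() below (roles summarized here for readability): the
    // media player renders the audio attachment of each Speak directive, the connection manager sends the
    // SpeechStarted/SpeechFinished events, the focus manager arbitrates channel focus, the context manager
    // supplies SpeechSynthesizer state to AVS, the exception sender reports directive-handling failures, and
    // the dialog UX state aggregator observes speech state changes.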
m_speechSynthesizer = SpeechSynthesizer::create( m_mediaPlayer, m_avsConnectionManager, m_focusManager, m_context->getContextManager(), m_exceptionEncounteredSender, m_dialogUXStateAggregator); m_directiveSequencer->addDirectiveHandler(m_speechSynthesizer); m_speechSynthesizerObserver = std::make_shared(); m_speechSynthesizer->addObserver(m_speechSynthesizerObserver); m_speechSynthesizer->addObserver(m_dialogUXStateAggregator); ASSERT_TRUE(m_directiveSequencer->addDirectiveHandler(m_directiveHandler)); } /** * Send and event to AVS. Blocks until a status is received. * * @param message The message to send. * @param expectStatus The status to expect from the call to send the message. * @param timeout How long to wait for a result from delivering the message. */ void sendEvent( const std::string& jsonContent, std::shared_ptr attachmentReader, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status expectedStatus, std::chrono::seconds timeout) { auto messageRequest = std::make_shared(jsonContent, attachmentReader); m_avsConnectionManager->sendMessage(messageRequest); ASSERT_TRUE(messageRequest->waitFor(expectedStatus, timeout)); } /** * Function to setup a message and send it to AVS. * * @param json A JSON string containing the message to send. * @param expectStatus The status to expect from the call to send the message. * @param timeout How long to wait for a result from delivering the message. */ void setupMessageAndSend( const std::string& json, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status expectedStatus, std::chrono::seconds timeout) { sendEvent(json, nullptr, expectedStatus, timeout); } /** * Function to setup a message with an attachment and send it to AVS. * * @param json A JSON string containing the message to send. * @param file Name of the file to read the attachment from. * @param expectStatus The status to expect from the call to send the message. * @param timeout How long to wait for a result from delivering the message. */ void setupMessageWithAttachmentAndSend( const std::string& json, std::string& file, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status expectedStatus, std::chrono::seconds timeout) { auto is = std::make_shared(file, std::ios::binary); ASSERT_TRUE(is->is_open()); const int mbBytes = 1024 * 1024; std::vector localBuffer(mbBytes); auto bufferSize = InProcessSDS::calculateBufferSize(localBuffer.size()); auto buffer = std::make_shared(bufferSize); std::shared_ptr sds = InProcessSDS::create(buffer); auto attachmentWriter = InProcessAttachmentWriter::create(sds); while (*is) { is->read(localBuffer.data(), mbBytes); size_t numBytesRead = is->gcount(); AttachmentWriter::WriteStatus writeStatus = AttachmentWriter::WriteStatus::OK; attachmentWriter->write(localBuffer.data(), numBytesRead, &writeStatus); // write status should be either OK or CLOSED bool writeStatusOk = (AttachmentWriter::WriteStatus::OK == writeStatus || AttachmentWriter::WriteStatus::CLOSED == writeStatus); ASSERT_TRUE(writeStatusOk); } attachmentWriter->close(); std::shared_ptr attachmentReader = InProcessAttachmentReader::create(ReaderPolicy::NONBLOCKING, sds); ASSERT_NE(attachmentReader, nullptr); sendEvent(json, attachmentReader, expectedStatus, std::chrono::seconds(timeout)); } /** * Waits for the ChannelObserverInterface##onFocusChanged() callback. * * @param timeout The amount of time to wait for the callback. * @param focusChanged An output parameter that notifies the caller whether a callback occurred. 
* @return Returns @c true if the callback occured within the timeout period and @c false otherwise. */ FocusState waitForFocusChange(std::chrono::milliseconds timeout) { std::unique_lock lock(m_mutex); m_focusChanged.wait_for(lock, timeout, [this]() { return m_focusChangeOccurred; }); return m_focusState; } void TearDown() override { disconnect(); // Note that these nullptr checks are needed to avoid segaults if @c SetUp() failed. if (m_speechSynthesizer) { // m_speechSynthesizer->removeObserver(m_dialogUXStateAggregator); m_speechSynthesizer->shutdown(); } if (m_directiveSequencer) { m_directiveSequencer->shutdown(); } if (m_avsConnectionManager) { m_avsConnectionManager->shutdown(); } #ifdef GSTREAMER_MEDIA_PLAYER if (m_mediaPlayer) { m_mediaPlayer->shutdown(); } #endif m_context.reset(); } /** * Connect to AVS. */ void connect() { m_avsConnectionManager->enable(); m_context->waitForConnected(); } /** * Disconnect from AVS. */ void disconnect() { if (m_avsConnectionManager) { m_avsConnectionManager->disable(); m_context->waitForDisconnected(); } } bool checkSentEventName(TestMessageSender::SendParams sendParams, std::string expectedName) { if (TestMessageSender::SendParams::Type::SEND == sendParams.type) { std::string eventString; std::string eventHeader; std::string eventName; jsonUtils::retrieveValue(sendParams.request->getJsonContent(), JSON_MESSAGE_EVENT_KEY, &eventString); jsonUtils::retrieveValue(eventString, JSON_MESSAGE_HEADER_KEY, &eventHeader); jsonUtils::retrieveValue(eventHeader, JSON_MESSAGE_NAME_KEY, &eventName); return eventName == expectedName; } return false; } /// Context for running ACL based tests. std::unique_ptr m_context; std::shared_ptr m_avsConnectionManager; std::shared_ptr m_exceptionEncounteredSender; std::shared_ptr m_directiveHandler; std::shared_ptr m_directiveSequencer; std::shared_ptr m_messageInterpreter; std::shared_ptr m_speechSynthesizerObserver; std::shared_ptr m_speechSynthesizer; std::shared_ptr m_dialogUXStateAggregator; std::shared_ptr m_focusManager; std::shared_ptr m_testClient; FocusState m_focusState; std::mutex m_mutex; std::condition_variable m_focusChanged; bool m_focusChangeOccurred; #ifdef GSTREAMER_MEDIA_PLAYER std::shared_ptr m_mediaPlayer; #else std::shared_ptr m_mediaPlayer; #endif }; /** * Test ability for the SpeechSynthesizer to handle one Speak directive. * * This test is intended to test the SpeechSynthesizer's ability to receive one directive, play it using a MediaPlayer * then return to a finished state. * */ TEST_F(SpeechSynthesizerTest, handleOneSpeech) { // SpeechSynthesizerObserverInterface defaults to a FINISHED state. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Send audio of "Joke" that will prompt SetMute and Speak. m_directiveSequencer->setDialogRequestId(FIRST_DIALOG_REQUEST_ID); std::string file = g_inputPath + RECOGNIZE_JOKE_AUDIO_FILE_NAME; setupMessageWithAttachmentAndSend( CT_FIRST_RECOGNIZE_EVENT_JSON, file, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status::SUCCESS, SEND_EVENT_TIMEOUT_DURATION); TestMessageSender::SendParams sendRecognizeParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendRecognizeParams, NAME_RECOGNIZE)); // Wait for the directive to route through to our handler. 
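    // The blocking SetMute directive is expected to reach the TestDirectiveHandler first as a PREHANDLE
    // notification and then as a HANDLE callback; the HANDLE result must be marked completed before the
    // DirectiveSequencer will release the Speak directive to the SpeechSynthesizer.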
TestDirectiveHandler::DirectiveParams params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_EQ(params.type, TestDirectiveHandler::DirectiveParams::Type::PREHANDLE); params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_EQ(params.type, TestDirectiveHandler::DirectiveParams::Type::HANDLE); // Unblock the queue so SpeechSynthesizer can do its work. params.result->setCompleted(); // SpeechSynthesizer is now playing. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::GAINING_FOCUS); ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::PLAYING); // Check that SS grabs the channel focus by seeing that the test client has been backgrounded. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::BACKGROUND); // SpeechStarted was sent. TestMessageSender::SendParams sendStartedParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendStartedParams, NAME_SPEECH_STARTED)); // Media Player has finished. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // SpeechFinished is sent here. TestMessageSender::SendParams sendFinishedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendFinishedParams, NAME_SPEECH_FINISHED)); // Alerts channel regains the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::FOREGROUND); } /** * Test ability for the SpeechSynthesizer to handle multiple consecutive Speak directives. * * This test is intended to test the SpeechSynthesizer's ability to receive multiple directives, play each using a * MediaPlayer then return to a finished state and notify the DirectiveSequencer that the directive was handled. This is * done by sending a Recognize event to AVS with audio of "What's up?" which returns four sets of SetMute and Speak. * */ TEST_F(SpeechSynthesizerTest, handleMultipleConsecutiveSpeaks) { // SpeechSynthesizerObserverInterface defaults to a FINISHED state. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Send audio of "What's up" that will prompt four sets of SetMute and Speak. m_directiveSequencer->setDialogRequestId(FIRST_DIALOG_REQUEST_ID); std::string file = g_inputPath + RECOGNIZE_WHATS_UP_AUDIO_FILE_NAME; setupMessageWithAttachmentAndSend( CT_FIRST_RECOGNIZE_EVENT_JSON, file, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status::SUCCESS, SEND_EVENT_TIMEOUT_DURATION); TestMessageSender::SendParams sendRecognizeParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendRecognizeParams, NAME_RECOGNIZE)); for (unsigned int x = 0; x < NUMBER_OF_SPEAK_DIRECTIVES_TO_VALIDATE; ++x) { // Each iteration, remove the blocking setMute directive. 
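        // Poll the handler until a HANDLE callback arrives (asserting along the way that the wait never times
        // out), then complete it so the blocking SetMute no longer gates the Speak directive that follows.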
TestDirectiveHandler::DirectiveParams params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); while (params.type != TestDirectiveHandler::DirectiveParams::Type::HANDLE) { ASSERT_NE(params.type, TestDirectiveHandler::DirectiveParams::Type::TIMEOUT); params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); } params.result->setCompleted(); // SpeechSynthesizer is now speaking. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::GAINING_FOCUS); ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::PLAYING); // Check that SS grabs the channel focus by seeing that the test client has been backgrounded. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::BACKGROUND); // SpeechStarted has sent. TestMessageSender::SendParams sendStartedParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendStartedParams, NAME_SPEECH_STARTED)); // Media Player has finished. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_MEDIA_PLAYER_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // SpeechFinished was sent. TestMessageSender::SendParams sendFinishedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendFinishedParams, NAME_SPEECH_FINISHED)); // Alerts channel regains the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::FOREGROUND); } } /** * Test ability for the SpeechSynthesizer to handle one Speak directive. * * This test is intended to test the SpeechSynthesizer's ability to receive one directive, play it using a MediaPlayer * then return to a finished state. Once the Speak reaches the SpeechSynthesizer, the dialogRequestID is changed and * all directives are cancelled. * */ TEST_F(SpeechSynthesizerTest, bargeInOnOneSpeech) { // SpeechSynthesizerObserverInterface defaults to a FINISHED state. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Send audio of "Joke" that will prompt SetMute and Speak. m_directiveSequencer->setDialogRequestId(FIRST_DIALOG_REQUEST_ID); std::string file = g_inputPath + RECOGNIZE_JOKE_AUDIO_FILE_NAME; setupMessageWithAttachmentAndSend( CT_FIRST_RECOGNIZE_EVENT_JSON, file, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status::SUCCESS, SEND_EVENT_TIMEOUT_DURATION); TestMessageSender::SendParams sendRecognizeParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendRecognizeParams, NAME_RECOGNIZE)); // Wait for the directive to route through to our handler. TestDirectiveHandler::DirectiveParams params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_EQ(params.type, TestDirectiveHandler::DirectiveParams::Type::PREHANDLE); params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_EQ(params.type, TestDirectiveHandler::DirectiveParams::Type::HANDLE); // Unblock the queue so SpeechSynthesizer can do its work. params.result->setCompleted(); // SpeechSynthesizer is now speaking. 
ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::GAINING_FOCUS); ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::PLAYING); // Change the dialogRequestID to cancel the queued directives. m_directiveSequencer->setDialogRequestId(SECOND_DIALOG_REQUEST_ID); // Check that SS grabs the channel focus by seeing that the test client has been backgrounded. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::BACKGROUND); // SpeechStarted was sent. TestMessageSender::SendParams sendStartedParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendStartedParams, NAME_SPEECH_STARTED)); // SpeechSynthesizer has finished. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // No SpeechFinished was sent. TestMessageSender::SendParams sendFinishedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_FALSE(checkSentEventName(sendFinishedParams, NAME_SPEECH_FINISHED)); // Alerts channel regains the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::FOREGROUND); } /** * Test ability for the SpeechSynthesizer to handle a barge in at the begining of consucutive speaks. * * This test is intended to test the SpeechSynthesizer's ability to receive multiple directives, play each using a * MediaPlayer then return to a finished state and notify the DirectiveSequencer that the directive was handled. This is * done by sending a Recognize event to AVS with audio of "What's up?" which returns four sets of SetMute and Speak. * Once the first Speak reaches the SpeechSynthesizer, the dialogRequestID is changed and all directives are cancelled. * */ TEST_F(SpeechSynthesizerTest, bargeInOnMultipleSpeaksAtTheBeginning) { // SpeechSynthesizerObserverInterface defaults to a FINISHED state. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Send audio of "What's Up" that will prompt four sets of SetMute and Speak. m_directiveSequencer->setDialogRequestId(FIRST_DIALOG_REQUEST_ID); std::string file = g_inputPath + RECOGNIZE_WHATS_UP_AUDIO_FILE_NAME; setupMessageWithAttachmentAndSend( CT_FIRST_RECOGNIZE_EVENT_JSON, file, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status::SUCCESS, SEND_EVENT_TIMEOUT_DURATION); TestMessageSender::SendParams sendRecognizeParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendRecognizeParams, NAME_RECOGNIZE)); // Wait for the directive to route through to our handler. TestDirectiveHandler::DirectiveParams params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_NE(params.type, TestDirectiveHandler::DirectiveParams::Type::TIMEOUT); // While blocked, change the dialogRequestID. m_directiveSequencer->setDialogRequestId(SECOND_DIALOG_REQUEST_ID); while (params.type != TestDirectiveHandler::DirectiveParams::Type::TIMEOUT) { if (params.type == TestDirectiveHandler::DirectiveParams::Type::HANDLE) { params.result->setCompleted(); } params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); } // SpeechSynthesizer is still finished. 
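    // The short WANTING_TIMEOUT_DURATION is used below because nothing further is expected to happen: the waits
    // should time out with the observer still reporting FINISHED and with no SpeechStarted or SpeechFinished
    // event appearing on the connection.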
ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WANTING_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Check that the test client is still in the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WANTING_TIMEOUT_DURATION), FocusState::FOREGROUND); // SpeechStarted is not sent. TestMessageSender::SendParams canceledSendStartedParams = m_avsConnectionManager->waitForNext(WANTING_TIMEOUT_DURATION); ASSERT_FALSE(checkSentEventName(canceledSendStartedParams, NAME_SPEECH_STARTED)); // Media Player has not changed. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WANTING_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // SpeechFinished is not sent. TestMessageSender::SendParams canceledSendFinishedParams = m_avsConnectionManager->waitForNext(WANTING_TIMEOUT_DURATION); ASSERT_FALSE(checkSentEventName(canceledSendFinishedParams, NAME_SPEECH_FINISHED)); // Alerts channel regains the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WANTING_TIMEOUT_DURATION), FocusState::FOREGROUND); } /** * Test ability for the SpeechSynthesizer to handle a barge in in the middle of consucutive speaks. * * This test is intended to test the SpeechSynthesizer's ability to receive multiple directives, play each using a * MediaPlayer then return to a finished state and notify the DirectiveSequencer that the directive was handled. This is * done by sending a Recognize event to AVS with audio of "What's up?" which returns four sets of SetMute and Speak. * While the Speak directives are being handled, the dialogRequestID is changed and all directives are cancelled. * */ TEST_F(SpeechSynthesizerTest, bargeInOnMultipleSpeaksInTheMiddle) { // SpeechSynthesizerObserverInterface defaults to a FINISHED state. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Send audio of "What's up" that will prompt four sets of SetMute and Speak. m_directiveSequencer->setDialogRequestId(FIRST_DIALOG_REQUEST_ID); std::string file = g_inputPath + RECOGNIZE_WHATS_UP_AUDIO_FILE_NAME; setupMessageWithAttachmentAndSend( CT_FIRST_RECOGNIZE_EVENT_JSON, file, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status::SUCCESS, SEND_EVENT_TIMEOUT_DURATION); TestMessageSender::SendParams sendRecognizeParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendRecognizeParams, NAME_RECOGNIZE)); // Wait for the directive to route through to our handler. TestDirectiveHandler::DirectiveParams params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); bool handleFound = false; while (params.type != TestDirectiveHandler::DirectiveParams::Type::TIMEOUT && !handleFound) { if (params.type == TestDirectiveHandler::DirectiveParams::Type::HANDLE) { handleFound = true; } else { params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); } } ASSERT_TRUE(handleFound); // Unblock the queue so SS can do its work. params.result->setCompleted(); // SpeechSynthesizer is now speaking. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::GAINING_FOCUS); ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::PLAYING); // Check that SS grabs the channel focus by seeing that the test client has been backgrounded. 
ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::BACKGROUND); // SpeechStarted is sent. TestMessageSender::SendParams sendStartedParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendStartedParams, NAME_SPEECH_STARTED)); // Media Player has finished. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // SpeechFinished is sent here. TestMessageSender::SendParams sendFinishedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendFinishedParams, NAME_SPEECH_FINISHED)); // Alerts channel regains the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::FOREGROUND); // While blocked, change the dialogRequestID to cancel the queued directives. m_directiveSequencer->setDialogRequestId(SECOND_DIALOG_REQUEST_ID); while (params.type != TestDirectiveHandler::DirectiveParams::Type::TIMEOUT) { if (params.type == TestDirectiveHandler::DirectiveParams::Type::HANDLE) { // Unblock the queue so SS can do its work. params.result->setCompleted(); } params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); } // SpeechSynthesizer is still finished. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WANTING_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Check that the test client is still in the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WANTING_TIMEOUT_DURATION), FocusState::FOREGROUND); // SpeechStarted is not sent. TestMessageSender::SendParams canceledSendStartedParams = m_avsConnectionManager->waitForNext(WANTING_TIMEOUT_DURATION); ASSERT_FALSE(checkSentEventName(canceledSendStartedParams, NAME_SPEECH_STARTED)); // Media Player has not changed. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WANTING_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // SpeechFinished is not sent. TestMessageSender::SendParams canceledSendFinishedParams = m_avsConnectionManager->waitForNext(WANTING_TIMEOUT_DURATION); ASSERT_FALSE(checkSentEventName(canceledSendFinishedParams, NAME_SPEECH_FINISHED)); // Alerts channel regains the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WANTING_TIMEOUT_DURATION), FocusState::FOREGROUND); } /** * Test ability for the SpeechSynthesizer to handle a Multiturn scenario. * * This test is intended to test the SpeechSynthesizer's ability to receive one directive, play it using a MediaPlayer * then return to a finished state. Another recognize event is then sent to AVS is response to the ExpectSpeech * directive which prompts another Speak directive to be handled. */ TEST_F(SpeechSynthesizerTest, multiturnScenario) { // SpeechSynthesizerObserverInterface defaults to a FINISHED state. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Send audio of "Wikipedia" that will prompt SetMute, Speak, and ExpectSpeech. 
m_directiveSequencer->setDialogRequestId(FIRST_DIALOG_REQUEST_ID); std::string file = g_inputPath + RECOGNIZE_WIKI_AUDIO_FILE_NAME; setupMessageWithAttachmentAndSend( CT_FIRST_RECOGNIZE_EVENT_JSON, file, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status::SUCCESS, SEND_EVENT_TIMEOUT_DURATION); TestMessageSender::SendParams sendRecognizeParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendRecognizeParams, NAME_RECOGNIZE)); // Wait for the directive to route through to our handler. TestDirectiveHandler::DirectiveParams params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_NE(params.type, TestDirectiveHandler::DirectiveParams::Type::TIMEOUT); while (params.type == TestDirectiveHandler::DirectiveParams::Type::HANDLE) { params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); } params.result->setCompleted(); // SpeechSynthesizer is now speaking. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::GAINING_FOCUS); ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::PLAYING); // Check that SS grabs the channel focus by seeing that the test client has been backgrounded. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::BACKGROUND); // SpeechStarted is sent. TestMessageSender::SendParams sendStartedParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendStartedParams, NAME_SPEECH_STARTED)); // Media Player has finished. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // SpeechFinished is sent here. TestMessageSender::SendParams sendFinishedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendFinishedParams, NAME_SPEECH_FINISHED)); // Alerts channel regains the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::FOREGROUND); params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); bool expectSpeechFound = false; while (params.type != TestDirectiveHandler::DirectiveParams::Type::TIMEOUT) { if (params.type == TestDirectiveHandler::DirectiveParams::Type::HANDLE) { if (params.directive->getName() == NAME_EXPECT_SPEECH) { expectSpeechFound = true; } // Unblock the queue so SS can do its work. params.result->setCompleted(); } params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); } ASSERT_TRUE(expectSpeechFound); // Clear out remaining directives. m_directiveSequencer->setDialogRequestId(FIRST_DIALOG_REQUEST_ID); std::string secondFile = g_inputPath + RECOGNIZE_LIONS_AUDIO_FILE_NAME; setupMessageWithAttachmentAndSend( CT_FIRST_RECOGNIZE_EVENT_JSON, secondFile, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status::SUCCESS, SEND_EVENT_TIMEOUT_DURATION); TestMessageSender::SendParams secondSendRecognizeParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(secondSendRecognizeParams, NAME_RECOGNIZE)); params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); while (params.type != TestDirectiveHandler::DirectiveParams::Type::TIMEOUT) { if (params.type == TestDirectiveHandler::DirectiveParams::Type::HANDLE) { // Unblock the queue so SS can do its work. 
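            // setCompleted() reports the blocking directive as handled to the DirectiveSequencer, allowing any
            // directives queued behind it for this dialogRequestId to proceed.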
params.result->setCompleted(); } params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); } // SpeechSynthesizer is now speaking. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::GAINING_FOCUS); ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::PLAYING); // Check that SS grabs the channel focus by seeing that the test client has been backgrounded. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::BACKGROUND); // SpeechStarted is sent. TestMessageSender::SendParams secondSendStartedParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(secondSendStartedParams, NAME_SPEECH_STARTED)); // Media Player has finished. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // SpeechFinished is sent here. TestMessageSender::SendParams secondSendFinishedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(secondSendFinishedParams, NAME_SPEECH_FINISHED)); // Alerts channel regains the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION), FocusState::FOREGROUND); } /** * Test ability for the SpeechSynthesizer to handle no Speak directives. * * This test is intended to test the SpeechSynthesizer's ability to do nothing when there are no Speak directives. A * Recognize event with audio of "Volume up" is sent to AVS to prompt an AdjustVolume directive but no Speak directives. */ TEST_F(SpeechSynthesizerTest, handleNoSpeakDirectives) { // SpeechSynthesizerObserverInterface defaults to a FINISHED state. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Send audio of "Volume up" that will prompt an AdjustVolume directive. m_directiveSequencer->setDialogRequestId(FIRST_DIALOG_REQUEST_ID); std::string file = g_inputPath + RECOGNIZE_VOLUME_UP_AUDIO_FILE_NAME; setupMessageWithAttachmentAndSend( CT_FIRST_RECOGNIZE_EVENT_JSON, file, avsCommon::sdkInterfaces::MessageRequestObserverInterface::Status::SUCCESS, SEND_EVENT_TIMEOUT_DURATION); TestMessageSender::SendParams sendRecognizeParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION); ASSERT_TRUE(checkSentEventName(sendRecognizeParams, NAME_RECOGNIZE)); // Wait for the directive to route through to our handler. TestDirectiveHandler::DirectiveParams params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_EQ(params.type, TestDirectiveHandler::DirectiveParams::Type::PREHANDLE); params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION); ASSERT_EQ(params.type, TestDirectiveHandler::DirectiveParams::Type::HANDLE); ASSERT_EQ(params.directive->getName(), NAME_ADJUST_VOLUME); // Unblock the queue so SS can do its work. params.result->setCompleted(); // With no Speak directive received, the SpeechSynthesizer stays in the FINISHED state. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WANTING_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Check that the test client is still in the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WANTING_TIMEOUT_DURATION), FocusState::FOREGROUND); // SpeechStarted is not sent.
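    // Since no Speak directive was received, the short wait below is expected to time out without a
    // SpeechStarted event appearing on the connection.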
TestMessageSender::SendParams sendStartedParams = m_avsConnectionManager->waitForNext(WANTING_TIMEOUT_DURATION); ASSERT_FALSE(checkSentEventName(sendStartedParams, NAME_SPEECH_STARTED)); // Media Player has not changed. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WANTING_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // SpeechFinished is not sent. TestMessageSender::SendParams sendFinishedParams = m_avsConnectionManager->waitForNext(WANTING_TIMEOUT_DURATION); ASSERT_FALSE(checkSentEventName(sendFinishedParams, NAME_SPEECH_FINISHED)); // Alerts channel remains in the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WANTING_TIMEOUT_DURATION), FocusState::FOREGROUND); } /** * Test ability for the test setup to handle no directives. * * This test is intended to test the SpeechSynthesizer's ability to do nothing when there are no Speak directives. No * Recognize events are sent to trigger any directives. */ TEST_F(SpeechSynthesizerTest, handleNoDirectives) { // SpeechSynthesizerObserverInterface defaults to a FINISHED state. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); TestMessageSender::SendParams sendRecognizeParams = m_avsConnectionManager->waitForNext(WANTING_TIMEOUT_DURATION); ASSERT_FALSE(checkSentEventName(sendRecognizeParams, NAME_RECOGNIZE)); // Check that no directive routes through to our handler. TestDirectiveHandler::DirectiveParams params = m_directiveHandler->waitForNext(WANTING_TIMEOUT_DURATION); ASSERT_EQ(params.type, TestDirectiveHandler::DirectiveParams::Type::TIMEOUT); // SpeechSynthesizer defaults to Finished state. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WANTING_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // Check that the test client is still in the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WANTING_TIMEOUT_DURATION), FocusState::FOREGROUND); // SpeechStarted is not sent. TestMessageSender::SendParams sendStartedParams = m_avsConnectionManager->waitForNext(WANTING_TIMEOUT_DURATION); ASSERT_FALSE(checkSentEventName(sendStartedParams, NAME_SPEECH_STARTED)); // Media Player has not changed. ASSERT_EQ( m_speechSynthesizerObserver->waitForNext(WANTING_TIMEOUT_DURATION), SpeechSynthesizerObserverInterface::SpeechSynthesizerState::FINISHED); // SpeechFinished is not sent. TestMessageSender::SendParams sendFinishedParams = m_avsConnectionManager->waitForNext(WANTING_TIMEOUT_DURATION); ASSERT_FALSE(checkSentEventName(sendFinishedParams, NAME_SPEECH_FINISHED)); // Alerts channel remains in the foreground. ASSERT_EQ(m_testClient->waitForFocusChange(WANTING_TIMEOUT_DURATION), FocusState::FOREGROUND); } } // namespace test } // namespace integration } // namespace alexaClientSDK int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); if (argc < 3) { std::cerr << "USAGE: " << std::string(argv[0]) << " <path_to_AlexaClientSDKConfig.json> <path_to_inputs_folder>" << std::endl; return 1; } else { alexaClientSDK::integration::test::g_configPath = std::string(argv[1]); alexaClientSDK::integration::test::g_inputPath = std::string(argv[2]); return RUN_ALL_TESTS(); } }
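// Example invocation (binary name and paths shown here are illustrative):
//   ./SpeechSynthesizerIntegrationTest /path/to/AlexaClientSDKConfig.json /path/to/integration/inputs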