Version 1.0.3 alexa-client-sdk
Changes in this update:

- Implemented `setOffset` in `MediaPlayer`.
- Updated `LoggerUtils.cpp` to address https://github.com/alexa/avs-device-sdk/issues/77.
- Bug fix to address incorrect stop behavior caused when Audio Focus is set to `NONE` and released. This addresses https://github.com/alexa/avs-device-sdk/issues/129.
- Bug fix for intermittent failure in `handleMultipleConsecutiveSpeaks`.
- Bug fix for `jsonArrayExists` incorrectly parsing JSON when trying to locate array children.
- Bug fix for ADSL test failures with `sendDirectiveWithoutADialogRequestId`.
- Bug fix for `SpeechSynthesizer` showing the wrong UX state when a burst of `Speak` directives is received.
- Bug fix for recursive loop in `AudioPlayer.Stop`.
parent 6ec9cc785c
commit 4def446a92
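The headline change is the new `setOffset()` entry point on `MediaPlayer`: a caller seeds an offset and the seek is performed at the next `play()`. As a rough caller-side sketch (hedged: the `resumeAtOffset` helper, the stream variable, and the surrounding setup are illustrative; only the interface header, `MediaPlayerStatus`, the istream `setSource` overload, `setOffset`, and `play` appear in this commit):

```cpp
#include <chrono>
#include <istream>
#include <memory>

#include <AVSCommon/Utils/MediaPlayer/MediaPlayerInterface.h>

using alexaClientSDK::avsCommon::utils::mediaPlayer::MediaPlayerInterface;
using alexaClientSDK::avsCommon::utils::mediaPlayer::MediaPlayerStatus;

// Resume an audio stream at a previously saved offset (sketch only).
void resumeAtOffset(
        std::shared_ptr<MediaPlayerInterface> player,
        std::shared_ptr<std::istream> stream,
        std::chrono::milliseconds savedOffset) {
    player->setSource(stream, /* repeat = */ false);
    auto status = player->setOffset(savedOffset);  // FAILURE from players that keep the default stub
    if (MediaPlayerStatus::SUCCESS == status) {
        player->play();  // the seek to savedOffset is performed as part of this play()
    } else {
        player->play();  // non-seeking players simply start from the beginning
    }
}
```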
@ -73,7 +73,7 @@ void DialogUXStateAggregator::onStateChanged(AudioInputProcessorObserverInterfac
[this, state] () {
switch (state) {
case AudioInputProcessorObserverInterface::State::IDLE:
if (DialogUXStateObserverInterface::DialogUXState::THINKING == m_currentState) {
if (DialogUXStateObserverInterface::DialogUXState::THINKING == m_currentState) {
return;
}
setState(DialogUXStateObserverInterface::DialogUXState::IDLE);
@ -109,6 +109,9 @@ void DialogUXStateAggregator::onStateChanged(SpeechSynthesizerObserver::SpeechSy
if (DialogUXStateObserverInterface::DialogUXState::SPEAKING != m_currentState) {
return;
}

m_currentState = DialogUXStateObserverInterface::DialogUXState::FINISHED;

if (!m_multiturnSpeakingToListeningTimer.start(
SHORT_TIMEOUT, std::bind(
&DialogUXStateAggregator::transitionFromSpeakingFinished, this)).valid()) {
@ -161,7 +164,7 @@ void DialogUXStateAggregator::transitionFromThinkingTimedOut() {
void DialogUXStateAggregator::transitionFromSpeakingFinished() {
m_executor.submit(
[this] () {
if (DialogUXStateObserverInterface::DialogUXState::SPEAKING == m_currentState) {
if (DialogUXStateObserverInterface::DialogUXState::FINISHED == m_currentState) {
setState(DialogUXStateObserverInterface::DialogUXState::IDLE);
}
}
@ -44,7 +44,15 @@ public:
THINKING,

/// Alexa is responding to a request with speech.
SPEAKING
SPEAKING,

/**
* Alexa has finished processing a SPEAK directive. In this state there
* are no notifications triggered. If the SPEAK directive is part of a
* speech burst UX moves back to the SPEAKING state. If it was the last
* SPEAK directive after timeout the UX state moves to the IDLE state.
*/
FINISHED
};

/**
@ -79,6 +87,8 @@ inline std::string DialogUXStateObserverInterface::stateToString(DialogUXState s
return "THINKING";
case DialogUXState::SPEAKING:
return "SPEAKING";
case DialogUXState::FINISHED:
return "FINISHED";
}
return "Unknown State";
}
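For readers outside the SDK tree, here is a minimal, self-contained sketch of the pattern the hunk above extends: an enum gaining the new `FINISHED` value and the switch-based `stateToString` helper. Only the values visible in this diff are shown (the real `DialogUXState` enum has additional members); the flat layout and `main` are illustrative.

```cpp
#include <iostream>
#include <string>

// Reduced mirror of DialogUXStateObserverInterface::DialogUXState after this change.
enum class DialogUXState { IDLE, THINKING, SPEAKING, FINISHED };

// Same switch-plus-fallthrough pattern as the inline stateToString in the diff.
std::string stateToString(DialogUXState state) {
    switch (state) {
        case DialogUXState::IDLE:
            return "IDLE";
        case DialogUXState::THINKING:
            return "THINKING";
        case DialogUXState::SPEAKING:
            return "SPEAKING";
        case DialogUXState::FINISHED:
            return "FINISHED";
    }
    return "Unknown State";
}

int main() {
    std::cout << stateToString(DialogUXState::FINISHED) << std::endl;  // prints FINISHED
    return 0;
}
```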
@ -97,7 +97,15 @@ public:
std::shared_ptr<std::istream> stream, bool repeat) = 0;

/**
* TODO ACSDK-423: Implement setOffset behavior.
* Set the offset for playback. A seek will be performed to the offset at the next @c play() command.
*
* The following situations will reset the offset:
* # A seek attempt is made (ie. via play()).
* # A new source is set.
*
* @param offset The offset in milliseconds to seek to.
*
* @return @c SUCCESS if the offset was successfully set, and FAILURE for any error.
*/
virtual MediaPlayerStatus setOffset(std::chrono::milliseconds offset) { return MediaPlayerStatus::FAILURE; }
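The base class ships a non-seeking default (it simply returns `FAILURE`), so existing `MediaPlayerInterface` implementations keep compiling. Below is a hedged sketch of what an implementer-side override can look like: cache the offset now, consume it at the next `play()`. The `MediaPlayerBase`/`SeekablePlayer` types are reduced stand-ins, not SDK classes; only the `setOffset` signature and the FAILURE default come from the interface above.

```cpp
#include <chrono>
#include <iostream>

// Reduced stand-ins so the sketch compiles on its own; the real enum and interface
// live in avsCommon::utils::mediaPlayer.
enum class MediaPlayerStatus { SUCCESS, FAILURE };

struct MediaPlayerBase {
    virtual ~MediaPlayerBase() = default;
    // Mirrors the diff's default: players that cannot seek report FAILURE.
    virtual MediaPlayerStatus setOffset(std::chrono::milliseconds) { return MediaPlayerStatus::FAILURE; }
};

// Hypothetical seekable player: remember the offset now, apply it at the next play().
class SeekablePlayer : public MediaPlayerBase {
public:
    MediaPlayerStatus setOffset(std::chrono::milliseconds offset) override {
        m_pendingOffset = offset;
        m_offsetPending = true;
        return MediaPlayerStatus::SUCCESS;
    }

private:
    std::chrono::milliseconds m_pendingOffset{std::chrono::milliseconds::zero()};
    bool m_offsetPending{false};
};

int main() {
    MediaPlayerBase legacy;
    SeekablePlayer seekable;
    std::cout << std::boolalpha
              << (legacy.setOffset(std::chrono::milliseconds(2000)) == MediaPlayerStatus::FAILURE) << " "
              << (seekable.setOffset(std::chrono::milliseconds(2000)) == MediaPlayerStatus::SUCCESS) << std::endl;
    return 0;
}
```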
@ -199,8 +199,8 @@ bool convertToValue(const rapidjson::Value& documentNode, int64_t* value) {
}

bool jsonArrayExists(const rapidjson::Value & parsedDocument, const std::string & key) {
auto iter = parsedDocument.FindMember(key.c_str());
if (parsedDocument.MemberEnd() != iter) {
auto iter = parsedDocument.FindMember(key);
if (parsedDocument.MemberEnd() == iter) {
ACSDK_ERROR(LX("lookupArrayExistsFailed").d("reason", "keyNotFound").d("key", key));
return false;
}
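The fix above inverts the `MemberEnd` comparison so a missing key is reported as an error instead of being treated as found. Here is a hedged, standalone RapidJSON sketch of the same lookup-then-type-check flow; the `jsonArrayExistsSketch` name, the JSON literal, and the stderr logging are illustrative, while `FindMember`, `MemberEnd`, and `IsArray` are standard RapidJSON calls.

```cpp
#define RAPIDJSON_HAS_STDSTRING 1

#include <iostream>
#include <string>

#include <rapidjson/document.h>

// Returns true only when `key` exists in `value` and names a JSON array.
static bool jsonArrayExistsSketch(const rapidjson::Value& value, const std::string& key) {
    auto iter = value.FindMember(key);
    if (value.MemberEnd() == iter) {
        std::cerr << "keyNotFound: " << key << std::endl;
        return false;
    }
    return iter->value.IsArray();
}

int main() {
    rapidjson::Document document;
    document.Parse(R"({"directives": [1, 2, 3], "token": "abc"})");
    std::cout << std::boolalpha
              << jsonArrayExistsSketch(document, "directives") << " "    // true
              << jsonArrayExistsSketch(document, "token") << std::endl;  // false
    return 0;
}
```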
@ -18,6 +18,8 @@
#include "AVSCommon/Utils/Logger/LoggerUtils.h"
#include "AVSCommon/Utils/Logger/Logger.h"

#include <cstdio>

namespace alexaClientSDK {
namespace avsCommon {
namespace utils {
@ -132,7 +134,7 @@ std::string formatLogString(
std::chrono::duration_cast<std::chrono::milliseconds>(time.time_since_epoch()).count() %
MILLISECONDS_PER_SECOND);
char millisString[MILLIS_STRING_SIZE];
if (snprintf(millisString, sizeof(millisString), MILLIS_FORMAT_STRING, timeMillisPart) < 0) {
if (std::snprintf(millisString, sizeof(millisString), MILLIS_FORMAT_STRING, timeMillisPart) < 0) {
millisecondFailure = true;
}
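The `LoggerUtils.cpp` change for issue 77 adds `<cstdio>` and qualifies the call as `std::snprintf`, so the build no longer relies on another header leaking a global `snprintf`. A hedged sketch of the millisecond-field formatting the surrounding code performs; the buffer size, format string, and 1000 divisor here are illustrative stand-ins for the SDK's `MILLIS_STRING_SIZE`, `MILLIS_FORMAT_STRING`, and `MILLISECONDS_PER_SECOND` constants.

```cpp
#include <chrono>
#include <cstdio>
#include <iostream>

int main() {
    // Take the sub-second part of "now" and render it as a zero-padded 3-digit field.
    auto now = std::chrono::system_clock::now();
    auto millisPart = static_cast<int>(
        std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count() % 1000);

    char millisString[4];  // three digits plus the terminating null
    if (std::snprintf(millisString, sizeof(millisString), "%03d", millisPart) < 0) {
        std::cerr << "millisecond formatting failed" << std::endl;
        return 1;
    }
    std::cout << "." << millisString << std::endl;  // e.g. ".042"
    return 0;
}
```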
CHANGELOG.md
@ -1,4 +1,17 @@
## ChangeLog
### [1.0.3] - 2017-09-19
* **Enhancements**
  * Implemented `setOffset` in `MediaPlayer`.
  * [Updated `LoggerUtils.cpp`](https://github.com/alexa/avs-device-sdk/issues/77).

* **Bug Fixes**
  * [Bug fix to address incorrect stop behavior caused when Audio Focus is set to `NONE` and released](https://github.com/alexa/avs-device-sdk/issues/129).
  * Bug fix for intermittent failure in `handleMultipleConsecutiveSpeaks`.
  * Bug fix for `jsonArrayExists` incorrectly parsing JSON when trying to locate array children.
  * Bug fix for ADSL test failures with `sendDirectiveWithoutADialogRequestId`.
  * Bug fix for `SpeechSynthesizer` showing the wrong UX state when a burst of `Speak` directives is received.
  * Bug fix for recursive loop in `AudioPlayer.Stop`.

### [1.0.2] - 2017-08-23
* Removed code from AIP which propagates ExpectSpeech initiator strings to subsequent Recognize events. This code will be re-introduced when AVS starts sending initiator strings.
@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 3.1 FATAL_ERROR)

# Set project information
project(AlexaClientSDK VERSION 1.0.2 LANGUAGES CXX)
project(AlexaClientSDK VERSION 1.0.3 LANGUAGES CXX)
set(PROJECT_BRIEF "A cross-platform, modular SDK for interacting with the Alexa Voice Service")

include(build/BuildDefaults.cmake)
@ -328,8 +328,17 @@ private:
/// Send a @c PlaybackMetadataExtracted event.
void sendStreamMetadataExtractedEvent();

/// Get the media player offset.
std::chrono::milliseconds getMediaPlayerOffset();
/**
* Get the current offset in the audio stream.
*
* @note @c MediaPlayer has a getOffset function which only works while actively playing, but AudioPlayer needs to
* be able to report its offset at any time, even when paused or stopped. To address the gap, this function
* reports the live offset from @c MediaPlayer when it is playing, and reports a cached offset when
* @c MediaPlayer is not playing.
*
* @return The current offset in the stream.
*/
std::chrono::milliseconds getOffset();

/// @}
@ -409,6 +418,13 @@ private:
/// This timer is used to send @c ProgressReportIntervalElapsed events.
avsCommon::utils::timing::Timer m_intervalTimer;

/**
* This keeps track of the current offset in the audio stream. Reading the offset from @c MediaPlayer is
* insufficient because @c MediaPlayer only returns a valid offset when it is actively playing, but @c AudioPlayer
* must return a valid offset when @c MediaPlayer is stopped.
*/
std::chrono::milliseconds m_offset;

/// @}

/**
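The doc comments above describe the caching strategy behind this release's offset work: report the live position only while playing, otherwise fall back to the last cached value. A hedged, self-contained sketch of that pattern follows; the `PlayerActivity` values, the -1 "unknown" sentinel, and the clamp-to-zero behavior come from this diff, while `MediaPlayerStub`, `OffsetCache`, and the hard-coded 4200 ms are illustrative.

```cpp
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <iostream>

enum class PlayerActivity { IDLE, PLAYING, STOPPED, PAUSED, BUFFER_UNDERRUN, FINISHED };

// Hypothetical stand-in for MediaPlayer: returns -1 when no position is available.
struct MediaPlayerStub {
    int64_t getOffsetInMilliseconds() const { return playing ? 4200 : -1; }
    bool playing = false;
};

class OffsetCache {
public:
    // Mirrors the shape of AudioPlayer::getOffset(): only refresh the cache while PLAYING,
    // and clamp the -1 "unknown" sentinel to zero.
    std::chrono::milliseconds getOffset(PlayerActivity activity, const MediaPlayerStub& player) {
        if (PlayerActivity::PLAYING != activity) {
            return m_offset;
        }
        m_offset = std::chrono::milliseconds(std::max<int64_t>(0, player.getOffsetInMilliseconds()));
        return m_offset;
    }

private:
    std::chrono::milliseconds m_offset{std::chrono::milliseconds::zero()};
};

int main() {
    MediaPlayerStub player;
    OffsetCache cache;
    player.playing = true;
    std::cout << cache.getOffset(PlayerActivity::PLAYING, player).count() << std::endl;  // 4200 (live)
    player.playing = false;
    std::cout << cache.getOffset(PlayerActivity::STOPPED, player).count() << std::endl;  // 4200 (cached)
    return 0;
}
```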
@ -260,7 +260,8 @@ AudioPlayer::AudioPlayer(
m_playbackFinished{false},
m_currentActivity{PlayerActivity::IDLE},
m_starting{false},
m_focus{FocusState::NONE} {
m_focus{FocusState::NONE},
m_offset{std::chrono::milliseconds{std::chrono::milliseconds::zero()}} {
}

void AudioPlayer::doShutdown() {
@ -483,7 +484,7 @@ void AudioPlayer::executeProvideState(bool sendToken, unsigned int stateRequestT
|
|||
state.AddMember(TOKEN_KEY, m_token, state.GetAllocator());
|
||||
state.AddMember(
|
||||
OFFSET_KEY,
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(getMediaPlayerOffset()).count(),
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(getOffset()).count(),
|
||||
state.GetAllocator());
|
||||
state.AddMember(ACTIVITY_KEY, playerActivityToString(m_currentActivity), state.GetAllocator());
|
||||
|
||||
|
@ -506,7 +507,11 @@ void AudioPlayer::executeProvideState(bool sendToken, unsigned int stateRequestT
|
|||
}
|
||||
|
||||
void AudioPlayer::executeOnFocusChanged(FocusState newFocus) {
|
||||
ACSDK_DEBUG9(LX("executeOnFocusChanged").d("from", m_focus).d("to", newFocus));
|
||||
ACSDK_DEBUG9(LX("executeOnFocusChanged")
|
||||
.d("from", m_focus)
|
||||
.d("to", newFocus)
|
||||
.d("m_starting", m_starting)
|
||||
.d("m_currentActivity", m_currentActivity));
|
||||
if (m_focus == newFocus) {
|
||||
return;
|
||||
}
|
||||
|
@ -572,9 +577,21 @@ void AudioPlayer::executeOnFocusChanged(FocusState newFocus) {
}
break;
case FocusState::NONE:
if (PlayerActivity::STOPPED == m_currentActivity) {
break;
switch (m_currentActivity) {
case PlayerActivity::IDLE:
case PlayerActivity::STOPPED:
case PlayerActivity::FINISHED:
// Nothing more to do if we're already not playing; we got here because the act of stopping
// caused the channel to be released, which in turn caused this callback.
return;
case PlayerActivity::PLAYING:
case PlayerActivity::PAUSED:
case PlayerActivity::BUFFER_UNDERRUN:
// If the focus change came in while we were in a 'playing' state, we need to stop because we are
// yielding the channel.
break;
}

m_audioItems.clear();

std::unique_lock<std::mutex> lock(m_playbackMutex);
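The new switch above is the heart of the `AudioPlayer.Stop` recursion fix: when focus drops to `NONE` and the player is already idle, stopped, or finished, the handler returns instead of stopping again (stopping releases the channel, which triggers this very callback). A hedged sketch of that early-return dispatch; the enums are reduced stand-ins and `shouldStopOnFocusLost` is an illustrative name, not an SDK function.

```cpp
#include <iostream>

enum class FocusState { FOREGROUND, BACKGROUND, NONE };
enum class PlayerActivity { IDLE, PLAYING, STOPPED, PAUSED, BUFFER_UNDERRUN, FINISHED };

// Returns true when losing the channel should trigger a stop, false when the
// player is already not playing and stopping again would just re-enter this path.
bool shouldStopOnFocusLost(PlayerActivity activity) {
    switch (activity) {
        case PlayerActivity::IDLE:
        case PlayerActivity::STOPPED:
        case PlayerActivity::FINISHED:
            return false;  // already quiescent; avoid the stop -> release -> callback loop
        case PlayerActivity::PLAYING:
        case PlayerActivity::PAUSED:
        case PlayerActivity::BUFFER_UNDERRUN:
            return true;   // yield the channel by stopping playback
    }
    return false;
}

int main() {
    std::cout << std::boolalpha
              << shouldStopOnFocusLost(PlayerActivity::PLAYING) << " "         // true
              << shouldStopOnFocusLost(PlayerActivity::STOPPED) << std::endl;  // false
    return 0;
}
```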
@ -606,6 +623,7 @@ void AudioPlayer::executeOnPlaybackStarted() {
|
|||
}
|
||||
|
||||
void AudioPlayer::executeOnPlaybackFinished() {
|
||||
ACSDK_DEBUG9(LX("executeOnPlaybackFinished"));
|
||||
if (m_currentActivity != PlayerActivity::PLAYING ) {
|
||||
ACSDK_ERROR(LX("executeOnPlaybackFinishedError")
|
||||
.d("reason", "notPlaying")
|
||||
|
@ -710,6 +728,7 @@ void AudioPlayer::executePlay(PlayBehavior playBehavior, const AudioItem& audioI
|
|||
}
|
||||
|
||||
void AudioPlayer::playNextItem() {
|
||||
ACSDK_DEBUG9(LX("playNextItem").d("m_audioItems.size", m_audioItems.size()));
|
||||
if (m_audioItems.empty()) {
|
||||
sendPlaybackFailedEvent(
|
||||
m_token,
|
||||
|
@ -742,6 +761,7 @@ void AudioPlayer::playNextItem() {
|
|||
return;
|
||||
}
|
||||
|
||||
ACSDK_DEBUG9(LX("playNextItem").d("item.stream.offset", item.stream.offset.count()));
|
||||
if (item.stream.offset.count() && m_mediaPlayer->setOffset(item.stream.offset) == MediaPlayerStatus::FAILURE) {
|
||||
sendPlaybackFailedEvent(
|
||||
m_token,
|
||||
|
@ -776,6 +796,7 @@ void AudioPlayer::playNextItem() {
|
|||
|
||||
void AudioPlayer::executeStop(bool releaseFocus) {
|
||||
ACSDK_DEBUG9(LX("executestop").d("m_currentActivity", m_currentActivity));
|
||||
auto stopStatus = MediaPlayerStatus::SUCCESS;
|
||||
switch (m_currentActivity) {
|
||||
case PlayerActivity::IDLE:
|
||||
case PlayerActivity::STOPPED:
|
||||
|
@ -788,9 +809,8 @@ void AudioPlayer::executeStop(bool releaseFocus) {
|
|||
case PlayerActivity::PLAYING:
|
||||
case PlayerActivity::PAUSED:
|
||||
case PlayerActivity::BUFFER_UNDERRUN:
|
||||
if (m_mediaPlayer->stop() == MediaPlayerStatus::FAILURE) {
|
||||
executeOnPlaybackError("stopFailed");
|
||||
}
|
||||
getOffset();
|
||||
stopStatus = m_mediaPlayer->stop();
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -802,6 +822,9 @@ void AudioPlayer::executeStop(bool releaseFocus) {
|
|||
m_focusManager->releaseChannel(CHANNEL_NAME, shared_from_this());
|
||||
}
|
||||
changeActivity(PlayerActivity::STOPPED);
|
||||
if (MediaPlayerStatus::FAILURE == stopStatus) {
|
||||
executeOnPlaybackError("mediaPlayerStopFailed");
|
||||
}
|
||||
sendPlaybackStoppedEvent();
|
||||
}
|
||||
|
||||
|
@ -847,7 +870,7 @@ void AudioPlayer::sendEventWithTokenAndOffset(const std::string& eventName) {
|
|||
payload.AddMember(TOKEN_KEY, m_token, payload.GetAllocator());
|
||||
payload.AddMember(
|
||||
OFFSET_KEY,
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(getMediaPlayerOffset()).count(),
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(getOffset()).count(),
|
||||
payload.GetAllocator());
|
||||
|
||||
rapidjson::StringBuffer buffer;
|
||||
|
@ -887,7 +910,7 @@ void AudioPlayer::sendPlaybackStutterFinishedEvent() {
|
|||
payload.AddMember(TOKEN_KEY, m_token, payload.GetAllocator());
|
||||
payload.AddMember(
|
||||
OFFSET_KEY,
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(getMediaPlayerOffset()).count(),
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(getOffset()).count(),
|
||||
payload.GetAllocator());
|
||||
auto stutterDuration = std::chrono::steady_clock::now() - m_bufferUnderrunTimestamp;
|
||||
payload.AddMember(
|
||||
|
@ -920,7 +943,9 @@ void AudioPlayer::sendPlaybackFailedEvent(
|
|||
|
||||
rapidjson::Value currentPlaybackState(rapidjson::kObjectType);
|
||||
currentPlaybackState.AddMember(TOKEN_KEY, m_token, payload.GetAllocator());
|
||||
currentPlaybackState.AddMember(OFFSET_KEY, m_mediaPlayer->getOffsetInMilliseconds(), payload.GetAllocator());
|
||||
currentPlaybackState.AddMember(
|
||||
OFFSET_KEY,
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(getOffset()).count(), payload.GetAllocator());
|
||||
currentPlaybackState.AddMember(ACTIVITY_KEY, playerActivityToString(m_currentActivity), payload.GetAllocator());
|
||||
|
||||
payload.AddMember("currentPlaybackState", currentPlaybackState, payload.GetAllocator());
|
||||
|
@ -965,12 +990,15 @@ void AudioPlayer::sendStreamMetadataExtractedEvent() {
//TODO: Implement/call this once MediaPlayer exports metadata info (ACSDK-414).
}

std::chrono::milliseconds AudioPlayer::getMediaPlayerOffset() {
auto offset = m_mediaPlayer->getOffsetInMilliseconds();
if (offset < 0) {
offset = 0;
std::chrono::milliseconds AudioPlayer::getOffset() {
if (PlayerActivity::PLAYING != m_currentActivity) {
return m_offset;
}
return std::chrono::milliseconds(offset);
m_offset = std::chrono::milliseconds(m_mediaPlayer->getOffsetInMilliseconds());
if (m_offset < std::chrono::milliseconds::zero()) {
m_offset = std::chrono::milliseconds::zero();
}
return m_offset;
}

} // namespace audioPlayer
@ -54,14 +54,13 @@ public:
|
|||
* @return true if expected connectionStatus is received within @c duration else false.
|
||||
*/
|
||||
bool waitFor(const avsCommon::sdkInterfaces::ConnectionStatusObserverInterface::Status connectionStatus,
|
||||
const std::chrono::seconds duration = std::chrono::seconds(10));
|
||||
const std::chrono::seconds duration = std::chrono::seconds(15));
|
||||
|
||||
/**
|
||||
* Function to check if the connection is broken due to Server side Disconnect.
|
||||
* @return true if the disconnect happens due to SERVER_SIDE_DISCONNECT else false.
|
||||
*/
|
||||
bool checkForServerSideDisconnect();
|
||||
|
||||
private:
|
||||
/// Mutex used internally to enforce thread safety and serialize read/write access to @c m_statusChanges.
|
||||
mutable std::mutex m_mutex;
|
||||
|
|
|
@ -24,15 +24,17 @@
|
|||
#include <mutex>
|
||||
|
||||
#include <ACL/AVSConnectionManager.h>
|
||||
|
||||
#include "AVSCommon/SDKInterfaces/MessageSenderInterface.h"
|
||||
#include "AVSCommon/SDKInterfaces/MessageObserverInterface.h"
|
||||
#include <AVSCommon/SDKInterfaces/MessageSenderInterface.h>
|
||||
#include <AVSCommon/SDKInterfaces/MessageObserverInterface.h>
|
||||
#include <AVSCommon/Utils/RequiresShutdown.h>
|
||||
|
||||
namespace alexaClientSDK {
|
||||
namespace integration {
|
||||
namespace test {
|
||||
|
||||
class TestMessageSender : public avsCommon::sdkInterfaces::MessageSenderInterface {
|
||||
class TestMessageSender :
|
||||
public avsCommon::sdkInterfaces::MessageSenderInterface,
|
||||
public avsCommon::utils::RequiresShutdown {
|
||||
public:
|
||||
/// Destructor.
|
||||
~TestMessageSender() = default;
|
||||
|
@ -123,6 +125,8 @@ public:
|
|||
*/
|
||||
void synchronize();
|
||||
|
||||
void doShutdown() override;
|
||||
|
||||
private:
|
||||
/// Mutex to protect m_queue.
|
||||
std::mutex m_mutex;
|
||||
|
|
|
@ -22,6 +22,7 @@ using namespace alexaClientSDK;
|
|||
using namespace acl;
|
||||
using namespace avsCommon::avs;
|
||||
using namespace avsCommon::sdkInterfaces;
|
||||
using namespace avsCommon::utils;
|
||||
|
||||
namespace alexaClientSDK {
|
||||
namespace integration {
|
||||
|
@ -31,7 +32,7 @@ TestMessageSender::TestMessageSender(
|
|||
std::shared_ptr<acl::MessageRouterInterface> messageRouter,
|
||||
bool isEnabled,
|
||||
std::shared_ptr<ConnectionStatusObserverInterface> connectionStatusObserver,
|
||||
std::shared_ptr<MessageObserverInterface> messageObserver) {
|
||||
std::shared_ptr<MessageObserverInterface> messageObserver) : RequiresShutdown{"TestMessageSender"} {
|
||||
m_connectionManager = acl::AVSConnectionManager::create(messageRouter, isEnabled, { connectionStatusObserver },
|
||||
{ messageObserver });
|
||||
// TODO: ACSDK-421: Remove the callback when m_avsConnection manager is no longer an observer to
|
||||
|
@ -118,6 +119,10 @@ void TestMessageSender::setAVSEndpoint(const std::string& avsEndpoint) {
|
|||
m_connectionManager->onStateChanged(StateSynchronizerObserverInterface::State::SYNCHRONIZED);
|
||||
}
|
||||
|
||||
void TestMessageSender::doShutdown() {
|
||||
m_connectionManager->shutdown();
|
||||
}
|
||||
|
||||
} // namespace test
|
||||
} // namespace integration
|
||||
} // namespace alexaClientSDK
|
||||
|
|
|
@ -138,7 +138,9 @@ static const std::string CONTENT_ACTIVITY_ID = "Content";
|
|||
/// Sample alerts activity id.
|
||||
static const std::string ALERTS_ACTIVITY_ID = "Alerts";
|
||||
// This Integer to be used to specify a timeout in seconds.
|
||||
static const std::chrono::seconds WAIT_FOR_TIMEOUT_DURATION(20);
|
||||
static const std::chrono::seconds WAIT_FOR_TIMEOUT_DURATION(25);
|
||||
// This Integer to be used to specify a short timeout in seconds.
|
||||
static const std::chrono::seconds SHORT_TIMEOUT_DURATION(5);
|
||||
/// The compatible encoding for AIP.
|
||||
static const avsCommon::utils::AudioFormat::Encoding COMPATIBLE_ENCODING =
|
||||
avsCommon::utils::AudioFormat::Encoding::LPCM;
|
||||
|
@ -807,10 +809,10 @@ TEST_F(AlertsTest, DisconnectAndReconnectBeforeLocalStop) {
|
|||
ASSERT_TRUE(focusChanged);
|
||||
|
||||
connect();
|
||||
|
||||
|
||||
// Locally stop the alarm.
|
||||
m_alertsAgent->onLocalStop();
|
||||
|
||||
|
||||
//AlertStopped Event is sent.
|
||||
sendParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
ASSERT_TRUE(checkSentEventName(sendParams, NAME_ALERT_STOPPED));
|
||||
|
@ -880,11 +882,6 @@ TEST_F(AlertsTest, RemoveAllAlertsBeforeAlertIsActive) {
|
|||
sendAudioFileAsRecognize(RECOGNIZE_TIMER_AUDIO_FILE_NAME);
|
||||
TestMessageSender::SendParams sendParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
ASSERT_TRUE(checkSentEventName(sendParams, NAME_RECOGNIZE));
|
||||
bool focusChanged = false;
|
||||
ASSERT_EQ(m_testContentClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION, &focusChanged), FocusState::BACKGROUND);
|
||||
ASSERT_TRUE(focusChanged);
|
||||
ASSERT_EQ(m_testContentClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION, &focusChanged), FocusState::FOREGROUND);
|
||||
ASSERT_TRUE(focusChanged);
|
||||
|
||||
// Speech is handled.
|
||||
TestMessageSender::SendParams sendStartedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
|
@ -892,25 +889,32 @@ TEST_F(AlertsTest, RemoveAllAlertsBeforeAlertIsActive) {
|
|||
TestMessageSender::SendParams sendFinishedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
ASSERT_TRUE(checkSentEventName(sendFinishedParams, NAME_SPEECH_FINISHED));
|
||||
|
||||
ASSERT_EQ(m_testContentClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION, &focusChanged), FocusState::FOREGROUND);
|
||||
bool focusChanged = false;
|
||||
FocusState state;
|
||||
state = m_testContentClient->waitForFocusChange(WAIT_FOR_TIMEOUT_DURATION, &focusChanged);
|
||||
ASSERT_TRUE(focusChanged);
|
||||
ASSERT_EQ(state, FocusState::FOREGROUND);
|
||||
|
||||
// SetAlertSucceeded Event is sent
|
||||
sendParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
ASSERT_TRUE(checkSentEventName(sendParams, NAME_SET_ALERT_SUCCEEDED));
|
||||
|
||||
// Remove all alerts.
|
||||
m_alertsAgent->removeAllAlerts();
|
||||
|
||||
// AlertStarted Event is not sent.
|
||||
sendParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
sendParams = m_avsConnectionManager->waitForNext(SHORT_TIMEOUT_DURATION);
|
||||
ASSERT_FALSE(checkSentEventName(sendParams, NAME_ALERT_STARTED));
|
||||
|
||||
// Locally stop the alarm.
|
||||
m_alertsAgent->onLocalStop();
|
||||
|
||||
// AlertStopped Event is sent.
|
||||
sendParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
// AlertStopped Event is not sent.
|
||||
sendParams = m_avsConnectionManager->waitForNext(SHORT_TIMEOUT_DURATION);
|
||||
ASSERT_FALSE(checkSentEventName(sendParams, NAME_ALERT_STOPPED));
|
||||
|
||||
// Focus has not changed.
|
||||
focusChanged = false;
|
||||
state = m_testContentClient->waitForFocusChange(SHORT_TIMEOUT_DURATION, &focusChanged);
|
||||
ASSERT_FALSE(focusChanged);
|
||||
ASSERT_EQ(state, FocusState::FOREGROUND);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1036,12 +1040,16 @@ TEST_F(AlertsTest, UserShortUnrelatedBargeInOnActiveTimer) {
|
|||
|
||||
// Write audio to SDS saying "Tell me a joke"
|
||||
sendAudioFileAsRecognize(RECOGNIZE_JOKE_AUDIO_FILE_NAME);
|
||||
|
||||
sendParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
ASSERT_TRUE(checkSentEventName(sendParams, NAME_RECOGNIZE));
|
||||
|
||||
sendParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
ASSERT_TRUE(checkSentEventName(sendParams, NAME_ALERT_ENTERED_BACKGROUND));
|
||||
if (getSentEventName(sendParams) == NAME_ALERT_ENTERED_BACKGROUND) {
|
||||
sendStartedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
}
|
||||
else {
|
||||
ASSERT_TRUE(checkSentEventName(sendParams, NAME_RECOGNIZE));
|
||||
sendParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
ASSERT_TRUE(checkSentEventName(sendParams, NAME_ALERT_ENTERED_BACKGROUND));
|
||||
}
|
||||
|
||||
// Speech is handled.
|
||||
sendStartedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
#include "AVSCommon/AVS/Attachment/AttachmentManager.h"
|
||||
#include "AVSCommon/AVS/Attachment/InProcessAttachmentWriter.h"
|
||||
#include "AVSCommon/AVS/Attachment/InProcessAttachmentReader.h"
|
||||
#include "AVSCommon/AVS/EventBuilder.h"
|
||||
#include "AVSCommon/SDKInterfaces/ExceptionEncounteredSenderInterface.h"
|
||||
#include "AVSCommon/SDKInterfaces/DirectiveHandlerInterface.h"
|
||||
#include "AVSCommon/SDKInterfaces/DirectiveHandlerResultInterface.h"
|
||||
|
@ -47,6 +48,9 @@
|
|||
#include "Integration/ObservableMessageRequest.h"
|
||||
#include "Integration/TestDirectiveHandler.h"
|
||||
#include "Integration/TestExceptionEncounteredSender.h"
|
||||
#include <rapidjson/stringbuffer.h>
|
||||
#include <rapidjson/writer.h>
|
||||
#include <rapidjson/error/en.h>
|
||||
|
||||
namespace alexaClientSDK {
|
||||
namespace integration {
|
||||
|
@ -62,6 +66,7 @@ using namespace avsCommon::avs::initialization;
|
|||
using namespace avsCommon::avs::attachment;
|
||||
using namespace avsCommon::utils::sds;
|
||||
using namespace avsCommon::utils::json;
|
||||
using namespace rapidjson;
|
||||
|
||||
/// String to identify log entries originating from this file.
|
||||
static const std::string TAG("AlexaDirectiveSequencerLibraryTest");
|
||||
|
@ -137,7 +142,6 @@ static const std::string TAG("AlexaDirectiveSequencerLibraryTest");
|
|||
"}]" \
|
||||
"}"
|
||||
|
||||
|
||||
/// This is a 16 bit 16 kHz little endian linear PCM audio file of "Joke" to be recognized.
|
||||
static const std::string RECOGNIZE_JOKE_AUDIO_FILE_NAME = "/recognize_joke_test.wav";
|
||||
/// This is a 16 bit 16 kHz little endian linear PCM audio file of "Wikipedia" to be recognized.
|
||||
|
@ -146,6 +150,8 @@ static const std::string RECOGNIZE_WIKI_AUDIO_FILE_NAME = "/recognize_wiki_test.
|
|||
static const std::string RECOGNIZE_LIONS_AUDIO_FILE_NAME = "/recognize_lions_test.wav";
|
||||
/// This is a 16 bit 16 kHz little endian linear PCM audio file of "What's up" to be recognized.
|
||||
static const std::string RECOGNIZE_WHATS_UP_AUDIO_FILE_NAME = "/recognize_whats_up_test.wav";
|
||||
/// This is a 16 bit 16 kHz little endian linear PCM audio file of "Set a timer for 5 seconds" to be recognized.
|
||||
static const std::string RECOGNIZE_TIMER_AUDIO_FILE_NAME = "/recognize_timer_test.wav";
|
||||
|
||||
//String to be used as a basic DialogRequestID.
|
||||
#define FIRST_DIALOG_REQUEST_ID "DialogRequestID123"
|
||||
|
@ -154,11 +160,8 @@ static const std::string RECOGNIZE_WHATS_UP_AUDIO_FILE_NAME = "/recognize_whats_
|
|||
|
||||
/// This string specifies a Recognize event using the CLOSE_TALK profile and uses the first DialogRequestID.
|
||||
static const std::string CT_FIRST_RECOGNIZE_EVENT_JSON = RECOGNIZE_EVENT_JSON(CLOSE_TALK, FIRST_DIALOG_REQUEST_ID);
|
||||
/// This string specifies a Recognize event using the CLOSE_TALK profile and uses the first DialogRequestID.
|
||||
static const std::string CT_FIRST_RECOGNIZE_EVENT_JSON_NEAR = RECOGNIZE_EVENT_JSON(NEAR_FIELD, FIRST_DIALOG_REQUEST_ID);
|
||||
/// This string specifies a Recognize event using the CLOSE_TALK profile and uses the second DialogRequestID.
|
||||
static const std::string CT_SECOND_RECOGNIZE_EVENT_JSON = RECOGNIZE_EVENT_JSON(CLOSE_TALK, SECOND_DIALOG_REQUEST_ID);
|
||||
|
||||
// This string to be used for ClearQueue Directives which use the NAMESPACE_AUDIO_PLAYER namespace.
|
||||
static const std::string NAME_CLEAR_QUEUE = "ClearQueue";
|
||||
// This string to be used for ExpectSpeech Directives which use the NAMESPACE_SPEECH_RECOGNIZER namespace.
|
||||
|
@ -171,17 +174,25 @@ static const std::string NAME_SET_MUTE = "SetMute";
|
|||
static const std::string NAME_SPEAK = "Speak";
|
||||
// This string to be used for Stop Directives which use the NAMESPACE_AUDIO_PLAYER namespace.
|
||||
static const std::string NAME_STOP = "Stop";
|
||||
// This string to be used for SpeechStarted Directives which use the NAMESPACE_SPEECH_SYNTHESIZER namespace.
|
||||
static const std::string NAME_SPEECH_STARTED = "SpeechStarted";
|
||||
// This string to be used for SpeechFinished Directives which use the NAMESPACE_SPEECH_SYNTHESIZER namespace.
|
||||
static const std::string NAME_SPEECH_FINISHED = "SpeechFinished";
|
||||
// This string to be used for SetAlertFailed Directives which use the NAMESPACE_ALERTS namespace.
|
||||
static const std::string NAME_SET_ALERT_FAILED = "SetAlertFailed";
|
||||
// This string to be used for SetAlert Directives which use the NAMESPACE_ALERTS namespace.
|
||||
static const std::string NAME_SET_ALERT = "SetAlert";
|
||||
|
||||
// This String to be used to register the AudioPlayer namespace to a DirectiveHandler.
|
||||
static const std::string NAMESPACE_AUDIO_PLAYER = "AudioPlayer";
|
||||
// This String to be used to register the Alerts namespace to a DirectiveHandler.
|
||||
static const std::string NAMESPACE_ALERTS = "Alerts";
|
||||
// This String to be used to register the Speaker namespace to a DirectiveHandler.
|
||||
static const std::string NAMESPACE_SPEAKER = "Speaker";
|
||||
// This String to be used to register the SpeechRecognizer namespace to a DirectiveHandler.
|
||||
static const std::string NAMESPACE_SPEECH_RECOGNIZER = "SpeechRecognizer";
|
||||
// This String to be used to register the SpeechSynthesizer namespace to a DirectiveHandler.
|
||||
static const std::string NAMESPACE_SPEECH_SYNTHESIZER = "SpeechSynthesizer";
|
||||
// This string to be used for StopCapture Directives which use the NAMESPACE_SPEECH_RECOGNIZER namespace.
|
||||
static const std::string NAME_STOP_CAPTURE = "StopCapture";
|
||||
|
||||
// This pair connects a ExpectSpeech name and SpeechRecognizer namespace for use in DirectiveHandler registration.
|
||||
static const NamespaceAndName EXPECT_SPEECH_PAIR(NAMESPACE_SPEECH_RECOGNIZER, NAME_EXPECT_SPEECH);
|
||||
|
@ -189,8 +200,8 @@ static const NamespaceAndName EXPECT_SPEECH_PAIR(NAMESPACE_SPEECH_RECOGNIZER, NA
|
|||
static const NamespaceAndName SET_MUTE_PAIR(NAMESPACE_SPEAKER, NAME_SET_MUTE);
|
||||
// This pair connects a Speak name and SpeechSynthesizer namespace for use in DirectiveHandler registration.
|
||||
static const NamespaceAndName SPEAK_PAIR(NAMESPACE_SPEECH_SYNTHESIZER, NAME_SPEAK);
|
||||
// This pair connects a StopCapture name and SpeechRecognizer namespace for use in DirectiveHandler registration.
|
||||
static const NamespaceAndName STOP_CAPTURE_PAIR(NAMESPACE_SPEECH_RECOGNIZER, NAME_STOP_CAPTURE);
|
||||
// This pair connects a SetAlert name and Alerts namespace for use in DirectiveHandler registration.
|
||||
static const NamespaceAndName SET_ALERT_PAIR(NAMESPACE_ALERTS, NAME_SET_ALERT);
|
||||
|
||||
// This Integer to be used to specify a timeout in seconds for a directive to reach the DirectiveHandler.
|
||||
static const std::chrono::seconds WAIT_FOR_TIMEOUT_DURATION(5);
|
||||
|
@ -211,6 +222,10 @@ static const std::string JSON_MESSAGE_MESSAGE_ID_KEY = "messageId";
|
|||
static const std::string JSON_MESSAGE_DIALOG_REQUEST_ID_KEY = "dialogRequestId";
|
||||
/// JSON key to get the payload object of a message.
|
||||
static const std::string JSON_MESSAGE_PAYLOAD_KEY = "payload";
|
||||
/// JSON key to get the payload object of a message.
|
||||
static const std::string JSON_MESSAGE_TOKEN_KEY = "token";
|
||||
/// JSON key to add to the payload object of a message.
|
||||
static const char TOKEN_KEY[] = "token";
|
||||
|
||||
/// Path to configuration file (from command line arguments).
|
||||
std::string configPath;
|
||||
|
@ -379,6 +394,27 @@ protected:
|
|||
ASSERT_NE (params.type, TestExceptionEncounteredSender::ExceptionParams::Type::TIMEOUT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Function to setup a message with a token and send it to AVS.
|
||||
*
|
||||
* @param eventName Name of the event to send.
|
||||
* @param eventNameSpace Namespace of the event to send.
|
||||
* @param dialogRequestID DialogRequestID to use to send the event.
|
||||
* @param token Token to be added to the event payload.
|
||||
*/
|
||||
void sendEventWithToken(const std::string& eventName, const std::string& eventNameSpace,
|
||||
const std::string& dialogRequestID, std::string token) {
|
||||
rapidjson::Document payload(rapidjson::kObjectType);
|
||||
payload.AddMember(TOKEN_KEY, token, payload.GetAllocator());
|
||||
|
||||
rapidjson::StringBuffer buffer;
|
||||
rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
|
||||
ASSERT_TRUE (payload.Accept(writer));
|
||||
|
||||
auto event = buildJsonEventString(eventNameSpace, eventName, dialogRequestID, buffer.GetString());
|
||||
sendEvent(event.second, nullptr,avsCommon::avs::MessageRequest::Status::SUCCESS, std::chrono::seconds(SEND_EVENT_TIMEOUT_DURATION));
|
||||
}
|
||||
|
||||
/// Object to monitor the status of the authorization to communicate with @c AVS.
|
||||
std::shared_ptr<AuthObserver> m_authObserver;
|
||||
|
||||
|
@ -409,7 +445,21 @@ protected:
|
|||
std::shared_ptr<TestExceptionEncounteredSender> m_exceptionEncounteredSender;
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Helper function to extract the token from a directive.
|
||||
*
|
||||
* @param params that has the JSON to be searched through.
|
||||
* @param returnToken to hold the resulting token.
|
||||
* @return Indicates whether extracting the token was successful.
|
||||
*/
|
||||
bool getToken(TestDirectiveHandler::DirectiveParams params, std::string &returnToken) {
|
||||
std::string directiveString;
|
||||
std::string directivePayload;
|
||||
std::string directiveToken;
|
||||
jsonUtils::lookupStringValue(params.directive->getUnparsedDirective(), JSON_MESSAGE_DIRECTIVE_KEY, &directiveString);
|
||||
jsonUtils::lookupStringValue(directiveString, JSON_MESSAGE_PAYLOAD_KEY, &directivePayload);
|
||||
return jsonUtils::lookupStringValue(directivePayload, JSON_MESSAGE_TOKEN_KEY, &returnToken);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test DirectiveSequencer's ability to pass an @c AVSDirective to a @c DirectiveHandler.
|
||||
|
@ -600,49 +650,71 @@ TEST_F(AlexaDirectiveSequencerLibraryTest, dropQueueAfterBargeIn) {
|
|||
/**
|
||||
* Test @c DirectiveSequencer's ability to handle a Directive without a DialogRequestID.
|
||||
*
|
||||
* This test sends a @c NEAR_FIELD @c Recognize event to AVS to trigger delivery of a @c StopCapture directive.
|
||||
* @c StopCapture directives do not have a @c dialogRequestId value. This test uses that fact to verify that
|
||||
* This test sends a @c Recognize event to AVS to trigger delivery of a @c Speak and a @c SetAlert directive.
|
||||
* @c SetAlert directives do not have a @c dialogRequestId value. This test uses that fact to verify that
|
||||
* @c AVSDirectives with no @c dialogRequestId are processed properly.
|
||||
*/
|
||||
TEST_F(AlexaDirectiveSequencerLibraryTest, sendDirectiveWithoutADialogRequestID) {
|
||||
DirectiveHandlerConfiguration config;
|
||||
config[SET_MUTE_PAIR] = BlockingPolicy::NON_BLOCKING;
|
||||
config[SPEAK_PAIR] = BlockingPolicy::NON_BLOCKING;
|
||||
config[STOP_CAPTURE_PAIR] = BlockingPolicy::NON_BLOCKING;
|
||||
config[SET_ALERT_PAIR] = BlockingPolicy::NON_BLOCKING;
|
||||
|
||||
auto directiveHandler = std::make_shared<TestDirectiveHandler>(config);
|
||||
|
||||
ASSERT_TRUE(m_directiveSequencer->addDirectiveHandler(directiveHandler));
|
||||
|
||||
// Send audio of "Joke" that will prompt SetMute and Speak.
|
||||
// Send audio of "Set a timer for 5 seconds" that will prompt a Speak.
|
||||
m_directiveSequencer->setDialogRequestId(FIRST_DIALOG_REQUEST_ID);
|
||||
std::string file = inputPath + RECOGNIZE_JOKE_AUDIO_FILE_NAME;
|
||||
std::string file = inputPath + RECOGNIZE_TIMER_AUDIO_FILE_NAME;
|
||||
setupMessageWithAttachmentAndSend(
|
||||
CT_FIRST_RECOGNIZE_EVENT_JSON_NEAR,
|
||||
CT_FIRST_RECOGNIZE_EVENT_JSON,
|
||||
file,
|
||||
avsCommon::avs::MessageRequest::Status::SUCCESS,
|
||||
SEND_EVENT_TIMEOUT_DURATION);
|
||||
|
||||
std::string token;
|
||||
bool handleAlertFound = false;
|
||||
bool prehandleAlertFound = false;
|
||||
bool prehandleSpeakFound = false;
|
||||
|
||||
TestDirectiveHandler::DirectiveParams params;
|
||||
|
||||
// Make sure we get preHandle followed by handle for StopCapture.
|
||||
|
||||
params = directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
ASSERT_TRUE(params.isPreHandle());
|
||||
ASSERT_TRUE(params.directive->getDialogRequestId().empty());
|
||||
ASSERT_EQ(params.directive->getName(), NAME_STOP_CAPTURE);
|
||||
while (!params.isTimeout()) {
|
||||
if (params.directive->getName() == NAME_SPEAK) {
|
||||
ASSERT_FALSE(params.directive->getDialogRequestId().empty());
|
||||
if (params.isPreHandle()) {
|
||||
prehandleSpeakFound = true;
|
||||
} else if (params.isHandle()) {
|
||||
ASSERT_TRUE(prehandleSpeakFound);
|
||||
ASSERT_TRUE(getToken(params, token));
|
||||
// Send speechFinished to prompt the cloud to send setAlert which does not have a DialogRequestID.
|
||||
sendEventWithToken(NAME_SPEECH_FINISHED, NAMESPACE_SPEECH_SYNTHESIZER, FIRST_DIALOG_REQUEST_ID, token);
|
||||
}
|
||||
} else {
|
||||
ASSERT_EQ(params.directive->getName(), NAME_SET_ALERT);
|
||||
ASSERT_TRUE(params.directive->getDialogRequestId().empty());
|
||||
if (params.isPreHandle()) {
|
||||
prehandleAlertFound = true;
|
||||
} else if (params.isHandle()) {
|
||||
ASSERT_TRUE(prehandleAlertFound);
|
||||
handleAlertFound = true;
|
||||
ASSERT_TRUE(getToken(params, token));
|
||||
}
|
||||
}
|
||||
params = directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
}
|
||||
ASSERT_TRUE(handleAlertFound);
|
||||
|
||||
params = directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
ASSERT_TRUE(params.isHandle());
|
||||
ASSERT_TRUE(params.directive->getDialogRequestId().empty());
|
||||
ASSERT_EQ(params.directive->getName(), NAME_STOP_CAPTURE);
|
||||
// Send setAlertFailed to clean up the alert on the cloud side.
|
||||
sendEventWithToken(NAME_SET_ALERT_FAILED, NAMESPACE_ALERTS, FIRST_DIALOG_REQUEST_ID, token);
|
||||
|
||||
params = directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
while (!params.isTimeout()) {
|
||||
// Make sure no other calls for StopCapture are made except for the initial handleImmediately.
|
||||
ASSERT_NE(params.directive->getName(), NAME_STOP_CAPTURE);
|
||||
// Make sure no other calls for SetAlert are made except for the initial handleImmediately.
|
||||
ASSERT_NE(params.directive->getName(), NAME_SET_ALERT);
|
||||
params = directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
File diff suppressed because it is too large
@ -649,7 +649,6 @@ TEST_F(AudioPlayerTest, FlashBriefing) {
|
|||
|
||||
if (hasFlashbriefingItems) {
|
||||
// The last speak is then allowed.
|
||||
sendStartedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
EXPECT_TRUE(checkSentEventName(sendStartedParams, NAME_SPEECH_STARTED));
|
||||
sendFinishedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
EXPECT_TRUE(checkSentEventName(sendFinishedParams, NAME_SPEECH_FINISHED));
|
||||
|
|
|
@ -208,6 +208,12 @@ static const std::chrono::seconds SEND_EVENT_TIMEOUT_DURATION(20);
|
|||
static const std::chrono::seconds DIRECTIVE_TIMEOUT_DURATION(7);
|
||||
// This Integer to be used when it is expected the duration will timeout.
|
||||
static const std::chrono::seconds WANTING_TIMEOUT_DURATION(1);
|
||||
// This Integer to be used to specify a timeout in seconds for the Media Player to finish playing.
|
||||
static const std::chrono::seconds WAIT_FOR_MEDIA_PLAYER_TIMEOUT_DURATION(60);
|
||||
// This Integer to be used to specify number of Speak Directives to validate in test handleMultipleConsecutiveSpeaks.
|
||||
// Although we anticipate four Speak Directives, we validate only three Speak Directives.
|
||||
// Validating three Speak Directives helps keep the test short.
|
||||
static const unsigned int NUMBER_OF_SPEAK_DIRECTIVES_TO_VALIDATE = 3;
|
||||
|
||||
/// JSON key to get the event object of a message.
|
||||
static const std::string JSON_MESSAGE_EVENT_KEY = "event";
|
||||
|
@ -614,8 +620,7 @@ TEST_F(SpeechSynthesizerTest, handleMultipleConsecutiveSpeaks) {
|
|||
TestMessageSender::SendParams sendRecognizeParams = m_avsConnectionManager->waitForNext(DIRECTIVE_TIMEOUT_DURATION);
|
||||
ASSERT_TRUE(checkSentEventName(sendRecognizeParams, NAME_RECOGNIZE));
|
||||
|
||||
int numberOfAnticipatedSpeakDirectives = 4;
|
||||
for (int x = 0; x < numberOfAnticipatedSpeakDirectives; ++x) {
|
||||
for (unsigned int x = 0; x < NUMBER_OF_SPEAK_DIRECTIVES_TO_VALIDATE; ++x) {
|
||||
// Each iteration, remove the blocking setMute directive.
|
||||
TestDirectiveHandler::DirectiveParams params = m_directiveHandler->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
while (params.type != TestDirectiveHandler::DirectiveParams::Type::HANDLE) {
|
||||
|
@ -637,7 +642,7 @@ TEST_F(SpeechSynthesizerTest, handleMultipleConsecutiveSpeaks) {
|
|||
|
||||
// Media Player has finished.
|
||||
ASSERT_EQ(m_speechSynthesizerObserver->waitForNext(
|
||||
WAIT_FOR_TIMEOUT_DURATION), SpeechSynthesizerObserver::SpeechSynthesizerState::FINISHED);
|
||||
WAIT_FOR_MEDIA_PLAYER_TIMEOUT_DURATION), SpeechSynthesizerObserver::SpeechSynthesizerState::FINISHED);
|
||||
|
||||
// SpeechFinished was sent.
|
||||
TestMessageSender::SendParams sendFinishedParams = m_avsConnectionManager->waitForNext(WAIT_FOR_TIMEOUT_DURATION);
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#include <AVSCommon/Utils/MediaPlayer/MediaPlayerInterface.h>
|
||||
#include <AVSCommon/Utils/PlaylistParser/PlaylistParserInterface.h>
|
||||
|
||||
#include "MediaPlayer/OffsetManager.h"
|
||||
#include "MediaPlayer/PipelineInterface.h"
|
||||
#include "MediaPlayer/SourceInterface.h"
|
||||
|
||||
|
@ -76,6 +77,11 @@ public:
|
|||
*/
|
||||
avsCommon::utils::mediaPlayer::MediaPlayerStatus resume() override;
|
||||
int64_t getOffsetInMilliseconds() override;
|
||||
/**
|
||||
* This function is a setter, storing @c offset to be consumed internally by @c play().
|
||||
* The function will always return MediaPlayerStatus::SUCCESS.
|
||||
*/
|
||||
avsCommon::utils::mediaPlayer::MediaPlayerStatus setOffset(std::chrono::milliseconds offset) override;
|
||||
void setObserver(std::shared_ptr<avsCommon::utils::mediaPlayer::MediaPlayerObserverInterface> observer) override;
|
||||
/// @}
|
||||
|
||||
|
@ -286,6 +292,16 @@ private:
|
|||
*/
|
||||
void handleGetOffsetInMilliseconds(std::promise<int64_t>* promise);
|
||||
|
||||
/**
|
||||
* Worker thread handler for setting the playback position.
|
||||
*
|
||||
* @param promise A promise to fulfill with a @c MediaPlayerStatus value once the offset has been set.
|
||||
* @param offset The offset to start playing from.
|
||||
*/
|
||||
void handleSetOffset(
|
||||
std::promise<avsCommon::utils::mediaPlayer::MediaPlayerStatus>* promise,
|
||||
std::chrono::milliseconds offset);
|
||||
|
||||
/**
|
||||
* Worker thread handler for setting the observer.
|
||||
*
|
||||
|
@ -333,6 +349,24 @@ private:
|
|||
*/
|
||||
void sendBufferRefilled();
|
||||
|
||||
/**
|
||||
* Used to obtain seeking information about the pipeline.
|
||||
*
|
||||
* @param isSeekable A boolean indicating whether the stream is seekable.
|
||||
* @return A boolean indicating whether the operation was successful.
|
||||
*/
|
||||
bool queryIsSeekable(bool* isSeekable);
|
||||
|
||||
/**
|
||||
* Performs a seek to the @c seekPoint.
|
||||
*
|
||||
* @return A boolean indicating whether the seek operation was successful.
|
||||
*/
|
||||
bool seek();
|
||||
|
||||
/// An instance of the @c OffsetManager.
|
||||
OffsetManager m_offsetManager;
|
||||
|
||||
/// An instance of the @c AudioPipeline.
|
||||
AudioPipeline m_pipeline;
|
||||
|
||||
|
|
|
@ -0,0 +1,92 @@
|
|||
/*
|
||||
* OffsetManager.h
|
||||
*
|
||||
* Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
* A copy of the License is located at
|
||||
*
|
||||
* http://aws.amazon.com/apache2.0/
|
||||
*
|
||||
* or in the "license" file accompanying this file. This file is distributed
|
||||
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||
* express or implied. See the License for the specific language governing
|
||||
* permissions and limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef ALEXA_CLIENT_SDK_MEDIA_PLAYER_INCLUDE_MEDIA_PLAYER_OFFSET_MANAGER_H_
|
||||
#define ALEXA_CLIENT_SDK_MEDIA_PLAYER_INCLUDE_MEDIA_PLAYER_OFFSET_MANAGER_H_
|
||||
|
||||
#include <chrono>
|
||||
|
||||
namespace alexaClientSDK {
|
||||
namespace mediaPlayer {
|
||||
|
||||
/**
|
||||
* TODO ACSDK-459: Implement support for seeking across playlists.
|
||||
*/
|
||||
class OffsetManager {
|
||||
|
||||
public:
|
||||
/**
|
||||
* Constructor with initialization of members.
|
||||
*/
|
||||
OffsetManager();
|
||||
|
||||
/**
|
||||
* Set whether the stream is seekable.
|
||||
*
|
||||
* @param seekable A boolean indicating whether the stream is seekable.
|
||||
*/
|
||||
void setIsSeekable(bool seekable);
|
||||
|
||||
/**
|
||||
* Returns whether the stream is seekable.
|
||||
*
|
||||
* @return A boolean indicating whether the stream is seekable.
|
||||
*/
|
||||
bool isSeekable();
|
||||
|
||||
/**
|
||||
* Set a seek point.
|
||||
*
|
||||
* @param seekPoint The seek point in milliseconds.
|
||||
*/
|
||||
void setSeekPoint(std::chrono::milliseconds seekPoint);
|
||||
|
||||
/**
|
||||
* Get the seek point.
|
||||
*
|
||||
* @return The seek point in milliseconds.
|
||||
*/
|
||||
std::chrono::milliseconds getSeekPoint();
|
||||
|
||||
/**
|
||||
* Returns whether a seek point has been set.
|
||||
*
|
||||
* @return A boolean indicating whether the stream is seekable.
|
||||
*/
|
||||
bool isSeekPointSet();
|
||||
|
||||
/**
|
||||
* Explicitly clears locally cached data, e.g. @c seekPoint.
|
||||
*/
|
||||
void clear();
|
||||
|
||||
private:
|
||||
|
||||
/// The seekpoint in milliseconds.
|
||||
std::chrono::milliseconds m_seekPoint;
|
||||
|
||||
/// Whether the stream is seekable.
|
||||
bool m_isSeekable;
|
||||
|
||||
/// Whether the seek point has been set.
|
||||
bool m_isSeekPointSet;
|
||||
};
|
||||
|
||||
} // namespace mediaPlayer
|
||||
} // namespace alexaClientSDK
|
||||
|
||||
#endif // ALEXA_CLIENT_SDK_MEDIA_PLAYER_INCLUDE_MEDIA_PLAYER_OFFSET_MANAGER_H_
|
|
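The `OffsetManager` added above is a plain value holder: it remembers whether the pipeline is seekable and, separately, whether a seek point has been requested. A hedged usage sketch against the header exactly as declared above follows; the flow mimics what `MediaPlayer` does around `setOffset()` and `seek()`, and building it is assumed to require the SDK's MediaPlayer module on the include and link paths.

```cpp
#include <chrono>
#include <iostream>

#include "MediaPlayer/OffsetManager.h"

int main() {
    alexaClientSDK::mediaPlayer::OffsetManager offsetManager;

    // setOffset() path: remember where the next play() should start.
    offsetManager.setSeekPoint(std::chrono::milliseconds(2000));

    // Once the pipeline reports that the stream is seekable...
    offsetManager.setIsSeekable(true);

    // ...a seek only fires when both conditions hold.
    if (offsetManager.isSeekable() && offsetManager.isSeekPointSet()) {
        std::cout << "seeking to " << offsetManager.getSeekPoint().count() << " ms" << std::endl;
    }

    // After a seek (or a pipeline teardown) the cached state is dropped.
    offsetManager.clear();
    std::cout << std::boolalpha << offsetManager.isSeekPointSet() << std::endl;  // false
    return 0;
}
```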
@ -89,4 +89,4 @@ protected:
|
|||
} // namespace mediaPlayer
|
||||
} // namespace alexaClientSDK
|
||||
|
||||
#endif // ALEXA_CLIENT_SDK_MEDIA_PLAYER_INCLUDE_MEDIA_PLAYER_PIPELINE_INTERFACE_H_
|
||||
#endif // ALEXA_CLIENT_SDK_MEDIA_PLAYER_INCLUDE_MEDIA_PLAYER_PIPELINE_INTERFACE_H_
|
||||
|
|
|
@ -4,6 +4,7 @@ add_library(MediaPlayer SHARED
|
|||
BaseStreamSource.cpp
|
||||
IStreamSource.cpp
|
||||
MediaPlayer.cpp
|
||||
OffsetManager.cpp
|
||||
UrlSource.cpp)
|
||||
|
||||
target_include_directories(MediaPlayer PUBLIC
|
||||
|
@ -14,4 +15,4 @@ target_include_directories(MediaPlayer PUBLIC
|
|||
target_link_libraries(MediaPlayer "${GST_LDFLAGS}" AVSCommon PlaylistParser)
|
||||
|
||||
# install target
|
||||
asdk_install()
|
||||
asdk_install()
|
||||
|
|
|
@ -24,10 +24,10 @@
|
|||
#else
|
||||
#include <PlaylistParser/DummyPlaylistParser.h>
|
||||
#endif
|
||||
|
||||
#include "MediaPlayer/AttachmentReaderSource.h"
|
||||
#include "MediaPlayer/IStreamSource.h"
|
||||
#include "MediaPlayer/UrlSource.h"
|
||||
|
||||
#include "MediaPlayer/MediaPlayer.h"
|
||||
|
||||
namespace alexaClientSDK {
|
||||
|
@ -191,6 +191,18 @@ int64_t MediaPlayer::getOffsetInMilliseconds() {
|
|||
return future.get();
|
||||
}
|
||||
|
||||
MediaPlayerStatus MediaPlayer::setOffset(std::chrono::milliseconds offset) {
|
||||
ACSDK_DEBUG9(LX("setOffsetCalled"));
|
||||
std::promise<MediaPlayerStatus> promise;
|
||||
auto future = promise.get_future();
|
||||
std::function<gboolean()> callback = [this, &promise, offset]() {
|
||||
handleSetOffset(&promise, offset);
|
||||
return false;
|
||||
};
|
||||
queueCallback(&callback);
|
||||
return future.get();
|
||||
}
|
||||
|
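`MediaPlayer::setOffset()` above follows the same promise/future plus queued-callback pattern as the rest of the class: the public call blocks on a future while the real work runs on the GLib main-loop thread via `queueCallback()`. A hedged, GStreamer-free sketch of that handoff follows; a plain worker thread stands in for `g_idle_add`, and the free functions and reduced `MediaPlayerStatus` enum are illustrative.

```cpp
#include <chrono>
#include <future>
#include <iostream>
#include <thread>

enum class MediaPlayerStatus { SUCCESS, FAILURE };

// Worker-side handler, analogous to MediaPlayer::handleSetOffset().
static void handleSetOffset(std::promise<MediaPlayerStatus>* promise, std::chrono::milliseconds offset) {
    std::cout << "cached seek point: " << offset.count() << " ms" << std::endl;
    promise->set_value(MediaPlayerStatus::SUCCESS);
}

// Caller-side API, analogous to MediaPlayer::setOffset(): queue the work, then wait on the future.
static MediaPlayerStatus setOffset(std::chrono::milliseconds offset) {
    std::promise<MediaPlayerStatus> promise;
    auto future = promise.get_future();
    // In MediaPlayer this lambda is queued onto the GLib main loop; a short-lived thread stands in here.
    std::thread worker([&promise, offset]() { handleSetOffset(&promise, offset); });
    auto status = future.get();
    worker.join();
    return status;
}

int main() {
    return setOffset(std::chrono::milliseconds(2000)) == MediaPlayerStatus::SUCCESS ? 0 : 1;
}
```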
||||
void MediaPlayer::setObserver(std::shared_ptr<MediaPlayerObserverInterface> observer) {
|
||||
ACSDK_DEBUG9(LX("setObserverCalled"));
|
||||
std::promise<void> promise;
|
||||
|
@ -290,6 +302,7 @@ void MediaPlayer::tearDownPipeline() {
|
|||
resetPipeline();
|
||||
g_source_remove(m_busWatchId);
|
||||
}
|
||||
m_offsetManager.clear();
|
||||
}
|
||||
|
||||
void MediaPlayer::resetPipeline() {
|
||||
|
@ -301,6 +314,49 @@ void MediaPlayer::resetPipeline() {
|
|||
m_pipeline.audioSink = nullptr;
|
||||
}
|
||||
|
||||
bool MediaPlayer::queryIsSeekable(bool* isSeekable) {
|
||||
ACSDK_DEBUG9(LX("queryIsSeekable"));
|
||||
gboolean seekable;
|
||||
GstQuery* query;
|
||||
query = gst_query_new_seeking(GST_FORMAT_TIME);
|
||||
if (gst_element_query(m_pipeline.pipeline, query)) {
|
||||
gst_query_parse_seeking(query, NULL, &seekable, NULL, NULL);
|
||||
*isSeekable = (seekable == TRUE);
|
||||
ACSDK_DEBUG(LX("queryIsSeekable").d("isSeekable", *isSeekable));
|
||||
gst_query_unref(query);
|
||||
return true;
|
||||
} else {
|
||||
ACSDK_ERROR(LX("queryIsSeekableFailed").d("reason", "seekQueryFailed"));
|
||||
gst_query_unref(query);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MediaPlayer::seek() {
|
||||
bool seekSuccessful = true;
|
||||
ACSDK_DEBUG9(LX("seekCalled"));
|
||||
if (!m_offsetManager.isSeekable() || !m_offsetManager.isSeekPointSet()) {
|
||||
ACSDK_ERROR(LX("seekFailed")
|
||||
.d("reason", "invalidState")
|
||||
.d("isSeekable", m_offsetManager.isSeekable())
|
||||
.d("seekPointSet", m_offsetManager.isSeekPointSet()));
|
||||
seekSuccessful = false;
|
||||
} else if (!gst_element_seek_simple(
|
||||
m_pipeline.pipeline,
|
||||
GST_FORMAT_TIME, // ns
|
||||
static_cast<GstSeekFlags>(GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_KEY_UNIT),
|
||||
std::chrono::duration_cast<std::chrono::nanoseconds>(
|
||||
m_offsetManager.getSeekPoint()).count())) {
|
||||
ACSDK_ERROR(LX("seekFailed").d("reason", "gstElementSeekSimpleFailed"));
|
||||
seekSuccessful = false;
|
||||
} else {
|
||||
ACSDK_DEBUG(LX("seekSuccessful").d("offsetInMs", m_offsetManager.getSeekPoint().count()));
|
||||
}
|
||||
|
||||
m_offsetManager.clear();
|
||||
return seekSuccessful;
|
||||
}
|
||||
|
||||
guint MediaPlayer::queueCallback(const std::function<gboolean()> *callback) {
|
||||
return g_idle_add(reinterpret_cast<GSourceFunc>(&onCallback), const_cast<std::function<gboolean()> *>(callback));
|
||||
}
|
||||
|
@ -334,6 +390,7 @@ gboolean MediaPlayer::onBusMessage(GstBus *bus, GstMessage *message, gpointer me
|
|||
}
|
||||
|
||||
gboolean MediaPlayer::handleBusMessage(GstMessage *message) {
|
||||
ACSDK_DEBUG9(LX("messageReceived").d("messageType", gst_message_type_get_name(GST_MESSAGE_TYPE(message))));
|
||||
switch (GST_MESSAGE_TYPE(message)) {
|
||||
case GST_MESSAGE_EOS:
|
||||
if (GST_MESSAGE_SRC(message) == GST_OBJECT_CAST(m_pipeline.pipeline)) {
|
||||
|
@ -363,6 +420,7 @@ gboolean MediaPlayer::handleBusMessage(GstMessage *message) {
|
|||
}
|
||||
} else {
|
||||
sendPlaybackFinished();
|
||||
tearDownPipeline();
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@ -389,6 +447,10 @@ gboolean MediaPlayer::handleBusMessage(GstMessage *message) {
|
|||
GstState newState;
|
||||
GstState pendingState;
|
||||
gst_message_parse_state_changed(message, &oldState, &newState, &pendingState);
|
||||
ACSDK_DEBUG9(LX("State Change")
|
||||
.d("oldState", gst_element_state_get_name(oldState))
|
||||
.d("newState", gst_element_state_get_name(newState))
|
||||
.d("pendingState", gst_element_state_get_name(pendingState)));
|
||||
if (newState == GST_STATE_PLAYING) {
|
||||
if (!m_playbackStartedSent) {
|
||||
sendPlaybackStarted();
|
||||
|
@ -401,8 +463,7 @@ gboolean MediaPlayer::handleBusMessage(GstMessage *message) {
|
|||
m_isPaused = false;
|
||||
}
|
||||
}
|
||||
} else if (newState == GST_STATE_PAUSED &&
|
||||
oldState == GST_STATE_PLAYING) {
|
||||
} else if (newState == GST_STATE_PAUSED && oldState == GST_STATE_PLAYING) {
|
||||
if (m_isBufferUnderrun) {
|
||||
sendBufferUnderrun();
|
||||
} else if (!m_isPaused) {
|
||||
|
@ -432,7 +493,18 @@ gboolean MediaPlayer::handleBusMessage(GstMessage *message) {
|
|||
m_isBufferUnderrun = true;
|
||||
}
|
||||
} else {
|
||||
if (GST_STATE_CHANGE_FAILURE == gst_element_set_state(m_pipeline.pipeline, GST_STATE_PLAYING)) {
|
||||
bool isSeekable = false;
|
||||
if (queryIsSeekable(&isSeekable)) {
|
||||
m_offsetManager.setIsSeekable(isSeekable);
|
||||
}
|
||||
|
||||
ACSDK_DEBUG9(LX("offsetState")
|
||||
.d("isSeekable", m_offsetManager.isSeekable())
|
||||
.d("isSeekPointSet", m_offsetManager.isSeekPointSet()));
|
||||
|
||||
if (m_offsetManager.isSeekable() && m_offsetManager.isSeekPointSet()) {
|
||||
seek();
|
||||
} else if (GST_STATE_CHANGE_FAILURE == gst_element_set_state(m_pipeline.pipeline, GST_STATE_PLAYING)) {
|
||||
std::string error = "resumingOnBufferRefilledFailed";
|
||||
ACSDK_ERROR(LX(error));
|
||||
sendPlaybackError(error);
|
||||
|
@ -715,22 +787,61 @@ void MediaPlayer::handleGetOffsetInMilliseconds(std::promise<int64_t> *promise)
|
|||
ACSDK_DEBUG(LX("handleGetOffsetInMillisecondsCalled"));
|
||||
gint64 position = -1;
|
||||
GstState state;
|
||||
GstState pending;
|
||||
if (m_pipeline.pipeline) {
|
||||
auto stateChangeRet = gst_element_get_state(
|
||||
m_pipeline.pipeline, &state, &pending, TIMEOUT_ZERO_NANOSECONDS);
|
||||
if (GST_STATE_CHANGE_FAILURE == stateChangeRet) {
|
||||
ACSDK_ERROR(LX("handleGetOffsetInMillisecondsFailed").d("reason", "getElementGetStateFailed"));
|
||||
} else if (GST_STATE_CHANGE_SUCCESS == stateChangeRet &&
|
||||
(GST_STATE_PLAYING == state || GST_STATE_PAUSED == state) &&
|
||||
gst_element_query_position(m_pipeline.pipeline, GST_FORMAT_TIME, &position)) {
|
||||
position /= NANOSECONDS_TO_MILLISECONDS;
|
||||
}
|
||||
|
||||
// Check if pipeline is set.
|
||||
if (!m_pipeline.pipeline) {
|
||||
ACSDK_INFO(LX("handleGetOffsetInMilliseconds").m("pipelineNotSet"));
|
||||
promise->set_value(static_cast<int64_t>(-1));
|
||||
return;
|
||||
}
|
||||
|
||||
auto stateChangeRet = gst_element_get_state(
|
||||
m_pipeline.pipeline,
|
||||
&state,
|
||||
NULL,
|
||||
TIMEOUT_ZERO_NANOSECONDS);
|
||||
|
||||
if (GST_STATE_CHANGE_FAILURE == stateChangeRet) {
|
||||
// Getting the state failed.
|
||||
ACSDK_ERROR(LX("handleGetOffsetInMillisecondsFailed").d("reason", "getElementGetStateFailure"));
|
||||
} else if (GST_STATE_CHANGE_SUCCESS != stateChangeRet) {
|
||||
// Getting the state was not successful (GST_STATE_CHANGE_ASYNC or GST_STATE_CHANGE_NO_PREROLL).
|
||||
ACSDK_INFO(LX("handleGetOffsetInMilliseconds")
|
||||
.d("reason", "getElementGetStateUnsuccessful")
|
||||
.d("stateChangeReturn", gst_element_state_change_return_get_name(stateChangeRet)));
|
||||
} else if (GST_STATE_PAUSED != state && GST_STATE_PLAYING != state) {
|
||||
// Invalid State.
|
||||
std::ostringstream expectedStates;
|
||||
expectedStates << gst_element_state_get_name(GST_STATE_PAUSED)
|
||||
<< "/"
|
||||
<< gst_element_state_get_name(GST_STATE_PLAYING);
|
||||
ACSDK_ERROR(LX("handleGetOffsetInMillisecondsFailed")
|
||||
.d("reason", "invalidPipelineState")
|
||||
.d("state", gst_element_state_get_name(state))
|
||||
.d("expectedStates", expectedStates.str()));
|
||||
} else if (!gst_element_query_position(m_pipeline.pipeline, GST_FORMAT_TIME, &position)) {
|
||||
/*
|
||||
* Query Failed. Explicitly reset the position to -1 as gst_element_query_position() does not guarantee
|
||||
* value of position in the event of a failure.
|
||||
*/
|
||||
position = -1;
|
||||
ACSDK_ERROR(LX("handleGetOffsetInMillisecondsFailed").d("reason", "gstElementQueryPositionError"));
|
||||
} else {
|
||||
// Query succeeded.
|
||||
position /= NANOSECONDS_TO_MILLISECONDS;
|
||||
}
|
||||
|
||||
promise->set_value(static_cast<int64_t>(position));
|
||||
}
|
||||
|
||||
void MediaPlayer::handleSetOffset(
|
||||
std::promise<MediaPlayerStatus> *promise,
|
||||
std::chrono::milliseconds offset) {
|
||||
ACSDK_DEBUG(LX("handleSetOffsetCalled"));
|
||||
m_offsetManager.setSeekPoint(offset);
|
||||
promise->set_value(MediaPlayerStatus::SUCCESS);
|
||||
}
|
||||
|
||||
void MediaPlayer::handleSetObserver(
|
||||
std::promise<void>* promise,
|
||||
std::shared_ptr<avsCommon::utils::mediaPlayer::MediaPlayerObserverInterface> observer) {
|
||||
|
|
|
@ -0,0 +1,69 @@
/*
 * OffsetManager.cpp
 *
 * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *     http://aws.amazon.com/apache2.0/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

#include <AVSCommon/Utils/Logger/Logger.h>

#include "MediaPlayer/OffsetManager.h"

namespace alexaClientSDK {
namespace mediaPlayer {

using namespace avsCommon::utils;

/// String to identify log entries originating from this file.
static const std::string TAG("OffsetManager");

/**
 * Create a LogEntry using this file's TAG and the specified event string.
 *
 * @param event The event string for this @c LogEntry.
 */
#define LX(event) alexaClientSDK::avsCommon::utils::logger::LogEntry(TAG, event)

OffsetManager::OffsetManager() {
    clear();
}

void OffsetManager::setIsSeekable(bool seekable) {
    m_isSeekable = seekable;
}

void OffsetManager::setSeekPoint(std::chrono::milliseconds seekPoint) {
    m_isSeekPointSet = true;
    m_seekPoint = seekPoint;
}

std::chrono::milliseconds OffsetManager::getSeekPoint() {
    return m_seekPoint;
}

bool OffsetManager::isSeekable() {
    return m_isSeekable;
}

bool OffsetManager::isSeekPointSet() {
    return m_isSeekPointSet;
}

void OffsetManager::clear() {
    m_seekPoint = std::chrono::milliseconds::zero();
    m_isSeekable = false;
    m_isSeekPointSet = false;
}

} // namespace mediaPlayer
} // namespace alexaClientSDK
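For orientation, a rough sketch of how MediaPlayer might consume the seek point stored by OffsetManager when a play request is handled: seek only if the pipeline is seekable and a seek point was set, then clear it so the offset does not carry over to a later source. This is illustrative only, not the SDK's actual handlePlay() logic; the helper name is an assumption, and the code is written as if it lived in MediaPlayer.cpp, where the GStreamer and logger headers are already included.

```cpp
// Illustrative sketch, not SDK code: consume OffsetManager's seek point once, at play time.
static void seekIfRequired(GstElement* pipeline, OffsetManager& offsetManager) {
    if (!offsetManager.isSeekable() || !offsetManager.isSeekPointSet()) {
        return;
    }
    gint64 seekNs =
        std::chrono::duration_cast<std::chrono::nanoseconds>(offsetManager.getSeekPoint()).count();
    if (!gst_element_seek_simple(
            pipeline,
            GST_FORMAT_TIME,
            static_cast<GstSeekFlags>(GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_KEY_UNIT),
            seekNs)) {
        ACSDK_ERROR(LX("seekIfRequiredFailed").d("reason", "gstElementSeekSimpleFailed"));
    }
    // Clear the seek point so a later setSource()/play() starts from the beginning.
    offsetManager.clear();
}
```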
@ -62,6 +62,20 @@ static const std::string M3U_FILE_PATH("/fox_dog_playlist.m3u");
/// file URI Prefix
static const std::string FILE_PREFIX("file://");

/// File length for the MP3 test file.
static const std::chrono::milliseconds MP3_FILE_LENGTH(2688);

// setOffset timing constants.

/// Offset to start playback at.
static const std::chrono::milliseconds OFFSET(2000);

/// Tolerance when setting expectations.
static const std::chrono::milliseconds TOLERANCE(200);

/// Padding to add to offsets when necessary.
static const std::chrono::milliseconds PADDING(10);

/**
 * Mock AttachmentReader.
 */
@ -440,6 +454,8 @@ TEST_F(MediaPlayerTest, testSetSourceEmptyUrl) {
 * Set the source of the @c MediaPlayer twice consecutively to a URL representing a single audio file.
 * Play back the audio until the end. Check whether the playback started and playback finished notifications
 * are received.
 *
 * Consecutive calls to setSource(const std::string url) without play() cause tests to occasionally fail: ACSDK-508.
 */
TEST_F(MediaPlayerTest, testConsecutiveSetSource) {
@ -637,12 +653,21 @@ TEST_F(MediaPlayerTest, testGetOffsetInMilliseconds) {
    ASSERT_NE(MediaPlayerStatus::FAILURE, m_mediaPlayer->play());
    ASSERT_TRUE(m_playerObserver->waitForPlaybackStarted());
    std::this_thread::sleep_for(std::chrono::seconds(1));
    ASSERT_NE(-1, m_mediaPlayer->getOffsetInMilliseconds());
    int64_t offset = m_mediaPlayer->getOffsetInMilliseconds();
    ASSERT_TRUE((offset > 0) && (offset <= MP3_FILE_LENGTH.count()));
    ASSERT_NE(MediaPlayerStatus::FAILURE, m_mediaPlayer->stop());
    ASSERT_TRUE(m_playerObserver->waitForPlaybackFinished());
    ASSERT_EQ(-1, m_mediaPlayer->getOffsetInMilliseconds());
}

/**
 * Test getOffsetInMilliseconds with a null pipeline. Expect that -1 is returned.
 * This currently results in errors on shutdown. Will be fixed by ACSDK-446.
 */
TEST_F(MediaPlayerTest, testGetOffsetInMillisecondsNullPipeline) {
    ASSERT_EQ(-1, m_mediaPlayer->getOffsetInMilliseconds());
}

/**
 * Check playing two attachments back to back.
 * Read an audio file into a buffer. Set the source of the @c MediaPlayer to the buffer. Play back audio for a few
@ -749,6 +774,113 @@ TEST_F(MediaPlayerTest, testStartPlayWithUrlPlaylistWaitForEnd) {
}
#endif

/**
 * Test setting the offset to a seekable source. Setting the offset should succeed and playback should start
 * from the offset.
 */
TEST_F(MediaPlayerTest, testSetOffsetSeekableSource) {
    std::chrono::milliseconds offset(OFFSET);

    std::string url_single(FILE_PREFIX + inputsDirPath + MP3_FILE_PATH);
    m_mediaPlayer->setSource(url_single);
    ASSERT_EQ(MediaPlayerStatus::SUCCESS, m_mediaPlayer->setOffset(offset));

    ASSERT_NE(MediaPlayerStatus::FAILURE, m_mediaPlayer->play());
    ASSERT_TRUE(m_playerObserver->waitForPlaybackStarted());
    auto start = std::chrono::steady_clock::now();
    ASSERT_TRUE(m_playerObserver->waitForPlaybackFinished());

    std::chrono::milliseconds timeElapsed =
        std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start);
    ACSDK_INFO(LX("MediaPlayerTest").d("timeElapsed", timeElapsed.count()));

    // Time elapsed should be the total file length minus the offset.
    ASSERT_TRUE(timeElapsed < (MP3_FILE_LENGTH - offset + TOLERANCE));
    ASSERT_EQ(m_playerObserver->getOnPlaybackStartedCallCount(), 1);
    ASSERT_EQ(m_playerObserver->getOnPlaybackFinishedCallCount(), 1);
}

/**
 * Test setting the offset to an un-seekable pipeline. Setting the offset should succeed, but
 * no seeking should occur. Playback will start from the beginning.
 */
TEST_F(MediaPlayerTest, testSetOffsetUnseekable) {
    std::chrono::milliseconds offset(OFFSET);

    setAttachmentReaderSource();
    // Ensure that the source is set to not seekable.
    gst_app_src_set_stream_type(m_mediaPlayer->getAppSrc(), GST_APP_STREAM_TYPE_STREAM);

    ASSERT_EQ(MediaPlayerStatus::SUCCESS, m_mediaPlayer->setOffset(offset));

    ASSERT_NE(MediaPlayerStatus::FAILURE, m_mediaPlayer->play());
    ASSERT_TRUE(m_playerObserver->waitForPlaybackStarted());
    auto start = std::chrono::steady_clock::now();
    ASSERT_TRUE(m_playerObserver->waitForPlaybackFinished());

    std::chrono::milliseconds timeElapsed =
        std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start);
    ACSDK_INFO(LX("MediaPlayerTest").d("timeElapsed", timeElapsed.count()));

    // Time elapsed should be the length of the file.
    ASSERT_TRUE(timeElapsed >= (MP3_FILE_LENGTH));
    ASSERT_EQ(m_playerObserver->getOnPlaybackStartedCallCount(), 1);
    ASSERT_EQ(m_playerObserver->getOnPlaybackFinishedCallCount(), 1);
}

/**
 * Test setting the offset outside the bounds of the source. Playback will immediately end.
 */
TEST_F(MediaPlayerTest, testSetOffsetOutsideBounds) {
    std::chrono::milliseconds outOfBounds(MP3_FILE_LENGTH + PADDING);

    std::string url_single(FILE_PREFIX + inputsDirPath + MP3_FILE_PATH);
    m_mediaPlayer->setSource(url_single);
    ASSERT_EQ(MediaPlayerStatus::SUCCESS, m_mediaPlayer->setOffset(outOfBounds));

    ASSERT_NE(MediaPlayerStatus::FAILURE, m_mediaPlayer->play());
    ASSERT_TRUE(m_playerObserver->waitForPlaybackStarted());
    auto start = std::chrono::steady_clock::now();
    ASSERT_TRUE(m_playerObserver->waitForPlaybackFinished());

    std::chrono::milliseconds timeElapsed =
        std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start);
    ACSDK_INFO(LX("MediaPlayerTest").d("timeElapsed", timeElapsed.count()));

    // Time elapsed should be near zero.
    ASSERT_TRUE(timeElapsed < std::chrono::milliseconds::zero() + TOLERANCE);
    ASSERT_EQ(m_playerObserver->getOnPlaybackStartedCallCount(), 1);
    ASSERT_EQ(m_playerObserver->getOnPlaybackFinishedCallCount(), 1);
}

/**
 * Test that calling setSource resets the offset.
 *
 * Consecutive calls to setSource(const std::string url) without play() cause tests to occasionally fail: ACSDK-508.
 */
TEST_F(MediaPlayerTest, testSetSourceResetsOffset) {
    std::chrono::milliseconds offset(OFFSET);

    std::string url_single(FILE_PREFIX + inputsDirPath + MP3_FILE_PATH);
    m_mediaPlayer->setSource(url_single);
    ASSERT_EQ(MediaPlayerStatus::SUCCESS, m_mediaPlayer->setOffset(offset));

    m_mediaPlayer->setSource(url_single);
    // Play, expect the full file.
    ASSERT_NE(MediaPlayerStatus::FAILURE, m_mediaPlayer->play());
    ASSERT_TRUE(m_playerObserver->waitForPlaybackStarted());
    auto start = std::chrono::steady_clock::now();
    ASSERT_TRUE(m_playerObserver->waitForPlaybackFinished());

    std::chrono::milliseconds timeElapsed =
        std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start);
    ACSDK_INFO(LX("MediaPlayerTest").d("timeElapsed", timeElapsed.count()));

    // Time elapsed should be the full file.
    ASSERT_TRUE(timeElapsed >= MP3_FILE_LENGTH);
    ASSERT_EQ(m_playerObserver->getOnPlaybackStartedCallCount(), 1);
    ASSERT_EQ(m_playerObserver->getOnPlaybackFinishedCallCount(), 1);
}

} // namespace test
} // namespace mediaPlayer
} // namespace alexaClientSDK

34 README.md
@ -1,6 +1,6 @@
### What is the Alexa Voice Service (AVS)?

The Alexa Voice Service (AVS) enables developers to integrate Alexa directly into their products, bringing the convenience of voice control to any connected device. AVS provides developers with access to a suite of resources to quickly and easily build Alexa-enabled products, including APIs, hardware development kits, software development kits, and documentation.

[Learn more »](https://developer.amazon.com/alexa-voice-service)
@ -24,13 +24,13 @@ Or if you prefer, you can start with our [SDK API Documentation](https://alexa.g

### SDK Architecture

This diagram illustrates the data flows between components that comprise the AVS Device SDK for C++.

![SDK Architecture Diagram](https://m.media-amazon.com/images/G/01/mobile-apps/dex/avs/Alexa_Device_SDK_Architecture.png)

**Audio Signal Processor (ASP)** - Third-party software that applies signal processing algorithms to both input and output audio channels. The applied algorithms are designed to produce clean audio data and include, but are not limited to, acoustic echo cancellation (AEC), beam forming (fixed or adaptive), voice activity detection (VAD), and dynamic range compression (DRC). If a multi-microphone array is present, the ASP constructs and outputs a single audio stream for the array.

**Shared Data Stream (SDS)** - A single-producer, multi-consumer buffer that allows for the transport of any type of data between a single writer and one or more readers (a rough creation sketch follows the list below). SDS performs two key tasks:

1. It passes audio data between the audio front end (or Audio Signal Processor), the wake word engine, and the Alexa Communications Library (ACL) before sending to AVS
2. It passes data attachments sent by AVS to specific capability agents via the ACL
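
As a rough sketch of how such a stream might be created and shared in client code, based on the SDK's audio input stream alias; the buffer size, word size, and reader count below are illustrative, and the exact type aliases and signatures may differ between SDK versions:

```cpp
#include <memory>

#include <AVSCommon/AVS/AudioInputStream.h>

using alexaClientSDK::avsCommon::avs::AudioInputStream;

// Create one SDS for microphone audio; the single writer is the audio front end,
// and components such as the wake word engine attach readers to it.
std::shared_ptr<AudioInputStream> createSharedStream() {
    static const size_t WORD_SIZE = 2;                        // 16-bit PCM samples
    static const size_t MAX_READERS = 10;                     // illustrative reader count
    static const size_t BUFFER_SIZE_IN_SAMPLES = 16000 * 15;  // ~15 seconds at 16 kHz

    size_t bufferSize = AudioInputStream::calculateBufferSize(BUFFER_SIZE_IN_SAMPLES, WORD_SIZE, MAX_READERS);
    auto buffer = std::make_shared<AudioInputStream::Buffer>(bufferSize);
    return AudioInputStream::create(buffer, WORD_SIZE, MAX_READERS);
}
```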
@ -62,7 +62,7 @@ Focus management is not specific to Capability Agents or Directive Handlers, and
* [SpeechSynthesizer](https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/reference/speechsynthesizer) - The interface for Alexa speech output.
* [Alerts](https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/reference/alerts) - The interface for setting, stopping, and deleting timers and alarms.
* [AudioPlayer](https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/reference/audioplayer) - The interface for managing and controlling audio playback.
* [Notifications](https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/reference/notifications) - The interface for displaying notification indicators.
* [PlaybackController](https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/reference/playbackcontroller) - The interface for navigating a playback queue via GUI or buttons.
* [Speaker](https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/reference/speaker) - The interface for volume control, including mute and unmute.
* [System](https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/reference/system) - The interface for communicating product status/state to AVS.
@ -82,16 +82,16 @@ Focus management is not specific to Capability Agents or Directive Handlers, and

**Note**: Features, updates, and resolved issues from previous releases are available to view in [CHANGELOG.md](https://github.com/alexa/alexa-client-sdk/blob/master/CHANGELOG.md).

v1.0.2 released 8/23/2017:

* **Features**
  * Native components for the following capability agents are included in this release: `Alerts`, `AudioPlayer`, `SpeechRecognizer`, `SpeechSynthesizer`, and `System`
  * Supports iHeartRadio
  * Includes a sample application to demonstrate interactions with AVS
* **Known Issues**
  * Native components for the following capability agents are **not** included in this release: `PlaybackController`, `Speaker`, `Settings`, `TemplateRuntime`, and `Notifications`
  * Amazon Music, TuneIn, SiriusXM, and audio books are not supported in v1.0.1
  * The `AlertsCapabilityAgent` satisfies the [AVS specification](https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/reference/timers-and-alarms-conceptual-overview) except for sending retrospective events. For example, sending `AlertStarted` for an Alert which rendered when there was no internet connection.
  * `ACL`'s asynchronous receipt of audio attachments may manage resources poorly in scenarios where attachments are received but not consumed.
  * When an `AttachmentReader` does not deliver data for prolonged periods, `MediaPlayer` may not resume playing the delayed audio.
  * Without the refresh token in the JSON file, the sample app crashes on start up.
  * Any connection loss during the `Listening` state keeps the app stuck in this state, unless the ongoing interaction is manually stopped by the user.

v1.0.3 released 9/19/2017:

* **Enhancements**
  * Implemented `setOffset` in `MediaPlayer`.
  * [Updated `LoggerUtils.cpp`](https://github.com/alexa/avs-device-sdk/issues/77).
* **Bug Fixes**
  * [Bug fix to address incorrect stop behavior caused when Audio Focus is set to `NONE` and released](https://github.com/alexa/avs-device-sdk/issues/129).
  * Bug fix for an intermittent failure in `handleMultipleConsecutiveSpeaks`.
  * Bug fix for `jsonArrayExist` incorrectly parsing JSON when trying to locate array children.
  * Bug fix for ADSL test failures with `sendDirectiveWithoutADialogRequestId`.
  * Bug fix for `SpeechSynthesizer` showing the wrong UX state when a burst of `Speak` directives is received.
  * Bug fix for recursive loop in `AudioPlayer.Stop`.
@ -143,6 +143,12 @@ void UIManager::printState() {
            return;
        case DialogUXState::SPEAKING:
            ConsolePrinter::prettyPrint("Speaking...");
            return;
        /*
         * This is an intermediate state after a SPEAK directive is completed. In the case of a speech burst the
         * next SPEAK could kick in, or if it is the last SPEAK directive, Alexa moves to the IDLE state. So we
         * do nothing for this state.
         */
        case DialogUXState::FINISHED:
            return;
    }
}
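For third-party clients, a minimal sketch of an observer that mirrors this handling of the new state, treating FINISHED as a no-op; the class name is hypothetical and only the observer interface shown in this change is assumed.

```cpp
#include <AVSCommon/SDKInterfaces/DialogUXStateObserverInterface.h>

// Hypothetical observer: only the DialogUXStateObserverInterface API is assumed.
class MyUxObserver : public alexaClientSDK::avsCommon::sdkInterfaces::DialogUXStateObserverInterface {
public:
    void onDialogUXStateChanged(DialogUXState newState) override {
        switch (newState) {
            case DialogUXState::SPEAKING:
                // Show a "Speaking..." indication, as UIManager does above.
                break;
            case DialogUXState::FINISHED:
                // Intermediate state between Speak directives in a burst; intentionally do nothing.
                break;
            default:
                // IDLE, LISTENING, THINKING: handle as before this change.
                break;
        }
    }
};
```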