From 944c6977ace0e4e11b5f40401ff5d9a6e22d4359 Mon Sep 17 00:00:00 2001 From: Brant Thomsen Date: Tue, 1 Aug 2017 16:45:56 -0600 Subject: AAF Support Updates For AAF support, Talker timestamps vary based on the sparse value. This is handled in map_aaf, rather than intf_alsa or intf_wav. For AAF support, Listener will adjust to match the sparse setting of the incoming stream. Removed (unsupported) sparse support from map_uncmp. Removed "surplus" support from intf_alsa, as it was preventing data from being read at the needed speed when batch_factor > 1. --- .../map_aaf_audio/openavb_map_aaf_audio.c | 132 +++++++++++++-------- .../map_uncmp_audio/openavb_map_uncmp_audio.c | 3 - .../map_uncmp_audio/openavb_map_uncmp_audio_pub.h | 16 --- .../map_uncmp_audio/uncmp_audio_map.md | 2 - .../platform/Linux/intf_alsa/openavb_intf_alsa.c | 26 +--- .../Linux/intf_wav_file/openavb_intf_wav_file.c | 10 -- 6 files changed, 85 insertions(+), 104 deletions(-) diff --git a/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio.c b/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio.c index ecd413f5..b29563d2 100755 --- a/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio.c +++ b/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio.c @@ -58,6 +58,9 @@ https://github.com/benhoyt/inih/commit/74d2ca064fb293bc60a77b0bd068075b293cf175. // - 1 Byte - TV bit (timestamp valid) #define HIDX_AVTP_HIDE7_TV1 1 +// - 1 Byte - Sequence number +#define HIDX_AVTP_SEQ_NUM 2 + // - 1 Byte - TU bit (timestamp uncertain) #define HIDX_AVTP_HIDE7_TU1 3 @@ -87,6 +90,7 @@ typedef enum { AAF_FORMAT_INT_32, AAF_FORMAT_INT_24, AAF_FORMAT_INT_16, + AAF_FORMAT_AES3_32, // AVDECC_TODO: Implement this } aaf_sample_format_t; typedef enum { @@ -98,6 +102,13 @@ typedef enum { AAF_MAX_CHANNELS_LAYOUT = 15, } aaf_automotive_channels_layout_t; +typedef enum { + // Disabled - timestamp is valid in every avtp packet + TS_SPARSE_MODE_DISABLED = 0, + // Enabled - timestamp is valid in every 8th avtp packet + TS_SPARSE_MODE_ENABLED = 1 +} avb_audio_sparse_mode_t; + typedef struct { ///////////// // Config data @@ -137,7 +148,7 @@ typedef struct { U32 intervalCounter; - U32 sparseMode; + avb_audio_sparse_mode_t sparseMode; bool mediaQItemSyncTS; @@ -406,13 +417,6 @@ void openavbMapAVTPAudioGenInitCB(media_q_t *pMediaQ) openavbMediaQSetSize(pMediaQ, pPvtData->itemCount, pPubMapInfo->itemSize); pPvtData->dataValid = TRUE; - - pPubMapInfo->sparseMode = pPvtData->sparseMode; - if (pPubMapInfo->sparseMode == TS_SPARSE_MODE_ENABLED) { - AVB_LOG_INFO("Sparse timestamping mode: enabled"); - } else { - AVB_LOG_INFO("Sparse timestamping mode: disabled"); - } } AVB_TRACE_EXIT(AVB_TRACE_MAP); } @@ -425,7 +429,7 @@ void openavbMapAVTPAudioTxInitCB(media_q_t *pMediaQ) AVB_TRACE_EXIT(AVB_TRACE_MAP); } -// CORE_TODO: This callback should be updated to work in a similiar way the uncompressed audio mapping. With allowing AVTP packets to be built +// CORE_TODO: This callback should be updated to work in a similar way the uncompressed audio mapping. With allowing AVTP packets to be built // from multiple media queue items. This allows interface to set into the media queue blocks of audio frames to properly correspond to // a SYT_INTERVAL. Additionally the public data member sytInterval needs to be set in the same way the uncompressed audio mapping does. // This talker callback will be called for each AVB observation interval. 
@@ -434,30 +438,68 @@ tx_cb_ret_t openavbMapAVTPAudioTxCB(media_q_t *pMediaQ, U8 *pData, U32 *dataLen) media_q_item_t *pMediaQItem = NULL; AVB_TRACE_ENTRY(AVB_TRACE_MAP_DETAIL); + if (!pMediaQ) { + AVB_LOG_ERROR("Mapping module invalid MediaQ"); + AVB_TRACE_EXIT(AVB_TRACE_MAP_DETAIL); + return TX_CB_RET_PACKET_NOT_READY; + } + if (!pData || !dataLen) { AVB_LOG_ERROR("Mapping module data or data length argument incorrect."); + AVB_TRACE_EXIT(AVB_TRACE_MAP_DETAIL); return TX_CB_RET_PACKET_NOT_READY; } - if (pMediaQ) - pMediaQItem = openavbMediaQTailLock(pMediaQ, TRUE); + media_q_pub_map_aaf_audio_info_t *pPubMapInfo = pMediaQ->pPubMapInfo; - if (pMediaQItem) { - if (pMediaQItem->dataLen > 0) { + U32 bytesNeeded = pPubMapInfo->itemFrameSizeBytes * pPubMapInfo->framesPerPacket; + if (!openavbMediaQIsAvailableBytes(pMediaQ, pPubMapInfo->itemFrameSizeBytes * pPubMapInfo->framesPerPacket, TRUE)) { + AVB_LOG_VERBOSE("Not enough bytes are ready"); + AVB_TRACE_EXIT(AVB_TRACE_MAP_DETAIL); + return TX_CB_RET_PACKET_NOT_READY; + } + + pvt_data_t *pPvtData = pMediaQ->pPvtMapInfo; + if (!pPvtData) { + AVB_LOG_ERROR("Private mapping module data not allocated."); + openavbMediaQTailUnlock(pMediaQ); + AVB_TRACE_EXIT(AVB_TRACE_MAP_DETAIL); + return TX_CB_RET_PACKET_NOT_READY; + } + + if ((*dataLen - TOTAL_HEADER_SIZE) < pPvtData->payloadSize) { + AVB_LOG_ERROR("Not enough room in packet for payload"); + openavbMediaQTailUnlock(pMediaQ); + AVB_TRACE_EXIT(AVB_TRACE_MAP_DETAIL); + return TX_CB_RET_PACKET_NOT_READY; + } + + U32 bytesProcessed = 0; + while (bytesProcessed < bytesNeeded) { + pMediaQItem = openavbMediaQTailLock(pMediaQ, TRUE); + if (pMediaQItem && pMediaQItem->pPubData && pMediaQItem->dataLen > 0) { U32 tmp32; U8 *pHdrV0 = pData; U32 *pHdr = (U32 *)(pData + AVTP_V0_HEADER_SIZE); U8 *pPayload = pData + TOTAL_HEADER_SIZE; - media_q_pub_map_aaf_audio_info_t *pPubMapInfo = pMediaQ->pPubMapInfo; - pvt_data_t *pPvtData = pMediaQ->pPvtMapInfo; - if (!pPvtData) { - AVB_LOG_ERROR("Private mapping module data not allocated."); - return TX_CB_RET_PACKET_NOT_READY; - } - // timestamp set in the interface module, here just validate - if (openavbAvtpTimeTimestampIsValid(pMediaQItem->pAvtpTime)) { + // timestamp set in the interface module, here just validate + // In sparse mode, the timestamp valid flag should be set every eighth AAF AVPTDU. + if (pPvtData->sparseMode == TS_SPARSE_MODE_ENABLED && (pHdrV0[HIDX_AVTP_SEQ_NUM] & 0x07) != 0) { + // Skip over this timestamp, as using sparse mode. + pHdrV0[HIDX_AVTP_HIDE7_TV1] &= ~0x01; + pHdrV0[HIDX_AVTP_HIDE7_TU1] &= ~0x01; + *pHdr++ = 0; // Clear the timestamp field + } + else if (!openavbAvtpTimeTimestampIsValid(pMediaQItem->pAvtpTime)) { + // Error getting the timestamp. Clear timestamp valid flag. + AVB_LOG_ERROR("Unable to get the timestamp value"); + pHdrV0[HIDX_AVTP_HIDE7_TV1] &= ~0x01; + pHdrV0[HIDX_AVTP_HIDE7_TU1] &= ~0x01; + *pHdr++ = 0; // Clear the timestamp field + } + else { // Add the max transit time. 
openavbAvtpTimeAddUSec(pMediaQItem->pAvtpTime, pPvtData->maxTransitUsec); @@ -474,11 +516,6 @@ tx_cb_ret_t openavbMapAVTPAudioTxCB(media_q_t *pMediaQ, U8 *pData, U32 *dataLen) openavbAvtpTimeSetTimestampValid(pMediaQItem->pAvtpTime, FALSE); } - else { - // Clear timestamp valid flag - pHdrV0[HIDX_AVTP_HIDE7_TV1] &= ~0x01; - pHdr++; // Move past the timestamp field - } // - 4 bytes format info (format, sample rate, channels per frame, bit depth) tmp32 = pPvtData->aaf_format << 24; @@ -499,12 +536,6 @@ tx_cb_ret_t openavbMapAVTPAudioTxCB(media_q_t *pMediaQ, U8 *pData, U32 *dataLen) pHdrV0[HIDX_AVTP_HIDE7_SP] &= ~SP_M0_BIT; } - if ((*dataLen - TOTAL_HEADER_SIZE) < pPvtData->payloadSize) { - AVB_LOG_ERROR("Not enough room in packet for payload"); - AVB_TRACE_EXIT(AVB_TRACE_MAP_DETAIL); - return TX_CB_RET_PACKET_NOT_READY; - } - if ((pMediaQItem->dataLen - pMediaQItem->readIdx) < pPvtData->payloadSize) { // This should not happen so we will just toss it away. AVB_LOG_ERROR("Not enough data in media queue item for packet"); @@ -514,6 +545,7 @@ tx_cb_ret_t openavbMapAVTPAudioTxCB(media_q_t *pMediaQ, U8 *pData, U32 *dataLen) } memcpy(pPayload, (uint8_t *)pMediaQItem->pPubData + pMediaQItem->readIdx, pPvtData->payloadSize); + bytesProcessed += pPvtData->payloadSize; pMediaQItem->readIdx += pPvtData->payloadSize; if (pMediaQItem->readIdx >= pMediaQItem->dataLen) { @@ -524,23 +556,17 @@ tx_cb_ret_t openavbMapAVTPAudioTxCB(media_q_t *pMediaQ, U8 *pData, U32 *dataLen) // More to read next interval openavbMediaQTailUnlock(pMediaQ); } - - // Set outbound data length (entire packet length) - *dataLen = pPvtData->payloadSize + TOTAL_HEADER_SIZE; - - AVB_TRACE_EXIT(AVB_TRACE_MAP_DETAIL); - return TX_CB_RET_PACKET_READY; } else { openavbMediaQTailPull(pMediaQ); - AVB_TRACE_EXIT(AVB_TRACE_MAP_DETAIL); - return TX_CB_RET_PACKET_NOT_READY; // No payload } - } + // Set out bound data length (entire packet length) + *dataLen = bytesNeeded + TOTAL_HEADER_SIZE; + AVB_TRACE_EXIT(AVB_TRACE_MAP_DETAIL); - return TX_CB_RET_PACKET_NOT_READY; + return TX_CB_RET_PACKET_READY; } // A call to this callback indicates that this mapping module will be @@ -562,12 +588,16 @@ void openavbMapAVTPAudioRxInitCB(media_q_t *pMediaQ) // sparse mode enabled so check packing factor // listener should work correct for packing_factors: // 1, 2, 4, 8, 16, 24, 32, 40, 48, (+8) ... 
- if (pPvtData->packingFactor < 8) { + if (pPvtData->packingFactor == 0) { + badPckFctrValue = TRUE; + } + else if (pPvtData->packingFactor < 8) { // check if power of 2 if ((pPvtData->packingFactor & (pPvtData->packingFactor - 1)) != 0) { badPckFctrValue = TRUE; } - } else { + } + else { // check if multiple of 8 if (pPvtData->packingFactor % 8 != 0) { badPckFctrValue = TRUE; @@ -649,11 +679,15 @@ bool openavbMapAVTPAudioRxCB(media_q_t *pMediaQ, U8 *pData, U32 dataLen) AVB_LOGF_ERROR("Listener event field (%d) doesn't match received data (%d)", pPvtData->aaf_event_field, tmp); } - if (streamSparseMode != listenerSparseMode) { - if (pPvtData->dataValid) - AVB_LOGF_ERROR("Listener sparse mode (%d) doesn't match stream sparse mode (%d)", - listenerSparseMode, streamSparseMode); - dataValid = FALSE; + if (streamSparseMode && !listenerSparseMode) { + AVB_LOG_INFO("Listener enabling sparse mode to match incoming stream"); + pPvtData->sparseMode = TS_SPARSE_MODE_ENABLED; + listenerSparseMode = TRUE; + } + if (!streamSparseMode && listenerSparseMode) { + AVB_LOG_INFO("Listener disabling sparse mode to match incoming stream"); + pPvtData->sparseMode = TS_SPARSE_MODE_DISABLED; + listenerSparseMode = FALSE; } if (dataValid) { diff --git a/lib/avtp_pipeline/map_uncmp_audio/openavb_map_uncmp_audio.c b/lib/avtp_pipeline/map_uncmp_audio/openavb_map_uncmp_audio.c index fc59606f..ca342b2b 100755 --- a/lib/avtp_pipeline/map_uncmp_audio/openavb_map_uncmp_audio.c +++ b/lib/avtp_pipeline/map_uncmp_audio/openavb_map_uncmp_audio.c @@ -724,7 +724,6 @@ extern DLL_EXPORT bool openavbMapUncmpAudioInitialize(media_q_t *pMediaQ, openav } pvt_data_t *pPvtData = pMediaQ->pPvtMapInfo; - media_q_pub_map_uncmp_audio_info_t *pPubMapInfo = pMediaQ->pPubMapInfo; pMapCB->map_cfg_cb = openavbMapUncmpAudioCfgCB; pMapCB->map_subtype_cb = openavbMapUncmpAudioSubtypeCB; @@ -746,8 +745,6 @@ extern DLL_EXPORT bool openavbMapUncmpAudioInitialize(media_q_t *pMediaQ, openav pPvtData->DBC = 0; pPvtData->audioMcr = AVB_MCR_NONE; - pPubMapInfo->sparseMode = TS_SPARSE_MODE_UNSPEC; - openavbMediaQSetMaxLatency(pMediaQ, inMaxTransitUsec); } diff --git a/lib/avtp_pipeline/map_uncmp_audio/openavb_map_uncmp_audio_pub.h b/lib/avtp_pipeline/map_uncmp_audio/openavb_map_uncmp_audio_pub.h index 9bb1f321..aab1f973 100755 --- a/lib/avtp_pipeline/map_uncmp_audio/openavb_map_uncmp_audio_pub.h +++ b/lib/avtp_pipeline/map_uncmp_audio/openavb_map_uncmp_audio_pub.h @@ -60,19 +60,6 @@ https://github.com/benhoyt/inih/commit/74d2ca064fb293bc60a77b0bd068075b293cf175. */ #define MapUncmpAudioMediaQDataFormat "UncmpAudio" -/** Defines AAF timestamping mode: - * - TS_SPARSE_MODE_DISABLED - timestamp is valid in every avtp packet - * - TS_SPARSE_MODE_ENABLED - timestamp is valid in every 8th avtp packet - */ -typedef enum { - /// Unspecified - TS_SPARSE_MODE_UNSPEC = 0, - /// Disabled - TS_SPARSE_MODE_DISABLED = 1, - /// Enabled - TS_SPARSE_MODE_ENABLED = 8 -} avb_audio_sparse_mode_t; - /** Contains detailed information of the audio format. * \note Interface module has to set during the RX and TX init callbacks: * - audioRate, @@ -80,7 +67,6 @@ typedef enum { * - audioBitDepth, * - audioEndian, * - audioChannels, - * - sparseMode. * \note The rest of fields mapping module will set these during the RX and TX * init callbacks. The interface module can use these during the RX and TX * callbacks. 
@@ -96,8 +82,6 @@ typedef struct { avb_audio_endian_t audioEndian; /// Number of channels avb_audio_channels_t audioChannels; - /// Sparse timestamping mode - avb_audio_sparse_mode_t sparseMode; // The mapping module will set these during the RX and TX init callbacks // The interface module can use these during the RX and TX callbacks. diff --git a/lib/avtp_pipeline/map_uncmp_audio/uncmp_audio_map.md b/lib/avtp_pipeline/map_uncmp_audio/uncmp_audio_map.md index f826249c..862241b6 100644 --- a/lib/avtp_pipeline/map_uncmp_audio/uncmp_audio_map.md +++ b/lib/avtp_pipeline/map_uncmp_audio/uncmp_audio_map.md @@ -41,8 +41,6 @@ audioType |How the data is organized - what is the data type of \ samples @ref avb_audio_type_t audioBitDepth |What is the bit depth of audio @ref avb_audio_bit_depth_t audioChannels |How many channels there are @ref avb_audio_channels_t -sparseMode |Timestamping mode @ref avb_audio_sparse_mode_t \ - (not used in this mapping module) Below you can find description of how to set up those variables in interfaces * [wav file interface](@ref wav_file_intf) diff --git a/lib/avtp_pipeline/platform/Linux/intf_alsa/openavb_intf_alsa.c b/lib/avtp_pipeline/platform/Linux/intf_alsa/openavb_intf_alsa.c index 96f8c4c0..37d29140 100644 --- a/lib/avtp_pipeline/platform/Linux/intf_alsa/openavb_intf_alsa.c +++ b/lib/avtp_pipeline/platform/Linux/intf_alsa/openavb_intf_alsa.c @@ -106,9 +106,6 @@ typedef struct { // Use Media Clock Synth module instead of timestamps taken during Tx callback bool fixedTimestampEnabled; - - // How many extra items we have read ahead of schedule - S32 surplus; } pvt_data_t; @@ -586,26 +583,11 @@ bool openavbIntfAlsaTxCB(media_q_t *pMediaQ) return FALSE; } - //put current wall time into tail item used by AAF mapping module - if ((pPubMapUncmpAudioInfo->sparseMode != TS_SPARSE_MODE_UNSPEC)) { - pMediaQItem = openavbMediaQTailLock(pMediaQ, TRUE); - if ((pMediaQItem) && (pPvtData->intervalCounter % pPubMapUncmpAudioInfo->sparseMode == 0)) { - openavbAvtpTimeSetToWallTime(pMediaQItem->pAvtpTime); - } - openavbMediaQTailUnlock(pMediaQ); - pMediaQItem = NULL; - } - if (pPvtData->intervalCounter++ % pPubMapUncmpAudioInfo->packingFactor != 0) { AVB_TRACE_EXIT(AVB_TRACE_INTF_DETAIL); return TRUE; } - if (pPvtData->surplus > 0) { - pPvtData->surplus--; - return TRUE; - } - while (moreItems) { pMediaQItem = openavbMediaQHeadLock(pMediaQ); if (pMediaQItem) { @@ -627,7 +609,7 @@ bool openavbIntfAlsaTxCB(media_q_t *pMediaQ) } break; case -EAGAIN: - AVB_LOG_DEBUG("snd_pcm_readi() had no data available"); + { IF_LOG_INTERVAL(1000) AVB_LOG_DEBUG("snd_pcm_readi() had no data available"); } break; default: AVB_LOGF_ERROR("Unhandled snd_pcm_readi() error: %s", snd_strerror(rslt)); @@ -640,13 +622,12 @@ bool openavbIntfAlsaTxCB(media_q_t *pMediaQ) } } - pPvtData->surplus++; - pMediaQItem->dataLen += rslt * pPubMapUncmpAudioInfo->itemFrameSizeBytes; if (pMediaQItem->dataLen != pPubMapUncmpAudioInfo->itemSize) { openavbMediaQHeadUnlock(pMediaQ); } else { + // Always get the timestamp. Protocols such as AAF can choose to ignore them if not needed. 
if (!pPvtData->fixedTimestampEnabled) { openavbAvtpTimeSetToWallTime(pMediaQItem->pAvtpTime); } else { @@ -660,8 +641,6 @@ bool openavbIntfAlsaTxCB(media_q_t *pMediaQ) moreItems = FALSE; } } - - IF_LOG_INTERVAL(1000) AVB_LOGF_DEBUG("Surplus: %d", pPvtData->surplus); } AVB_TRACE_EXIT(AVB_TRACE_INTF_DETAIL); @@ -1139,7 +1118,6 @@ extern DLL_EXPORT bool openavbIntfAlsaInitialize(media_q_t *pMediaQ, openavb_int pPvtData->fixedTimestampEnabled = FALSE; pPvtData->clockSkewPPB = 0; - pPvtData->surplus = 0; } AVB_TRACE_EXIT(AVB_TRACE_INTF); diff --git a/lib/avtp_pipeline/platform/Linux/intf_wav_file/openavb_intf_wav_file.c b/lib/avtp_pipeline/platform/Linux/intf_wav_file/openavb_intf_wav_file.c index 3932ba04..e598bb94 100644 --- a/lib/avtp_pipeline/platform/Linux/intf_wav_file/openavb_intf_wav_file.c +++ b/lib/avtp_pipeline/platform/Linux/intf_wav_file/openavb_intf_wav_file.c @@ -450,16 +450,6 @@ bool openavbIntfWavFileTxCB(media_q_t *pMediaQ) return FALSE; } - //put current wall time into tail item used by AAF maping module - if ((pPubMapUncmpAudioInfo->sparseMode != TS_SPARSE_MODE_UNSPEC)) { - pMediaQItem = openavbMediaQTailLock(pMediaQ, TRUE); - if ((pMediaQItem) && (pPvtData->intervalCounter % pPubMapUncmpAudioInfo->sparseMode == 0)) { - openavbAvtpTimeSetToWallTime(pMediaQItem->pAvtpTime); - } - openavbMediaQTailUnlock(pMediaQ); - pMediaQItem = NULL; - } - if (pPvtData->intervalCounter++ % pPubMapUncmpAudioInfo->packingFactor != 0) return TRUE; -- cgit v1.2.1 From bb7868bd875257f4716ec61cb09d3a4bbbbbfc04 Mon Sep 17 00:00:00 2001 From: Brant Thomsen Date: Mon, 21 Aug 2017 16:20:00 -0600 Subject: AAF Integer Conversion Support Added When using AAF in 16-bit, 24-bit, or 32-bit integer mode, the incoming audio can be 16-bit, 24-bit, or 32-bit integer data. The incoming audio is converted to the desired internal format. Note that the conversion only works correctly if the incoming audio is big- endian, which IEEE 1722-2016 Clause 7.3.5 lists as a requirement for AAF. INI files updated to match IEEE 1722-2016 specification. --- .../map_aaf_audio/openavb_map_aaf_audio.c | 110 +++++++++++++++++---- .../map_aaf_audio/openavb_map_aaf_audio_pub.h | 2 +- .../platform/Linux/intf_alsa/aaf_file_talker.ini | 4 + .../platform/Linux/intf_alsa/aaf_listener.ini | 11 ++- .../platform/Linux/intf_alsa/aaf_listener_auto.ini | 11 ++- .../platform/Linux/intf_alsa/aaf_talker.ini | 10 +- 6 files changed, 116 insertions(+), 32 deletions(-) diff --git a/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio.c b/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio.c index b29563d2..ef3b4a91 100755 --- a/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio.c +++ b/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio.c @@ -32,7 +32,7 @@ https://github.com/benhoyt/inih/commit/74d2ca064fb293bc60a77b0bd068075b293cf175. /* * MODULE SUMMARY : Implementation for AAF mapping module * - * AAF is defined in IEEE 1722-rev1/D12 (still in draft as of Feb 2015). + * AAF (AVTP Audio Format) is defined in IEEE 1722-2016 Clause 7. 
*/ #include @@ -141,6 +141,7 @@ typedef struct { aaf_sample_format_t aaf_format; U8 aaf_bit_depth; U32 payloadSize; + U32 payloadSizeMax; U8 aaf_event_field; @@ -273,13 +274,19 @@ static void x_calculateSizes(media_q_t *pMediaQ) // AAF packet size calculations pPubMapInfo->packetFrameSizeBytes = pPubMapInfo->packetSampleSizeBytes * pPubMapInfo->audioChannels; - pPvtData->payloadSize = pPubMapInfo->framesPerPacket * pPubMapInfo->packetFrameSizeBytes; + pPvtData->payloadSize = pPvtData->payloadSizeMax = + pPubMapInfo->framesPerPacket * pPubMapInfo->packetFrameSizeBytes; AVB_LOGF_INFO("packet: sampleSz=%d * channels=%d => frameSz=%d * %d => payloadSz=%d", pPubMapInfo->packetSampleSizeBytes, pPubMapInfo->audioChannels, pPubMapInfo->packetFrameSizeBytes, pPubMapInfo->framesPerPacket, pPvtData->payloadSize); + if (pPvtData->aaf_format >= AAF_FORMAT_INT_32 && pPvtData->aaf_format <= AAF_FORMAT_INT_16) { + // Determine the largest size we could receive before adjustments. + pPvtData->payloadSizeMax = 4 * pPubMapInfo->audioChannels * pPubMapInfo->framesPerPacket; + AVB_LOGF_DEBUG("packet: payloadSizeMax=%d", pPvtData->payloadSizeMax); + } // MediaQ item size calculations pPubMapInfo->packingFactor = pPvtData->packingFactor; @@ -378,7 +385,7 @@ U16 openavbMapAVTPAudioMaxDataSizeCB(media_q_t *pMediaQ) } AVB_TRACE_EXIT(AVB_TRACE_MAP); - return pPvtData->payloadSize + TOTAL_HEADER_SIZE; + return pPvtData->payloadSizeMax + TOTAL_HEADER_SIZE; } AVB_TRACE_EXIT(AVB_TRACE_MAP); return 0; @@ -626,8 +633,11 @@ bool openavbMapAVTPAudioRxCB(media_q_t *pMediaQ, U8 *pData, U32 dataLen) return FALSE; } + aaf_sample_format_t incoming_aaf_format; + U8 incoming_bit_depth; int tmp; bool dataValid = TRUE; + bool dataConversionEnabled = FALSE; U32 timestamp = ntohl(*pHdr++); U32 format_info = ntohl(*pHdr++); @@ -637,18 +647,26 @@ bool openavbMapAVTPAudioRxCB(media_q_t *pMediaQ, U8 *pData, U32 dataLen) bool streamSparseMode = (pHdrV0[HIDX_AVTP_HIDE7_SP] & SP_M0_BIT) ? TRUE : FALSE; U16 payloadLen = ntohs(*(U16 *)(&pHdrV0[HIDX_STREAM_DATA_LEN16])); - if (payloadLen > dataLen - TOTAL_HEADER_SIZE) { + if (payloadLen > dataLen - TOTAL_HEADER_SIZE) { if (pPvtData->dataValid) AVB_LOGF_ERROR("header data len %d > actual data len %d", payloadLen, dataLen - TOTAL_HEADER_SIZE); dataValid = FALSE; } - if ((tmp = ((format_info >> 24) & 0xFF)) != pPvtData->aaf_format) { - if (pPvtData->dataValid) - AVB_LOGF_ERROR("Listener format %d doesn't match received data (%d)", - pPvtData->aaf_format, tmp); - dataValid = FALSE; + if ((incoming_aaf_format = (aaf_sample_format_t) ((format_info >> 24) & 0xFF)) != pPvtData->aaf_format) { + // Check if we can convert the incoming data. + if (incoming_aaf_format >= AAF_FORMAT_INT_32 && incoming_aaf_format <= AAF_FORMAT_INT_16 && + pPvtData->aaf_format >= AAF_FORMAT_INT_32 && pPvtData->aaf_format <= AAF_FORMAT_INT_16) { + // Integer conversion should be supported. 
+ dataConversionEnabled = TRUE; + } + else { + if (pPvtData->dataValid) + AVB_LOGF_ERROR("Listener format %d doesn't match received data (%d)", + pPvtData->aaf_format, incoming_aaf_format); + dataValid = FALSE; + } } if ((tmp = ((format_info >> 20) & 0x0F)) != pPvtData->aaf_rate) { if (pPvtData->dataValid) @@ -662,17 +680,29 @@ bool openavbMapAVTPAudioRxCB(media_q_t *pMediaQ, U8 *pData, U32 dataLen) pPubMapInfo->audioChannels, tmp); dataValid = FALSE; } - if ((tmp = (format_info & 0xFF)) != pPvtData->aaf_bit_depth) { + if ((incoming_bit_depth = (U8) (format_info & 0xFF)) == 0) { if (pPvtData->dataValid) - AVB_LOGF_ERROR("Listener bit depth (%d) doesn't match received data (%d)", - pPvtData->aaf_bit_depth, tmp); + AVB_LOGF_ERROR("Listener bit depth (%d) not valid", + incoming_bit_depth); dataValid = FALSE; } if ((tmp = ((packet_info >> 16) & 0xFFFF)) != pPvtData->payloadSize) { - if (pPvtData->dataValid) - AVB_LOGF_ERROR("Listener payload size (%d) doesn't match received data (%d)", - pPvtData->payloadSize, tmp); - dataValid = FALSE; + if (!dataConversionEnabled) { + if (pPvtData->dataValid) + AVB_LOGF_ERROR("Listener payload size (%d) doesn't match received data (%d)", + pPvtData->payloadSize, tmp); + dataValid = FALSE; + } + else { + int nInSampleLength = 6 - incoming_aaf_format; // Calculate the number of integer bytes per sample received + int nOutSampleLength = 6 - pPvtData->aaf_format; // Calculate the number of integer bytes per sample we want + if (tmp / nInSampleLength != pPvtData->payloadSize / nOutSampleLength) { + if (pPvtData->dataValid) + AVB_LOGF_ERROR("Listener payload samples (%d) doesn't match received data samples (%d)", + pPvtData->payloadSize / nOutSampleLength, tmp / nInSampleLength); + dataValid = FALSE; + } + } } if ((tmp = ((packet_info >> 8) & 0x0F)) != pPvtData->aaf_event_field) { if (pPvtData->dataValid) @@ -724,11 +754,53 @@ bool openavbMapAVTPAudioRxCB(media_q_t *pMediaQ, U8 *pData, U32 dataLen) } } if (dataValid) { - if (pPubMapInfo->intf_rx_translate_cb) { - pPubMapInfo->intf_rx_translate_cb(pMediaQ, pPayload, pPvtData->payloadSize); + if (!dataConversionEnabled) { + // Just use the raw incoming data, and ignore the incoming bit_depth. + if (pPubMapInfo->intf_rx_translate_cb) { + pPubMapInfo->intf_rx_translate_cb(pMediaQ, pPayload, pPvtData->payloadSize); + } + + memcpy((uint8_t *)pMediaQItem->pPubData + pMediaQItem->dataLen, pPayload, pPvtData->payloadSize); + } + else { + static U8 s_audioBuffer[1500]; + U8 *pInData = pPayload; + U8 *pInDataEnd = pPayload + payloadLen; + U8 *pOutData = s_audioBuffer; + int nInSampleLength = 6 - incoming_aaf_format; // Calculate the number of integer bytes per sample received + int nOutSampleLength = 6 - pPvtData->aaf_format; // Calculate the number of integer bytes per sample we want + int i; + if (nInSampleLength < nOutSampleLength) { + // We need to pad the data supplied. + while (pInData < pInDataEnd) { + for (i = 0; i < nInSampleLength; ++i) { + *pOutData++ = *pInData++; + } + for ( ; i < nOutSampleLength; ++i) { + *pOutData++ = 0; // Value specified in Clause 7.3.4. + } + } + } + else { + // We need to truncate the data supplied. 
+ while (pInData < pInDataEnd) { + for (i = 0; i < nOutSampleLength; ++i) { + *pOutData++ = *pInData++; + } + pInData += (nInSampleLength - nOutSampleLength); + } + } + if (pOutData - s_audioBuffer != pPvtData->payloadSize) { + AVB_LOGF_ERROR("Output not expected size (%d instead of %d)", pOutData - s_audioBuffer, pPvtData->payloadSize); + } + + if (pPubMapInfo->intf_rx_translate_cb) { + pPubMapInfo->intf_rx_translate_cb(pMediaQ, s_audioBuffer, pPvtData->payloadSize); + } + + memcpy((uint8_t *)pMediaQItem->pPubData + pMediaQItem->dataLen, s_audioBuffer, pPvtData->payloadSize); } - memcpy((uint8_t *)pMediaQItem->pPubData + pMediaQItem->dataLen, pPayload, pPvtData->payloadSize); pMediaQItem->dataLen += pPvtData->payloadSize; } diff --git a/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio_pub.h b/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio_pub.h index d8f82906..213f3bb6 100755 --- a/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio_pub.h +++ b/lib/avtp_pipeline/map_aaf_audio/openavb_map_aaf_audio_pub.h @@ -32,7 +32,7 @@ https://github.com/benhoyt/inih/commit/74d2ca064fb293bc60a77b0bd068075b293cf175. /* * HEADER SUMMARY : AVTP Audio Format mapping module public interface * - * AAF is defined in IEEE 1722a (still in draft as of Feb 2015). + * AAF (AVTP Audio Format) is defined in IEEE 1722-2016 Clause 7. * * map_nv_tx_rate must be set in the .ini file. */ diff --git a/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_file_talker.ini b/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_file_talker.ini index 18535347..210ae966 100644 --- a/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_file_talker.ini +++ b/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_file_talker.ini @@ -127,3 +127,7 @@ intf_fn = openavbIntfWavFileInitialize # intf_nv_file_name: The fully qualified file name. intf_nv_file_name = test.wav + +# AAF is defined to be big-endian. +intf_nv_audio_endian = big + diff --git a/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_listener.ini b/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_listener.ini index 3ba10918..c7c799de 100644 --- a/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_listener.ini +++ b/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_listener.ini @@ -115,15 +115,14 @@ intf_fn = openavbIntfAlsaInitialize intf_nv_device_name = default # intf_nv_audio_rate: Valid values that are supported by AAF are: -# 8000, 16000, 32000, 44100, 48000, 88200, 96000, 176400 and 192000 +# 8000, 16000, 24000, 32000, 44100, 48000, 88200, 96000, 176400 and 192000 intf_nv_audio_rate = 48000 # intf_nv_audio_bit_depth: Valid values that are supported by AAF are: -# 8, 16, 32 +# 16, 24, 32 intf_nv_audio_bit_depth = 32 -# intf_nv_audio_channels: Valid values that are supported by AAF are: -# 1 - 8 +# intf_nv_audio_channels intf_nv_audio_channels = 2 # intf_nv_allow_resampling: 0 = disable software resampling. 1 = allow software resampling. Default is disable. @@ -137,3 +136,7 @@ intf_nv_start_threshold_periods = 3 # This influence ALSA's period_time and period_size parameters and the result value should be the nearest possible. # Initial playback latency is equal intf_nv_start_threshold_periods * intf_nv_period_time. If not set internal defaults are used. # intf_nv_period_time = 31250 + +# AAF is defined to be big-endian. 
+intf_nv_audio_endian = big + diff --git a/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_listener_auto.ini b/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_listener_auto.ini index 7c1f48b3..d3d064b1 100644 --- a/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_listener_auto.ini +++ b/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_listener_auto.ini @@ -111,15 +111,14 @@ intf_fn = openavbIntfAlsaInitialize intf_nv_device_name = default # intf_nv_audio_rate: Valid values that are supported by AAF are: -# 8000, 16000, 32000, 44100, 48000, 88200, 96000, 176400 and 192000 +# 8000, 16000, 24000, 32000, 44100, 48000, 88200, 96000, 176400 and 192000 intf_nv_audio_rate = 48000 # intf_nv_audio_bit_depth: Valid values that are supported by AAF are: -# 8, 16, 32 +# 16, 24, 32 intf_nv_audio_bit_depth = 16 -# intf_nv_audio_channels: Valid values that are supported by AAF are: -# 1 - 8 +# intf_nv_audio_channels intf_nv_audio_channels = 2 # intf_nv_allow_resampling: 0 = disable software resampling. 1 = allow software resampling. Default is disable. @@ -133,3 +132,7 @@ intf_nv_start_threshold_periods = 3 # This influence ALSA's period_time and period_size parameters and the result value should be the nearest possible. # Initial playback latency is equal intf_nv_start_threshold_periods * intf_nv_period_time. If not set internal defaults are used. # intf_nv_period_time = 31250 + +# AAF is defined to be big-endian. +intf_nv_audio_endian = big + diff --git a/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_talker.ini b/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_talker.ini index 5ff3d2d5..a6a85071 100644 --- a/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_talker.ini +++ b/lib/avtp_pipeline/platform/Linux/intf_alsa/aaf_talker.ini @@ -152,17 +152,19 @@ intf_fn = openavbIntfAlsaInitialize intf_nv_device_name = default # intf_nv_audio_rate: Valid values that are supported by AAF are: -# 8000, 16000, 32000, 44100, 48000, 88200, 96000, 176400 and 192000 +# 8000, 16000, 24000, 32000, 44100, 48000, 88200, 96000, 176400 and 192000 intf_nv_audio_rate = 48000 # intf_nv_audio_bit_depth: Valid values that are supported by AAF are: -# 8, 16, 32 +# 16, 24, 32 intf_nv_audio_bit_depth = 32 -# intf_nv_audio_channels: Valid values that are supported by AAF are: -# 1 - 8 +# intf_nv_audio_channels intf_nv_audio_channels = 2 # intf_nv_allow_resampling: 0 = disable software resampling. 1 = allow software resampling. Default is disable. intf_nv_allow_resampling = 1 +# AAF is defined to be big-endian. +intf_nv_audio_endian = big + -- cgit v1.2.1
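
Note on the first patch above: in sparse timestamping mode the talker only marks a valid timestamp in every eighth AAF AVTPDU (the callback checks the low three bits of the sequence number), and the listener now adopts whichever mode the incoming stream uses. A minimal sketch of that rule follows; the helper name is illustrative and not part of the patch.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Sparse timestamping rule used by the talker callback above:
     * with sparse mode disabled every AVTPDU carries a valid timestamp;
     * with it enabled, only AVTPDUs whose sequence number is a multiple
     * of eight do.  Illustrative only.
     */
    static bool aaf_timestamp_expected(bool sparseMode, uint8_t sequenceNum)
    {
        return !sparseMode || (sequenceNum & 0x07) == 0;
    }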
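
Note on the second patch above: the integer bit-depth conversion amounts to copying the most-significant bytes of each big-endian sample, zero-padding the low-order bytes when widening (the value specified in Clause 7.3.4) and dropping them when narrowing. A self-contained sketch under that reading; the function name and parameters are illustrative, not taken from the patch.

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Convert packed big-endian integer samples from inBytesPerSample wide
     * to outBytesPerSample wide, keeping the most-significant bytes.
     * Widening pads the low-order bytes with zeros; narrowing drops them.
     * Returns the number of bytes written to out.
     */
    static size_t aaf_convert_int_samples(const uint8_t *in, size_t inLen, uint8_t *out,
                                          int inBytesPerSample, int outBytesPerSample)
    {
        const uint8_t *pIn = in;
        const uint8_t *pInEnd = in + inLen;
        uint8_t *pOut = out;

        while (pIn + inBytesPerSample <= pInEnd) {
            int i;
            int copyLen = (inBytesPerSample < outBytesPerSample) ? inBytesPerSample
                                                                 : outBytesPerSample;
            for (i = 0; i < copyLen; ++i) {
                *pOut++ = pIn[i];           /* keep the high-order bytes */
            }
            for (; i < outBytesPerSample; ++i) {
                *pOut++ = 0;                /* zero-pad when widening */
            }
            pIn += inBytesPerSample;        /* advance one input sample */
        }
        return (size_t)(pOut - out);
    }

    /* Example: 16-bit integer AAF data arriving at a 32-bit integer listener
     * uses inBytesPerSample = 2 and outBytesPerSample = 4; the patch derives
     * these byte counts as (6 - aaf_format). */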