diff options
author | Michael Niedermayer <michaelni@gmx.at> | 2012-12-10 01:27:10 +0100 |
---|---|---|
committer | Michael Niedermayer <michaelni@gmx.at> | 2012-12-10 01:27:10 +0100 |
commit | 78ac7ee97040a2e9a69b81f82a89edd779e124bb (patch) | |
tree | bae6d10aa246d91b094bf2754efcb944049c7c46 | |
parent | 5c75708cf08d57ed4f9744201554d276c8d5c2e9 (diff) | |
parent | 5d471b73d20616f5ac701ff62e5de49465cda264 (diff) | |
download | ffmpeg-78ac7ee97040a2e9a69b81f82a89edd779e124bb.tar.gz |
Merge commit '5d471b73d20616f5ac701ff62e5de49465cda264'
* commit '5d471b73d20616f5ac701ff62e5de49465cda264':
rtpdec: K&R formatting and spelling cosmetics
cosmetics: Fix dropable --> droppable typo
Conflicts:
libavcodec/h264.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
-rw-r--r-- | libavcodec/flvdec.c | 7 | ||||
-rw-r--r-- | libavcodec/h263dec.c | 4 | ||||
-rw-r--r-- | libavcodec/h264.c | 36 | ||||
-rw-r--r-- | libavcodec/mpegvideo.c | 8 | ||||
-rw-r--r-- | libavcodec/mpegvideo.h | 2 | ||||
-rw-r--r-- | libavcodec/vc1dec.c | 2 | ||||
-rw-r--r-- | libavformat/rtpdec.c | 212 |
7 files changed, 141 insertions(+), 130 deletions(-)
diff --git a/libavcodec/flvdec.c b/libavcodec/flvdec.c index 774fde775c..bb693d76c6 100644 --- a/libavcodec/flvdec.c +++ b/libavcodec/flvdec.c @@ -89,8 +89,8 @@ int ff_flv_decode_picture_header(MpegEncContext *s) s->height = height; s->pict_type = AV_PICTURE_TYPE_I + get_bits(&s->gb, 2); - s->dropable= s->pict_type > AV_PICTURE_TYPE_P; - if (s->dropable) + s->droppable = s->pict_type > AV_PICTURE_TYPE_P; + if (s->droppable) s->pict_type = AV_PICTURE_TYPE_P; skip_bits1(&s->gb); /* deblocking flag */ @@ -109,7 +109,8 @@ int ff_flv_decode_picture_header(MpegEncContext *s) if(s->avctx->debug & FF_DEBUG_PICT_INFO){ av_log(s->avctx, AV_LOG_DEBUG, "%c esc_type:%d, qp:%d num:%d\n", - s->dropable ? 'D' : av_get_picture_type_char(s->pict_type), s->h263_flv-1, s->qscale, s->picture_number); + s->droppable ? 'D' : av_get_picture_type_char(s->pict_type), + s->h263_flv - 1, s->qscale, s->picture_number); } s->y_dc_scale_table= diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c index d457466006..27e1c91cb6 100644 --- a/libavcodec/h263dec.c +++ b/libavcodec/h263dec.c @@ -628,7 +628,9 @@ retry: s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; /* skip B-frames if we don't have reference frames */ - if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size); + if (s->last_picture_ptr == NULL && + (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) + return get_consumed_bytes(s, buf_size); if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) diff --git a/libavcodec/h264.c b/libavcodec/h264.c index 2616b0a38c..6becfcb05d 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -1275,7 +1275,7 @@ static int decode_update_thread_context(AVCodecContext *dst, if (!s->current_picture_ptr) return 0; - if (!s->dropable) { + if (!s->droppable) { err = 
ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb = h->poc_msb; h->prev_poc_lsb = h->poc_lsb; @@ -2252,7 +2252,7 @@ static int field_end(H264Context *h, int in_setup) int err = 0; s->mb_y = 0; - if (!in_setup && !s->dropable) + if (!in_setup && !s->droppable) ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, s->picture_structure == PICT_BOTTOM_FIELD); @@ -2261,7 +2261,7 @@ static int field_end(H264Context *h, int in_setup) ff_vdpau_h264_set_reference_frames(s); if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) { - if (!s->dropable) { + if (!s->droppable) { err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb = h->poc_msb; h->prev_poc_lsb = h->poc_lsb; @@ -2376,7 +2376,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) int num_ref_idx_active_override_flag; unsigned int slice_type, tmp, i, j; int default_ref_list_done = 0; - int last_pic_structure, last_pic_dropable; + int last_pic_structure, last_pic_droppable; int must_reinit; /* FIXME: 2tap qpel isn't implemented for high bit depth. 
*/ @@ -2398,7 +2398,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) h0->current_slice = 0; if (!s0->first_field) { - if (s->current_picture_ptr && !s->dropable && + if (s->current_picture_ptr && !s->droppable && s->current_picture_ptr->owner2 == s) { ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, s->picture_structure == PICT_BOTTOM_FIELD); @@ -2684,8 +2684,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0) h->mb_mbaff = 0; h->mb_aff_frame = 0; last_pic_structure = s0->picture_structure; - last_pic_dropable = s0->dropable; - s->dropable = h->nal_ref_idc == 0; + last_pic_droppable = s0->droppable; + s->droppable = h->nal_ref_idc == 0; if (h->sps.frame_mbs_only_flag) { s->picture_structure = PICT_FRAME; } else { @@ -2704,12 +2704,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0) if (h0->current_slice != 0) { if (last_pic_structure != s->picture_structure || - last_pic_dropable != s->dropable) { + last_pic_droppable != s->droppable) { av_log(h->s.avctx, AV_LOG_ERROR, "Changing field mode (%d -> %d) between slices is not allowed\n", last_pic_structure, s->picture_structure); s->picture_structure = last_pic_structure; - s->dropable = last_pic_dropable; + s->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (!s0->current_picture_ptr) { av_log(s->avctx, AV_LOG_ERROR, @@ -2747,7 +2747,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) assert(s0->current_picture_ptr->f.reference != DELAYED_PIC_REF); /* Mark old field/frame as completed */ - if (!last_pic_dropable && s0->current_picture_ptr->owner2 == s0) { + if (!last_pic_droppable && s0->current_picture_ptr->owner2 == s0) { ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX, last_pic_structure == PICT_BOTTOM_FIELD); } @@ -2756,7 +2756,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) { /* Previous field is unmatched. 
Don't display it, but let it * remain for reference if marked as such. */ - if (!last_pic_dropable && last_pic_structure != PICT_FRAME) { + if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } @@ -2766,7 +2766,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) * different frame_nums. Consider this field first in * pair. Throw away previous field except for reference * purposes. */ - if (!last_pic_dropable && last_pic_structure != PICT_FRAME) { + if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } @@ -2780,14 +2780,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0) "Invalid field mode combination %d/%d\n", last_pic_structure, s->picture_structure); s->picture_structure = last_pic_structure; - s->dropable = last_pic_dropable; + s->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; - } else if (last_pic_dropable != s->dropable) { + } else if (last_pic_droppable != s->droppable) { av_log(s->avctx, AV_LOG_ERROR, "Cannot combine reference and non-reference fields in the same frame\n"); av_log_ask_for_sample(s->avctx, NULL); s->picture_structure = last_pic_structure; - s->dropable = last_pic_dropable; + s->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } @@ -3531,7 +3531,7 @@ static void decode_finish_row(H264Context *h) ff_draw_horiz_band(s, top, height); - if (s->dropable) + if (s->droppable) return; ff_thread_report_progress(&s->current_picture_ptr->f, top + height - 1, @@ -3737,7 +3737,7 @@ static int execute_decode_slices(H264Context *h, int context_count) hx = h->thread_context[context_count - 1]; s->mb_x = hx->s.mb_x; s->mb_y = hx->s.mb_y; - s->dropable = hx->s.dropable; + s->droppable = hx->s.droppable; s->picture_structure = hx->s.picture_structure; for (i = 1; i < 
context_count; i++) h->s.error_count += h->thread_context[i]->s.error_count; @@ -4082,7 +4082,7 @@ again: end: /* clean up */ if (s->current_picture_ptr && s->current_picture_ptr->owner2 == s && - !s->dropable) { + !s->droppable) { ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, s->picture_structure == PICT_BOTTOM_FIELD); } diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index 866fc41542..dc1fc6ca04 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -594,7 +594,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, // B-frame info s->max_b_frames = s1->max_b_frames; s->low_delay = s1->low_delay; - s->dropable = s1->dropable; + s->droppable = s1->droppable; // DivX handling (doesn't work) s->divx_packed = s1->divx_packed; @@ -1348,7 +1348,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) } pic->f.reference = 0; - if (!s->dropable) { + if (!s->droppable) { if (s->codec_id == AV_CODEC_ID_H264) pic->f.reference = s->picture_structure; else if (s->pict_type != AV_PICTURE_TYPE_B) @@ -1383,7 +1383,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) if (s->pict_type != AV_PICTURE_TYPE_B) { s->last_picture_ptr = s->next_picture_ptr; - if (!s->dropable) + if (!s->droppable) s->next_picture_ptr = s->current_picture_ptr; } av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", @@ -1391,7 +1391,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL, s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL, s->current_picture_ptr ? 
s->current_picture_ptr->f.data[0] : NULL, - s->pict_type, s->dropable); + s->pict_type, s->droppable); if (s->codec_id != AV_CODEC_ID_H264) { if ((s->last_picture_ptr == NULL || diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h index b366899930..1bdead56eb 100644 --- a/libavcodec/mpegvideo.h +++ b/libavcodec/mpegvideo.h @@ -346,7 +346,7 @@ typedef struct MpegEncContext { int vbv_delay; int last_pict_type; //FIXME removes int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol - int dropable; + int droppable; int frame_rate_index; AVRational mpeg2_frame_rate_ext; int last_lambda_for[5]; ///< last lambda for a specific pict type diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c index d406b63978..f676284bda 100644 --- a/libavcodec/vc1dec.c +++ b/libavcodec/vc1dec.c @@ -5571,7 +5571,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; /* skip B-frames if we don't have reference frames */ - if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) { + if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) { goto err; } if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) || diff --git a/libavformat/rtpdec.c b/libavformat/rtpdec.c index e92419d9d9..3780c47b78 100644 --- a/libavformat/rtpdec.c +++ b/libavformat/rtpdec.c @@ -25,49 +25,46 @@ #include "libavcodec/get_bits.h" #include "avformat.h" #include "mpegts.h" -#include "url.h" - #include "network.h" - +#include "url.h" #include "rtpdec.h" #include "rtpdec_formats.h" -//#define DEBUG - -/* TODO: - add RTCP statistics reporting (should be optional). - - - add support for h263/mpeg4 packetized output : IDEA: send a - buffer to 'rtp_write_packet' contains all the packets for ONE - frame. 
Each packet should have a four byte header containing - the length in big endian format (same trick as - 'ffio_open_dyn_packet_buf') -*/ +/* TODO: + * - add RTCP statistics reporting (should be optional). + * + * - add support for H.263/MPEG-4 packetized output: IDEA: send a + * buffer to 'rtp_write_packet' contains all the packets for ONE + * frame. Each packet should have a four byte header containing + * the length in big-endian format (same trick as + * 'ffio_open_dyn_packet_buf'). + */ static RTPDynamicProtocolHandler realmedia_mp3_dynamic_handler = { - .enc_name = "X-MP3-draft-00", - .codec_type = AVMEDIA_TYPE_AUDIO, - .codec_id = AV_CODEC_ID_MP3ADU, + .enc_name = "X-MP3-draft-00", + .codec_type = AVMEDIA_TYPE_AUDIO, + .codec_id = AV_CODEC_ID_MP3ADU, }; static RTPDynamicProtocolHandler speex_dynamic_handler = { - .enc_name = "speex", - .codec_type = AVMEDIA_TYPE_AUDIO, - .codec_id = AV_CODEC_ID_SPEEX, + .enc_name = "speex", + .codec_type = AVMEDIA_TYPE_AUDIO, + .codec_id = AV_CODEC_ID_SPEEX, }; static RTPDynamicProtocolHandler opus_dynamic_handler = { - .enc_name = "opus", - .codec_type = AVMEDIA_TYPE_AUDIO, - .codec_id = AV_CODEC_ID_OPUS, + .enc_name = "opus", + .codec_type = AVMEDIA_TYPE_AUDIO, + .codec_id = AV_CODEC_ID_OPUS, }; /* statistics functions */ -static RTPDynamicProtocolHandler *RTPFirstDynamicPayloadHandler= NULL; +static RTPDynamicProtocolHandler *RTPFirstDynamicPayloadHandler = NULL; void ff_register_dynamic_payload_handler(RTPDynamicProtocolHandler *handler) { - handler->next= RTPFirstDynamicPayloadHandler; - RTPFirstDynamicPayloadHandler= handler; + handler->next = RTPFirstDynamicPayloadHandler; + RTPFirstDynamicPayloadHandler = handler; } void av_register_rtp_dynamic_payload_handlers(void) @@ -108,7 +105,7 @@ void av_register_rtp_dynamic_payload_handlers(void) } RTPDynamicProtocolHandler *ff_rtp_handler_find_by_name(const char *name, - enum AVMediaType codec_type) + enum AVMediaType codec_type) { RTPDynamicProtocolHandler *handler; for 
(handler = RTPFirstDynamicPayloadHandler; @@ -120,7 +117,7 @@ RTPDynamicProtocolHandler *ff_rtp_handler_find_by_name(const char *name, } RTPDynamicProtocolHandler *ff_rtp_handler_find_by_id(int id, - enum AVMediaType codec_type) + enum AVMediaType codec_type) { RTPDynamicProtocolHandler *handler; for (handler = RTPFirstDynamicPayloadHandler; @@ -131,7 +128,8 @@ RTPDynamicProtocolHandler *ff_rtp_handler_find_by_id(int id, return NULL; } -static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf, int len) +static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf, + int len) { int payload_len; while (len >= 4) { @@ -140,11 +138,12 @@ static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf, int l switch (buf[1]) { case RTCP_SR: if (payload_len < 20) { - av_log(NULL, AV_LOG_ERROR, "Invalid length for RTCP SR packet\n"); + av_log(NULL, AV_LOG_ERROR, + "Invalid length for RTCP SR packet\n"); return AVERROR_INVALIDDATA; } - s->last_rtcp_ntp_time = AV_RB64(buf + 8); + s->last_rtcp_ntp_time = AV_RB64(buf + 8); s->last_rtcp_timestamp = AV_RB32(buf + 16); if (s->first_rtcp_ntp_time == AV_NOPTS_VALUE) { s->first_rtcp_ntp_time = s->last_rtcp_ntp_time; @@ -164,7 +163,7 @@ static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf, int l return -1; } -#define RTP_SEQ_MOD (1<<16) +#define RTP_SEQ_MOD (1 << 16) static void rtp_init_statistics(RTPStatistics *s, uint16_t base_sequence) { @@ -174,8 +173,9 @@ static void rtp_init_statistics(RTPStatistics *s, uint16_t base_sequence) } /* -* called whenever there is a large jump in sequence numbers, or when they get out of probation... -*/ + * Called whenever there is a large jump in sequence numbers, + * or when they get out of probation... + */ static void rtp_init_sequence(RTPStatistics *s, uint16_t seq) { s->max_seq = seq; @@ -189,9 +189,7 @@ static void rtp_init_sequence(RTPStatistics *s, uint16_t seq) s->transit = 0; } -/* -* returns 1 if we should handle this packet. 
-*/ +/* Returns 1 if we should handle this packet. */ static int rtp_valid_packet_in_sequence(RTPStatistics *s, uint16_t seq) { uint16_t udelta = seq - s->max_seq; @@ -199,7 +197,8 @@ static int rtp_valid_packet_in_sequence(RTPStatistics *s, uint16_t seq) const int MAX_MISORDER = 100; const int MIN_SEQUENTIAL = 2; - /* source not valid until MIN_SEQUENTIAL packets with sequence seq. numbers have been received */ + /* source not valid until MIN_SEQUENTIAL packets with sequence + * seq. numbers have been received */ if (s->probation) { if (seq == s->max_seq + 1) { s->probation--; @@ -211,7 +210,7 @@ static int rtp_valid_packet_in_sequence(RTPStatistics *s, uint16_t seq) } } else { s->probation = MIN_SEQUENTIAL - 1; - s->max_seq = seq; + s->max_seq = seq; } } else if (udelta < MAX_DROPOUT) { // in order, with permissible gap @@ -223,7 +222,8 @@ static int rtp_valid_packet_in_sequence(RTPStatistics *s, uint16_t seq) } else if (udelta <= RTP_SEQ_MOD - MAX_MISORDER) { // sequence made a large jump... if (seq == s->bad_seq) { - // two sequential packets-- assume that the other side restarted without telling us; just resync. + /* two sequential packets -- assume that the other side + * restarted without telling us; just resync. */ rtp_init_sequence(s, seq); } else { s->bad_seq = (seq + 1) & (RTP_SEQ_MOD - 1); @@ -256,7 +256,7 @@ int ff_rtp_check_and_send_back_rr(RTPDemuxContext *s, int count) return -1; /* TODO: I think this is way too often; RFC 1889 has algorithm for this */ - /* XXX: mpeg pts hardcoded. RTCP send every 0.5 seconds */ + /* XXX: MPEG pts hardcoded. RTCP send every 0.5 seconds */ s->octet_count += count; rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) / RTCP_TX_RATIO_DEN; @@ -277,15 +277,15 @@ int ff_rtp_check_and_send_back_rr(RTPDemuxContext *s, int count) avio_wb32(pb, s->ssrc); // server SSRC // some placeholders we should really fill... 
// RFC 1889/p64 - extended_max = stats->cycles + stats->max_seq; - expected = extended_max - stats->base_seq + 1; - lost = expected - stats->received; - lost = FFMIN(lost, 0xffffff); // clamp it since it's only 24 bits... - expected_interval = expected - stats->expected_prior; + extended_max = stats->cycles + stats->max_seq; + expected = extended_max - stats->base_seq + 1; + lost = expected - stats->received; + lost = FFMIN(lost, 0xffffff); // clamp it since it's only 24 bits... + expected_interval = expected - stats->expected_prior; stats->expected_prior = expected; - received_interval = stats->received - stats->received_prior; + received_interval = stats->received - stats->received_prior; stats->received_prior = stats->received; - lost_interval = expected_interval - received_interval; + lost_interval = expected_interval - received_interval; if (expected_interval == 0 || lost_interval <= 0) fraction = 0; else @@ -301,7 +301,7 @@ int ff_rtp_check_and_send_back_rr(RTPDemuxContext *s, int count) avio_wb32(pb, 0); /* last SR timestamp */ avio_wb32(pb, 0); /* delay since last SR */ } else { - uint32_t middle_32_bits = s->last_rtcp_ntp_time >> 16; // this is valid, right? do we need to handle 64 bit values special? + uint32_t middle_32_bits = s->last_rtcp_ntp_time >> 16; // this is valid, right? do we need to handle 64 bit values special? 
uint32_t delay_since_last = ntp_time - s->last_rtcp_ntp_time; avio_wb32(pb, middle_32_bits); /* last SR timestamp */ @@ -318,23 +318,22 @@ int ff_rtp_check_and_send_back_rr(RTPDemuxContext *s, int count) avio_w8(pb, len); avio_write(pb, s->hostname, len); // padding - for (len = (6 + len) % 4; len % 4; len++) { + for (len = (6 + len) % 4; len % 4; len++) avio_w8(pb, 0); - } avio_flush(pb); len = avio_close_dyn_buf(pb, &buf); if ((len > 0) && buf) { int av_unused result; av_dlog(s->ic, "sending %d bytes of RR\n", len); - result= ffurl_write(s->rtp_ctx, buf, len); + result = ffurl_write(s->rtp_ctx, buf, len); av_dlog(s->ic, "result from ffurl_write: %d\n", result); av_free(buf); } return 0; } -void ff_rtp_send_punch_packets(URLContext* rtp_handle) +void ff_rtp_send_punch_packets(URLContext *rtp_handle) { AVIOContext *pb; uint8_t *buf; @@ -372,25 +371,26 @@ void ff_rtp_send_punch_packets(URLContext* rtp_handle) av_free(buf); } - /** * open a new RTP parse context for stream 'st'. 'st' can be NULL for - * MPEG2TS streams to indicate that they should be demuxed inside the + * MPEG2-TS streams to indicate that they should be demuxed inside the * rtp demux (otherwise AV_CODEC_ID_MPEG2TS packets are returned) */ -RTPDemuxContext *ff_rtp_parse_open(AVFormatContext *s1, AVStream *st, URLContext *rtpc, int payload_type, int queue_size) +RTPDemuxContext *ff_rtp_parse_open(AVFormatContext *s1, AVStream *st, + URLContext *rtpc, int payload_type, + int queue_size) { RTPDemuxContext *s; s = av_mallocz(sizeof(RTPDemuxContext)); if (!s) return NULL; - s->payload_type = payload_type; - s->last_rtcp_ntp_time = AV_NOPTS_VALUE; + s->payload_type = payload_type; + s->last_rtcp_ntp_time = AV_NOPTS_VALUE; s->first_rtcp_ntp_time = AV_NOPTS_VALUE; - s->ic = s1; - s->st = st; - s->queue_size = queue_size; + s->ic = s1; + s->st = st; + s->queue_size = queue_size; rtp_init_statistics(&s->statistics, 0); // do we know the initial sequence from sdp? 
if (!strcmp(ff_rtp_enc_name(payload_type), "MP2T")) { s->ts = ff_mpegts_parse_open(s->ic); @@ -399,7 +399,7 @@ RTPDemuxContext *ff_rtp_parse_open(AVFormatContext *s1, AVStream *st, URLContext return NULL; } } else if (st) { - switch(st->codec->codec_id) { + switch (st->codec->codec_id) { case AV_CODEC_ID_MPEG1VIDEO: case AV_CODEC_ID_MPEG2VIDEO: case AV_CODEC_ID_MP2: @@ -432,11 +432,12 @@ void ff_rtp_parse_set_dynamic_protocol(RTPDemuxContext *s, PayloadContext *ctx, RTPDynamicProtocolHandler *handler) { s->dynamic_protocol_context = ctx; - s->parse_packet = handler->parse_packet; + s->parse_packet = handler->parse_packet; } /** - * This was the second switch in rtp_parse packet. Normalizes time, if required, sets stream_index, etc. + * This was the second switch in rtp_parse packet. + * Normalizes time, if required, sets stream_index, etc. */ static void finalize_packet(RTPDemuxContext *s, AVPacket *pkt, uint32_t timestamp) { @@ -452,7 +453,9 @@ static void finalize_packet(RTPDemuxContext *s, AVPacket *pkt, uint32_t timestam /* compute pts from timestamp with received ntp_time */ delta_timestamp = timestamp - s->last_rtcp_timestamp; /* convert to the PTS timebase */ - addend = av_rescale(s->last_rtcp_ntp_time - s->first_rtcp_ntp_time, s->st->time_base.den, (uint64_t)s->st->time_base.num << 32); + addend = av_rescale(s->last_rtcp_ntp_time - s->first_rtcp_ntp_time, + s->st->time_base.den, + (uint64_t) s->st->time_base.num << 32); pkt->pts = s->range_start_offset + s->rtcp_ts_offset + addend + delta_timestamp; return; @@ -460,13 +463,15 @@ static void finalize_packet(RTPDemuxContext *s, AVPacket *pkt, uint32_t timestam if (!s->base_timestamp) s->base_timestamp = timestamp; - /* assume that the difference is INT32_MIN < x < INT32_MAX, but allow the first timestamp to exceed INT32_MAX */ + /* assume that the difference is INT32_MIN < x < INT32_MAX, + * but allow the first timestamp to exceed INT32_MAX */ if (!s->timestamp) s->unwrapped_timestamp += timestamp; else 
s->unwrapped_timestamp += (int32_t)(timestamp - s->timestamp); s->timestamp = timestamp; - pkt->pts = s->unwrapped_timestamp + s->range_start_offset - s->base_timestamp; + pkt->pts = s->unwrapped_timestamp + s->range_start_offset - + s->base_timestamp; } static int rtp_parse_packet_internal(RTPDemuxContext *s, AVPacket *pkt, @@ -477,15 +482,15 @@ static int rtp_parse_packet_internal(RTPDemuxContext *s, AVPacket *pkt, int ext; AVStream *st; uint32_t timestamp; - int rv= 0; + int rv = 0; - ext = buf[0] & 0x10; + ext = buf[0] & 0x10; payload_type = buf[1] & 0x7f; if (buf[1] & 0x80) flags |= RTP_FLAG_MARKER; - seq = AV_RB16(buf + 2); + seq = AV_RB16(buf + 2); timestamp = AV_RB32(buf + 4); - ssrc = AV_RB32(buf + 8); + ssrc = AV_RB32(buf + 8); /* store the ssrc in the RTPDemuxContext */ s->ssrc = ssrc; @@ -495,9 +500,9 @@ static int rtp_parse_packet_internal(RTPDemuxContext *s, AVPacket *pkt, st = s->st; // only do something with this if all the rtp checks pass... - if(!rtp_valid_packet_in_sequence(&s->statistics, seq)) - { - av_log(st?st->codec:NULL, AV_LOG_ERROR, "RTP: PT=%02x: bad cseq %04x expected=%04x\n", + if (!rtp_valid_packet_in_sequence(&s->statistics, seq)) { + av_log(st ? 
st->codec : NULL, AV_LOG_ERROR, + "RTP: PT=%02x: bad cseq %04x expected=%04x\n", payload_type, seq, ((s->seq + 1) & 0xffff)); return -1; } @@ -509,8 +514,8 @@ static int rtp_parse_packet_internal(RTPDemuxContext *s, AVPacket *pkt, } s->seq = seq; - len -= 12; - buf += 12; + len -= 12; + buf += 12; /* RFC 3550 Section 5.3.1 RTP Header Extension handling */ if (ext) { @@ -528,7 +533,7 @@ static int rtp_parse_packet_internal(RTPDemuxContext *s, AVPacket *pkt, } if (!st) { - /* specific MPEG2TS demux support */ + /* specific MPEG2-TS demux support */ ret = ff_mpegts_parse_packet(s->ts, pkt, buf, len); /* The only error that can be returned from ff_mpegts_parse_packet * is "no more data to return from the provided buffer", so return @@ -546,14 +551,15 @@ static int rtp_parse_packet_internal(RTPDemuxContext *s, AVPacket *pkt, rv = s->parse_packet(s->ic, s->dynamic_protocol_context, s->st, pkt, ×tamp, buf, len, flags); } else { - // at this point, the RTP header has been stripped; This is ASSUMING that there is only 1 CSRC, which in't wise. - switch(st->codec->codec_id) { + /* At this point, the RTP header has been stripped; + * This is ASSUMING that there is only 1 CSRC, which isn't wise. 
*/ + switch (st->codec->codec_id) { case AV_CODEC_ID_MP2: case AV_CODEC_ID_MP3: - /* better than nothing: skip mpeg audio RTP header */ + /* better than nothing: skip MPEG audio RTP header */ if (len <= 4) return -1; - h = AV_RB32(buf); + h = AV_RB32(buf); len -= 4; buf += 4; if (av_new_packet(pkt, len) < 0) @@ -562,14 +568,14 @@ static int rtp_parse_packet_internal(RTPDemuxContext *s, AVPacket *pkt, break; case AV_CODEC_ID_MPEG1VIDEO: case AV_CODEC_ID_MPEG2VIDEO: - /* better than nothing: skip mpeg video RTP header */ + /* better than nothing: skip MPEG video RTP header */ if (len <= 4) return -1; - h = AV_RB32(buf); + h = AV_RB32(buf); buf += 4; len -= 4; if (h & (1 << 26)) { - /* mpeg2 */ + /* MPEG-2 */ if (len <= 4) return -1; buf += 4; @@ -610,7 +616,7 @@ void ff_rtp_reset_packet_queue(RTPDemuxContext *s) static void enqueue_packet(RTPDemuxContext *s, uint8_t *buf, int len) { - uint16_t seq = AV_RB16(buf + 2); + uint16_t seq = AV_RB16(buf + 2); RTPPacket *cur = s->queue, *prev = NULL, *packet; /* Find the correct place in the queue to insert the packet */ @@ -619,17 +625,17 @@ static void enqueue_packet(RTPDemuxContext *s, uint8_t *buf, int len) if (diff < 0) break; prev = cur; - cur = cur->next; + cur = cur->next; } packet = av_mallocz(sizeof(*packet)); if (!packet) return; packet->recvtime = av_gettime(); - packet->seq = seq; - packet->len = len; - packet->buf = buf; - packet->next = cur; + packet->seq = seq; + packet->len = len; + packet->buf = buf; + packet->next = cur; if (prev) prev->next = packet; else @@ -660,7 +666,7 @@ static int rtp_parse_queued_packet(RTPDemuxContext *s, AVPacket *pkt) "RTP: missed %d packets\n", s->queue->seq - s->seq - 1); /* Parse the first packet in the queue, and dequeue it */ - rv = rtp_parse_packet_internal(s, pkt, s->queue->buf, s->queue->len); + rv = rtp_parse_packet_internal(s, pkt, s->queue->buf, s->queue->len); next = s->queue->next; av_free(s->queue->buf); av_free(s->queue); @@ -672,10 +678,10 @@ static int 
rtp_parse_queued_packet(RTPDemuxContext *s, AVPacket *pkt) static int rtp_parse_one_packet(RTPDemuxContext *s, AVPacket *pkt, uint8_t **bufptr, int len) { - uint8_t* buf = bufptr ? *bufptr : NULL; + uint8_t *buf = bufptr ? *bufptr : NULL; int ret, flags = 0; uint32_t timestamp; - int rv= 0; + int rv = 0; if (!buf) { /* If parsing of the previous packet actually returned 0 or an error, @@ -684,12 +690,12 @@ static int rtp_parse_one_packet(RTPDemuxContext *s, AVPacket *pkt, if (s->prev_ret <= 0) return rtp_parse_queued_packet(s, pkt); /* return the next packets, if any */ - if(s->st && s->parse_packet) { + if (s->st && s->parse_packet) { /* timestamp should be overwritten by parse_packet, if not, * the packet is left with pts == AV_NOPTS_VALUE */ timestamp = RTP_NOTS_VALUE; - rv= s->parse_packet(s->ic, s->dynamic_protocol_context, - s->st, pkt, ×tamp, NULL, 0, flags); + rv = s->parse_packet(s->ic, s->dynamic_protocol_context, + s->st, pkt, ×tamp, NULL, 0, flags); finalize_packet(s, pkt, timestamp); return rv; } else { @@ -697,7 +703,7 @@ static int rtp_parse_one_packet(RTPDemuxContext *s, AVPacket *pkt, if (s->read_buf_index >= s->read_buf_size) return AVERROR(EAGAIN); ret = ff_mpegts_parse_packet(s->ts, pkt, s->buf + s->read_buf_index, - s->read_buf_size - s->read_buf_index); + s->read_buf_size - s->read_buf_index); if (ret < 0) return AVERROR(EAGAIN); s->read_buf_index += ret; @@ -789,14 +795,16 @@ int ff_parse_fmtp(AVStream *stream, PayloadContext *data, const char *p, } // remove protocol identifier - while (*p && *p == ' ') p++; // strip spaces - while (*p && *p != ' ') p++; // eat protocol identifier - while (*p && *p == ' ') p++; // strip trailing spaces + while (*p && *p == ' ') + p++; // strip spaces + while (*p && *p != ' ') + p++; // eat protocol identifier + while (*p && *p == ' ') + p++; // strip trailing spaces while (ff_rtsp_next_attr_and_value(&p, attr, sizeof(attr), value, value_size)) { - res = parse_fmtp(stream, data, attr, value); if (res < 0 && 
res != AVERROR_PATCHWELCOME) { av_free(value); @@ -811,9 +819,9 @@ int ff_rtp_finalize_packet(AVPacket *pkt, AVIOContext **dyn_buf, int stream_idx) { av_init_packet(pkt); - pkt->size = avio_close_dyn_buf(*dyn_buf, &pkt->data); + pkt->size = avio_close_dyn_buf(*dyn_buf, &pkt->data); pkt->stream_index = stream_idx; pkt->destruct = av_destruct_packet; - *dyn_buf = NULL; + *dyn_buf = NULL; return pkt->size; } |