
Add ByteIOContext argument to public ff_rm_* functions so that we can specify
the data source as a function argument instead of in s->pb before calling the
function. Discussed in the ML thread "[PATCH] fix small memleak in rdt.c".

Originally committed as revision 15849 to svn://svn.ffmpeg.org/ffmpeg/trunk
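
In caller terms the change looks like the following minimal sketch, taken from the
rdt.c hunk below (surrounding buffer setup and error handling omitted):

    /* before: the data source had to be stashed in the shared context first */
    rdt->rmctx->pb = pb;
    res = ff_rm_parse_packet (rdt->rmctx, st, len, pkt, &seq, &flags, timestamp);

    /* after: the ByteIOContext is passed explicitly with each call */
    res = ff_rm_parse_packet (rdt->rmctx, pb, st, len, pkt, &seq, &flags, timestamp);

A usage sketch for the updated rm.h prototypes follows the header diff further down.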
Ronald S. Bultje 2008-11-17 14:20:00 +00:00
parent 074bfa7de7
commit fcc995a533
3 changed files with 31 additions and 29 deletions

libavformat/rdt.c

@@ -159,8 +159,7 @@ rdt_load_mdpr (PayloadContext *rdt, AVStream *st, int rule_nr)
 size = rdt->mlti_data_size;
 url_fseek(pb, 0, SEEK_SET);
 }
-rdt->rmctx->pb = pb;
-if (ff_rm_read_mdpr_codecdata(rdt->rmctx, st, size) < 0)
+if (ff_rm_read_mdpr_codecdata(rdt->rmctx, pb, st, size) < 0)
 return -1;
 url_close_buf(pb);
@@ -259,7 +258,7 @@ rdt_parse_packet (PayloadContext *rdt, AVStream *st,
 const uint8_t *buf, int len, int flags)
 {
 int seq = 1, res;
-ByteIOContext *pb = rdt->rmctx->pb;
+ByteIOContext *pb;
 RMContext *rm = rdt->rmctx->priv_data;
 if (rm->audio_pkt_cnt == 0) {
@@ -267,8 +266,7 @@ rdt_parse_packet (PayloadContext *rdt, AVStream *st,
 url_open_buf (&pb, buf, len, URL_RDONLY);
 flags = (flags & PKT_FLAG_KEY) ? 2 : 0;
-rdt->rmctx->pb = pb;
-res = ff_rm_parse_packet (rdt->rmctx, st, len, pkt,
+res = ff_rm_parse_packet (rdt->rmctx, pb, st, len, pkt,
 &seq, &flags, timestamp);
 pos = url_ftell(pb);
 url_close_buf (pb);
@@ -277,14 +275,13 @@ rdt_parse_packet (PayloadContext *rdt, AVStream *st,
 if (rm->audio_pkt_cnt > 0 &&
 st->codec->codec_id == CODEC_ID_AAC) {
 memcpy (rdt->buffer, buf + pos, len - pos);
-url_open_buf (&pb, rdt->buffer, len - pos, URL_RDONLY);
-rdt->rmctx->pb = pb;
+url_open_buf (&rdt->rmctx->pb, rdt->buffer, len - pos, URL_RDONLY);
 }
 } else {
-ff_rm_retrieve_cache (rdt->rmctx, st, pkt);
+ff_rm_retrieve_cache (rdt->rmctx, rdt->rmctx->pb, st, pkt);
 if (rm->audio_pkt_cnt == 0 &&
 st->codec->codec_id == CODEC_ID_AAC)
-url_close_buf (pb);
+url_close_buf (rdt->rmctx->pb);
 }
 pkt->stream_index = st->index;
 pkt->pts = *timestamp;

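One subtlety in the rdt.c hunk above: when an AAC superframe yields more than one
AVPacket, the leftover data must stay readable on later rdt_parse_packet calls,
after the stack-local pb of the current call has been closed. That is why the
second url_open_buf now opens directly into rdt->rmctx->pb, and why the cache
branch reads and closes rdt->rmctx->pb rather than a local variable. Roughly, as
an abbreviated paraphrase of the post-patch control flow (not a compilable
excerpt; details such as the key-flag handling are omitted):

    if (rm->audio_pkt_cnt == 0) {
        /* first call: parse from a temporary buffer over the RTP payload */
        url_open_buf (&pb, buf, len, URL_RDONLY);
        res = ff_rm_parse_packet (rdt->rmctx, pb, st, len, pkt,
                                  &seq, &flags, timestamp);
        pos = url_ftell(pb);
        url_close_buf (pb);
        if (rm->audio_pkt_cnt > 0 &&
            st->codec->codec_id == CODEC_ID_AAC) {
            /* keep the remaining AAC data open across calls */
            memcpy (rdt->buffer, buf + pos, len - pos);
            url_open_buf (&rdt->rmctx->pb, rdt->buffer, len - pos, URL_RDONLY);
        }
    } else {
        /* later calls: drain one cached packet, close the buffer when done */
        ff_rm_retrieve_cache (rdt->rmctx, rdt->rmctx->pb, st, pkt);
        if (rm->audio_pkt_cnt == 0 &&
            st->codec->codec_id == CODEC_ID_AAC)
            url_close_buf (rdt->rmctx->pb);
    }
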
libavformat/rm.h

@@ -71,17 +71,20 @@ extern AVInputFormat rdt_demuxer;
 * parameters.
 *
 * @param s context containing RMContext and ByteIOContext for stream reading
+* @param pb context to read the data from
 * @param st the stream that the MDPR chunk belongs to and where to store the
 * parameters read from the chunk into
 * @param codec_data_size size of the MDPR chunk
 * @return 0 on success, errno codes on error
 */
-int ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVStream *st, int codec_data_size);
+int ff_rm_read_mdpr_codecdata (AVFormatContext *s, ByteIOContext *pb,
+AVStream *st, int codec_data_size);
 /**
 * Parse one rm-stream packet from the input bytestream.
 *
 * @param s context containing RMContext and ByteIOContext for stream reading
+* @param pb context to read the data from
 * @param st stream to which the packet to be read belongs
 * @param len packet length to read from the input
 * @param pkt packet location to store the parsed packet data
@@ -92,7 +95,8 @@ int ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVStream *st, int codec_data_size)
 * @param ts pointer to timestamp, may be updated
 * @return 0 on success, errno codes on error
 */
-int ff_rm_parse_packet (AVFormatContext *s, AVStream *st, int len,
+int ff_rm_parse_packet (AVFormatContext *s, ByteIOContext *pb,
+AVStream *st, int len,
 AVPacket *pkt, int *seq, int *flags, int64_t *ts);
 /**
@@ -104,9 +108,11 @@ int ff_rm_parse_packet (AVFormatContext *s, AVStream *st, int len,
 * of those packets can be retrieved sequentially.
 *
 * @param s context containing RMContext and ByteIOContext for stream reading
+* @param pb context to read the data from
 * @param st stream that this packet belongs to
 * @param pkt location to store the packet data
 */
-void ff_rm_retrieve_cache (AVFormatContext *s, AVStream *st, AVPacket *pkt);
+void ff_rm_retrieve_cache (AVFormatContext *s, ByteIOContext *pb,
+AVStream *st, AVPacket *pkt);
 #endif /* AVFORMAT_RM_H */

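Taken together, the new declarations let a caller point the ff_rm_* parsers at any
ByteIOContext, for instance one opened over an in-memory buffer. A minimal, hedged
usage sketch under that assumption (rmctx, st, buf and len are placeholders for an
already-initialized RM demuxer context, the target stream, and the raw packet
bytes, as in rdt.c; error handling trimmed):

    ByteIOContext *pb;
    AVPacket pkt;
    int seq = 1, flags = 0;
    int64_t ts = 0;

    /* expose the memory buffer as a ByteIOContext */
    url_open_buf (&pb, buf, len, URL_RDONLY);

    /* parse one RM packet from that explicit source */
    if (ff_rm_parse_packet (rmctx, pb, st, len, &pkt, &seq, &flags, &ts) >= 0) {
        /* ... use pkt ... */
    }

    url_close_buf (pb);
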
libavformat/rmdec.c

@@ -47,11 +47,10 @@ static void get_str8(ByteIOContext *pb, char *buf, int buf_size)
 get_strl(pb, buf, buf_size, get_byte(pb));
 }
-static int rm_read_audio_stream_info(AVFormatContext *s, AVStream *st,
-int read_all)
+static int rm_read_audio_stream_info(AVFormatContext *s, ByteIOContext *pb,
+AVStream *st, int read_all)
 {
 RMContext *rm = s->priv_data;
-ByteIOContext *pb = s->pb;
 char buf[256];
 uint32_t version;
 int i;
@@ -196,9 +195,9 @@ static int rm_read_audio_stream_info(AVFormatContext *s, AVStream *st,
 }
 int
-ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVStream *st, int codec_data_size)
+ff_rm_read_mdpr_codecdata (AVFormatContext *s, ByteIOContext *pb,
+AVStream *st, int codec_data_size)
 {
-ByteIOContext *pb = s->pb;
 unsigned int v;
 int size;
 int64_t codec_pos;
@@ -208,7 +207,7 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVStream *st, int codec_data_size)
 v = get_be32(pb);
 if (v == MKTAG(0xfd, 'a', 'r', '.')) {
 /* ra type header */
-if (rm_read_audio_stream_info(s, st, 0))
+if (rm_read_audio_stream_info(s, pb, st, 0))
 return -1;
 } else {
 int fps, fps2;
@@ -275,7 +274,7 @@ static int rm_read_header_old(AVFormatContext *s, AVFormatParameters *ap)
 st = av_new_stream(s, 0);
 if (!st)
 return -1;
-return rm_read_audio_stream_info(s, st, 1);
+return rm_read_audio_stream_info(s, s->pb, st, 1);
 }
 static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
@@ -357,7 +356,7 @@ static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
 get_str8(pb, buf, sizeof(buf)); /* desc */
 get_str8(pb, buf, sizeof(buf)); /* mimetype */
 st->codec->codec_type = CODEC_TYPE_DATA;
-if (ff_rm_read_mdpr_codecdata(s, st, get_be32(pb)) < 0)
+if (ff_rm_read_mdpr_codecdata(s, s->pb, st, get_be32(pb)) < 0)
 return -1;
 break;
 case MKTAG('D', 'A', 'T', 'A'):
@@ -452,9 +451,9 @@ skip:
 return -1;
 }
-static int rm_assemble_video_frame(AVFormatContext *s, RMContext *rm, AVPacket *pkt, int len)
+static int rm_assemble_video_frame(AVFormatContext *s, ByteIOContext *pb,
+RMContext *rm, AVPacket *pkt, int len)
 {
-ByteIOContext *pb = s->pb;
 int hdr, seq, pic_num, len2, pos;
 int type;
@@ -550,15 +549,15 @@ rm_ac3_swap_bytes (AVStream *st, AVPacket *pkt)
 }
 int
-ff_rm_parse_packet (AVFormatContext *s, AVStream *st, int len, AVPacket *pkt,
+ff_rm_parse_packet (AVFormatContext *s, ByteIOContext *pb,
+AVStream *st, int len, AVPacket *pkt,
 int *seq, int *flags, int64_t *timestamp)
 {
-ByteIOContext *pb = s->pb;
 RMContext *rm = s->priv_data;
 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
 rm->current_stream= st->id;
-if(rm_assemble_video_frame(s, rm, pkt, len) == 1)
+if(rm_assemble_video_frame(s, pb, rm, pkt, len) == 1)
 return -1; //got partial frame
 } else if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
 if ((st->codec->codec_id == CODEC_ID_RA_288) ||
@@ -649,9 +648,9 @@ ff_rm_parse_packet (AVFormatContext *s, AVStream *st, int len, AVPacket *pkt,
 }
 void
-ff_rm_retrieve_cache (AVFormatContext *s, AVStream *st, AVPacket *pkt)
+ff_rm_retrieve_cache (AVFormatContext *s, ByteIOContext *pb,
+AVStream *st, AVPacket *pkt)
 {
-ByteIOContext *pb = s->pb;
 RMContext *rm = s->priv_data;
 assert (rm->audio_pkt_cnt > 0);
@@ -681,7 +680,7 @@ static int rm_read_packet(AVFormatContext *s, AVPacket *pkt)
 if (rm->audio_pkt_cnt) {
 // If there are queued audio packet return them first
 st = s->streams[rm->audio_stream_num];
-ff_rm_retrieve_cache(s, st, pkt);
+ff_rm_retrieve_cache(s, s->pb, st, pkt);
 } else if (rm->old_format) {
 st = s->streams[0];
 if (st->codec->codec_id == CODEC_ID_RA_288) {
@@ -717,7 +716,7 @@ resync:
 return AVERROR(EIO);
 st = s->streams[i];
-if (ff_rm_parse_packet (s, st, len, pkt, &seq, &flags, &timestamp) < 0)
+if (ff_rm_parse_packet (s, s->pb, st, len, pkt, &seq, &flags, &timestamp) < 0)
 goto resync;
 if((flags&2) && (seq&0x7F) == 1)