
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  rtmp: Return a proper error code instead of -1
  rtmp: Check malloc calls
  rtmp: Check ff_rtmp_packet_create calls
  lavfi: add audio mix filter
  flvdec: Make sure sample_rate is set to the updated value
  tqi: Pass errors from the MB decoder

Conflicts:
	Changelog
	doc/filters.texi
	libavcodec/eatqi.c
	libavfilter/Makefile
	libavfilter/allfilters.c
	libavfilter/version.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2012-05-25 00:38:16 +02:00
commit 65e63072f8
11 changed files with 795 additions and 86 deletions


@ -38,6 +38,7 @@ version 0.11:
- audio split filter
- vorbis parser
- png parser
- audio mix filter
version 0.10:


@ -216,6 +216,44 @@ amovie=input.mkv:si=5 [a5];
[x3][a5] amerge" -c:a pcm_s16le output.mkv
@end example
@section amix
Mixes multiple audio inputs into a single output.
For example
@example
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
@end example
will mix 3 input audio streams to a single output with the same duration as the
first input and a dropout transition time of 3 seconds.
The filter accepts the following named parameters:
@table @option
@item inputs
Number of inputs. If unspecified, it defaults to 2.
@item duration
How to determine the end-of-stream.
@table @option
@item longest
Duration of longest input. (default)
@item shortest
Duration of shortest input.
@item first
Duration of first input.
@end table
@item dropout_transition
Transition time, in seconds, for volume renormalization when an input
stream ends. The default value is 2 seconds.
@end table
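A further illustrative invocation (the input and output names are placeholders, as in the example above) combines the named parameters:
@example
ffmpeg -i INPUT1 -i INPUT2 -filter_complex amix=inputs=2:duration=longest:dropout_transition=5 OUTPUT
@end example
This mixes two inputs into an output lasting as long as the longer one, and renormalizes the volume over 5 seconds once the shorter input ends.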
@section anull
Pass the audio source unchanged to the output.


@ -62,7 +62,7 @@ static int tqi_decode_mb(MpegEncContext *s, DCTELEM (*block)[64])
int n;
s->dsp.clear_blocks(block[0]);
for (n=0; n<6; n++)
if(ff_mpeg1_decode_block_intra(s, block[n], n)<0)
if (ff_mpeg1_decode_block_intra(s, block[n], n) < 0)
return -1;
return 0;
@ -138,7 +138,7 @@ static int tqi_decode_frame(AVCodecContext *avctx,
for (s->mb_y=0; s->mb_y<(avctx->height+15)/16; s->mb_y++)
for (s->mb_x=0; s->mb_x<(avctx->width+15)/16; s->mb_x++)
{
if(tqi_decode_mb(s, t->block) < 0)
if (tqi_decode_mb(s, t->block) < 0)
goto end;
tqi_idct_put(t, t->block);
}


@ -47,6 +47,7 @@ OBJS-$(CONFIG_SWSCALE) += lswsutils.o
OBJS-$(CONFIG_ACONVERT_FILTER) += af_aconvert.o
OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o
OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o
OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o
OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o
OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o
OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o

libavfilter/af_amix.c (new file, 545 lines)

@ -0,0 +1,545 @@
/*
* Audio Mix Filter
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Audio Mix Filter
*
* Mixes audio from multiple sources into a single output. The channel layout,
* sample rate, and sample format will be the same for all inputs and the
* output.
*/
#include "libavutil/audioconvert.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#define INPUT_OFF 0 /**< input has reached EOF */
#define INPUT_ON 1 /**< input is active */
#define INPUT_INACTIVE 2 /**< input is on, but is currently inactive */
#define DURATION_LONGEST 0
#define DURATION_SHORTEST 1
#define DURATION_FIRST 2
typedef struct FrameInfo {
int nb_samples;
int64_t pts;
struct FrameInfo *next;
} FrameInfo;
/**
* Linked list used to store timestamps and frame sizes of all frames in the
* FIFO for the first input.
*
* This is needed to keep timestamps synchronized for the case where multiple
* input frames are pushed to the filter for processing before a frame is
* requested by the output link.
*/
typedef struct FrameList {
int nb_frames;
int nb_samples;
FrameInfo *list;
FrameInfo *end;
} FrameList;
static void frame_list_clear(FrameList *frame_list)
{
if (frame_list) {
while (frame_list->list) {
FrameInfo *info = frame_list->list;
frame_list->list = info->next;
av_free(info);
}
frame_list->nb_frames = 0;
frame_list->nb_samples = 0;
frame_list->end = NULL;
}
}
static int frame_list_next_frame_size(FrameList *frame_list)
{
if (!frame_list->list)
return 0;
return frame_list->list->nb_samples;
}
static int64_t frame_list_next_pts(FrameList *frame_list)
{
if (!frame_list->list)
return AV_NOPTS_VALUE;
return frame_list->list->pts;
}
static void frame_list_remove_samples(FrameList *frame_list, int nb_samples)
{
if (nb_samples >= frame_list->nb_samples) {
frame_list_clear(frame_list);
} else {
int samples = nb_samples;
while (samples > 0) {
FrameInfo *info = frame_list->list;
av_assert0(info != NULL);
if (info->nb_samples <= samples) {
samples -= info->nb_samples;
frame_list->list = info->next;
if (!frame_list->list)
frame_list->end = NULL;
frame_list->nb_frames--;
frame_list->nb_samples -= info->nb_samples;
av_free(info);
} else {
info->nb_samples -= samples;
info->pts += samples;
frame_list->nb_samples -= samples;
samples = 0;
}
}
}
}
static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t pts)
{
FrameInfo *info = av_malloc(sizeof(*info));
if (!info)
return AVERROR(ENOMEM);
info->nb_samples = nb_samples;
info->pts = pts;
info->next = NULL;
if (!frame_list->list) {
frame_list->list = info;
frame_list->end = info;
} else {
av_assert0(frame_list->end != NULL);
frame_list->end->next = info;
frame_list->end = info;
}
frame_list->nb_frames++;
frame_list->nb_samples += nb_samples;
return 0;
}
typedef struct MixContext {
const AVClass *class; /**< class for AVOptions */
int nb_inputs; /**< number of inputs */
int active_inputs; /**< number of input currently active */
int duration_mode; /**< mode for determining duration */
float dropout_transition; /**< transition time when an input drops out */
int nb_channels; /**< number of channels */
int sample_rate; /**< sample rate */
AVAudioFifo **fifos; /**< audio fifo for each input */
uint8_t *input_state; /**< current state of each input */
float *input_scale; /**< mixing scale factor for each input */
float scale_norm; /**< normalization factor for all inputs */
int64_t next_pts; /**< calculated pts for next output frame */
FrameList *frame_list; /**< list of frame info for the first input */
} MixContext;
#define OFFSET(x) offsetof(MixContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
static const AVOption options[] = {
{ "inputs", "Number of inputs.",
OFFSET(nb_inputs), AV_OPT_TYPE_INT, { 2 }, 1, 32, A },
{ "duration", "How to determine the end-of-stream.",
OFFSET(duration_mode), AV_OPT_TYPE_INT, { DURATION_LONGEST }, 0, 2, A, "duration" },
{ "longest", "Duration of longest input.", 0, AV_OPT_TYPE_CONST, { DURATION_LONGEST }, INT_MIN, INT_MAX, A, "duration" },
{ "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { DURATION_SHORTEST }, INT_MIN, INT_MAX, A, "duration" },
{ "first", "Duration of first input.", 0, AV_OPT_TYPE_CONST, { DURATION_FIRST }, INT_MIN, INT_MAX, A, "duration" },
{ "dropout_transition", "Transition time, in seconds, for volume "
"renormalization when an input stream ends.",
OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { 2.0 }, 0, INT_MAX, A },
{ NULL },
};
static const AVClass amix_class = {
.class_name = "amix filter",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
/**
* Update the scaling factors to apply to each input during mixing.
*
* This balances the full volume range between active inputs and handles
* volume transitions when EOF is encountered on an input but mixing continues
* with the remaining inputs.
*/
static void calculate_scales(MixContext *s, int nb_samples)
{
int i;
if (s->scale_norm > s->active_inputs) {
s->scale_norm -= nb_samples / (s->dropout_transition * s->sample_rate);
s->scale_norm = FFMAX(s->scale_norm, s->active_inputs);
}
for (i = 0; i < s->nb_inputs; i++) {
if (s->input_state[i] == INPUT_ON)
s->input_scale[i] = 1.0f / s->scale_norm;
else
s->input_scale[i] = 0.0f;
}
}
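/* Illustrative sketch of the resulting behaviour (assumed example values):
 * with 3 active inputs, scale_norm is 3 and every input is mixed at a gain
 * of 1/3. When one input reaches EOF, active_inputs drops to 2 and
 * scale_norm decays linearly from 3 toward 2 over dropout_transition
 * seconds, so the gain of the remaining inputs ramps smoothly from 1/3 to 1/2. */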
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
MixContext *s = ctx->priv;
int i;
char buf[64];
s->sample_rate = outlink->sample_rate;
outlink->time_base = (AVRational){ 1, outlink->sample_rate };
s->next_pts = AV_NOPTS_VALUE;
s->frame_list = av_mallocz(sizeof(*s->frame_list));
if (!s->frame_list)
return AVERROR(ENOMEM);
s->fifos = av_mallocz(s->nb_inputs * sizeof(*s->fifos));
if (!s->fifos)
return AVERROR(ENOMEM);
s->nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
for (i = 0; i < s->nb_inputs; i++) {
s->fifos[i] = av_audio_fifo_alloc(outlink->format, s->nb_channels, 1024);
if (!s->fifos[i])
return AVERROR(ENOMEM);
}
s->input_state = av_malloc(s->nb_inputs);
if (!s->input_state)
return AVERROR(ENOMEM);
memset(s->input_state, INPUT_ON, s->nb_inputs);
s->active_inputs = s->nb_inputs;
s->input_scale = av_mallocz(s->nb_inputs * sizeof(*s->input_scale));
if (!s->input_scale)
return AVERROR(ENOMEM);
s->scale_norm = s->active_inputs;
calculate_scales(s, 0);
av_get_channel_layout_string(buf, sizeof(buf), -1, outlink->channel_layout);
av_log(ctx, AV_LOG_VERBOSE,
"inputs:%d fmt:%s srate:%"PRId64" cl:%s\n", s->nb_inputs,
av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf);
return 0;
}
/* TODO: move optimized version from DSPContext to libavutil */
static void vector_fmac_scalar(float *dst, const float *src, float mul, int len)
{
int i;
for (i = 0; i < len; i++)
dst[i] += src[i] * mul;
}
/**
* Read samples from the input FIFOs, mix, and write to the output link.
*/
static int output_frame(AVFilterLink *outlink, int nb_samples)
{
AVFilterContext *ctx = outlink->src;
MixContext *s = ctx->priv;
AVFilterBufferRef *out_buf, *in_buf;
int i;
calculate_scales(s, nb_samples);
out_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
in_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
if (!in_buf)
return AVERROR(ENOMEM);
for (i = 0; i < s->nb_inputs; i++) {
if (s->input_state[i] == INPUT_ON) {
av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
nb_samples);
vector_fmac_scalar((float *)out_buf->extended_data[0],
(float *) in_buf->extended_data[0],
s->input_scale[i], nb_samples * s->nb_channels);
}
}
avfilter_unref_buffer(in_buf);
out_buf->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += nb_samples;
ff_filter_samples(outlink, out_buf);
return 0;
}
/**
* Returns the smallest number of samples available in the input FIFOs other
* than that of the first input.
*/
static int get_available_samples(MixContext *s)
{
int i;
int available_samples = INT_MAX;
av_assert0(s->nb_inputs > 1);
for (i = 1; i < s->nb_inputs; i++) {
int nb_samples;
if (s->input_state[i] == INPUT_OFF)
continue;
nb_samples = av_audio_fifo_size(s->fifos[i]);
available_samples = FFMIN(available_samples, nb_samples);
}
if (available_samples == INT_MAX)
return 0;
return available_samples;
}
/**
* Requests a frame, if needed, from each input link other than the first.
*/
static int request_samples(AVFilterContext *ctx, int min_samples)
{
MixContext *s = ctx->priv;
int i, ret;
av_assert0(s->nb_inputs > 1);
for (i = 1; i < s->nb_inputs; i++) {
ret = 0;
if (s->input_state[i] == INPUT_OFF)
continue;
while (!ret && av_audio_fifo_size(s->fifos[i]) < min_samples)
ret = avfilter_request_frame(ctx->inputs[i]);
if (ret == AVERROR_EOF) {
if (av_audio_fifo_size(s->fifos[i]) == 0) {
s->input_state[i] = INPUT_OFF;
continue;
}
} else if (ret)
return ret;
}
return 0;
}
/**
* Calculates the number of active inputs and determines EOF based on the
* duration option.
*
* @return 0 if mixing should continue, or AVERROR_EOF if mixing should stop.
*/
static int calc_active_inputs(MixContext *s)
{
int i;
int active_inputs = 0;
for (i = 0; i < s->nb_inputs; i++)
active_inputs += !!(s->input_state[i] != INPUT_OFF);
s->active_inputs = active_inputs;
if (!active_inputs ||
(s->duration_mode == DURATION_FIRST && s->input_state[0] == INPUT_OFF) ||
(s->duration_mode == DURATION_SHORTEST && active_inputs != s->nb_inputs))
return AVERROR_EOF;
return 0;
}
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
MixContext *s = ctx->priv;
int ret;
int wanted_samples, available_samples;
if (s->input_state[0] == INPUT_OFF) {
ret = request_samples(ctx, 1);
if (ret < 0)
return ret;
ret = calc_active_inputs(s);
if (ret < 0)
return ret;
available_samples = get_available_samples(s);
if (!available_samples)
return 0;
return output_frame(outlink, available_samples);
}
if (s->frame_list->nb_frames == 0) {
ret = avfilter_request_frame(ctx->inputs[0]);
if (ret == AVERROR_EOF) {
s->input_state[0] = INPUT_OFF;
if (s->nb_inputs == 1)
return AVERROR_EOF;
else
return AVERROR(EAGAIN);
} else if (ret)
return ret;
}
av_assert0(s->frame_list->nb_frames > 0);
wanted_samples = frame_list_next_frame_size(s->frame_list);
ret = request_samples(ctx, wanted_samples);
if (ret < 0)
return ret;
ret = calc_active_inputs(s);
if (ret < 0)
return ret;
if (s->active_inputs > 1) {
available_samples = get_available_samples(s);
if (!available_samples)
return 0;
available_samples = FFMIN(available_samples, wanted_samples);
} else {
available_samples = wanted_samples;
}
s->next_pts = frame_list_next_pts(s->frame_list);
frame_list_remove_samples(s->frame_list, available_samples);
return output_frame(outlink, available_samples);
}
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
MixContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int i;
for (i = 0; i < ctx->input_count; i++)
if (ctx->inputs[i] == inlink)
break;
if (i >= ctx->input_count) {
av_log(ctx, AV_LOG_ERROR, "unknown input link\n");
return;
}
if (i == 0) {
int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
outlink->time_base);
frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts);
}
av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
buf->audio->nb_samples);
avfilter_unref_buffer(buf);
}
static int init(AVFilterContext *ctx, const char *args, void *opaque)
{
MixContext *s = ctx->priv;
int i, ret;
s->class = &amix_class;
av_opt_set_defaults(s);
if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", args);
return ret;
}
av_opt_free(s);
for (i = 0; i < s->nb_inputs; i++) {
char name[32];
AVFilterPad pad = { 0 };
snprintf(name, sizeof(name), "input%d", i);
pad.type = AVMEDIA_TYPE_AUDIO;
pad.name = av_strdup(name);
pad.filter_samples = filter_samples;
avfilter_insert_inpad(ctx, i, &pad);
}
return 0;
}
static void uninit(AVFilterContext *ctx)
{
int i;
MixContext *s = ctx->priv;
if (s->fifos) {
for (i = 0; i < s->nb_inputs; i++)
av_audio_fifo_free(s->fifos[i]);
av_freep(&s->fifos);
}
frame_list_clear(s->frame_list);
av_freep(&s->frame_list);
av_freep(&s->input_state);
av_freep(&s->input_scale);
for (i = 0; i < ctx->input_count; i++)
av_freep(&ctx->input_pads[i].name);
}
static int query_formats(AVFilterContext *ctx)
{
AVFilterFormats *formats = NULL;
avfilter_add_format(&formats, AV_SAMPLE_FMT_FLT);
avfilter_set_common_formats(ctx, formats);
ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
ff_set_common_samplerates(ctx, ff_all_samplerates());
return 0;
}
AVFilter avfilter_af_amix = {
.name = "amix",
.description = NULL_IF_CONFIG_SMALL("Audio mixing."),
.priv_size = sizeof(MixContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = (const AVFilterPad[]) {{ .name = NULL}},
.outputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
.request_frame = request_frame },
{ .name = NULL}},
};


@ -37,6 +37,7 @@ void avfilter_register_all(void)
REGISTER_FILTER (ACONVERT, aconvert, af);
REGISTER_FILTER (AFORMAT, aformat, af);
REGISTER_FILTER (AMERGE, amerge, af);
REGISTER_FILTER (AMIX, amix, af);
REGISTER_FILTER (ANULL, anull, af);
REGISTER_FILTER (ARESAMPLE, aresample, af);
REGISTER_FILTER (ASHOWINFO, ashowinfo, af);


@ -29,7 +29,7 @@
#include "libavutil/avutil.h"
#define LIBAVFILTER_VERSION_MAJOR 2
#define LIBAVFILTER_VERSION_MINOR 76
#define LIBAVFILTER_VERSION_MINOR 77
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \


@ -591,8 +591,8 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
}
if(!st->codec->codec_id){
flv_set_audio_codec(s, st, st->codec, flags & FLV_AUDIO_CODECID_MASK);
flv->last_sample_rate = st->codec->sample_rate;
flv->last_channels = st->codec->channels;
flv->last_sample_rate = sample_rate = st->codec->sample_rate;
flv->last_channels = channels = st->codec->channels;
} else {
AVCodecContext ctx;
ctx.sample_rate = sample_rate;


@ -79,6 +79,7 @@ int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p,
uint32_t extra = 0;
enum RTMPPacketType type;
int size = 0;
int ret;
if (ffurl_read(h, &hdr, 1) != 1)
return AVERROR(EIO);
@ -129,8 +130,9 @@ int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p,
if (hdr != RTMP_PS_TWELVEBYTES)
timestamp += prev_pkt[channel_id].timestamp;
if (ff_rtmp_packet_create(p, channel_id, type, timestamp, data_size))
return -1;
if ((ret = ff_rtmp_packet_create(p, channel_id, type, timestamp,
data_size)) < 0)
return ret;
p->extra = extra;
// save history
prev_pkt[channel_id].channel_id = channel_id;


@ -115,12 +115,16 @@ static const uint8_t rtmp_server_key[] = {
/**
* Generate 'connect' call and send it to the server.
*/
static void gen_connect(URLContext *s, RTMPContext *rt)
static int gen_connect(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
0, 4096)) < 0)
return ret;
ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 4096);
p = pkt.data;
ff_amf_write_string(&p, "connect");
@ -165,19 +169,23 @@ static void gen_connect(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return 0;
}
/**
* Generate 'releaseStream' call and send it to the server. It should make
* the server release some channel for media streams.
*/
static void gen_release_stream(URLContext *s, RTMPContext *rt)
static int gen_release_stream(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
29 + strlen(rt->playpath));
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
0, 29 + strlen(rt->playpath))) < 0)
return ret;
av_log(s, AV_LOG_DEBUG, "Releasing stream...\n");
p = pkt.data;
@ -188,19 +196,23 @@ static void gen_release_stream(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return 0;
}
/**
* Generate 'FCPublish' call and send it to the server. It should make
* the server prepare for receiving media streams.
*/
static void gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
static int gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
25 + strlen(rt->playpath));
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
0, 25 + strlen(rt->playpath))) < 0)
return ret;
av_log(s, AV_LOG_DEBUG, "FCPublish stream...\n");
p = pkt.data;
@ -211,19 +223,23 @@ static void gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return 0;
}
/**
* Generate 'FCUnpublish' call and send it to the server. It should make
* the server destroy stream.
*/
static void gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
static int gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
27 + strlen(rt->playpath));
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
0, 27 + strlen(rt->playpath))) < 0)
return ret;
av_log(s, AV_LOG_DEBUG, "UnPublishing stream...\n");
p = pkt.data;
@ -234,19 +250,25 @@ static void gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return 0;
}
/**
* Generate 'createStream' call and send it to the server. It should make
* the server allocate some channel for media streams.
*/
static void gen_create_stream(URLContext *s, RTMPContext *rt)
static int gen_create_stream(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
av_log(s, AV_LOG_DEBUG, "Creating stream...\n");
ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 25);
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
0, 25)) < 0)
return ret;
p = pkt.data;
ff_amf_write_string(&p, "createStream");
@ -256,6 +278,8 @@ static void gen_create_stream(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return 0;
}
@ -263,13 +287,17 @@ static void gen_create_stream(URLContext *s, RTMPContext *rt)
* Generate 'deleteStream' call and send it to the server. It should make
* the server remove some channel for media streams.
*/
static void gen_delete_stream(URLContext *s, RTMPContext *rt)
static int gen_delete_stream(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
av_log(s, AV_LOG_DEBUG, "Deleting stream...\n");
ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 34);
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
0, 34)) < 0)
return ret;
p = pkt.data;
ff_amf_write_string(&p, "deleteStream");
@ -279,20 +307,26 @@ static void gen_delete_stream(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return 0;
}
/**
* Generate 'play' call and send it to the server, then ping the server
* to start actual playing.
*/
static void gen_play(URLContext *s, RTMPContext *rt)
static int gen_play(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);
ff_rtmp_packet_create(&pkt, RTMP_VIDEO_CHANNEL, RTMP_PT_INVOKE, 0,
29 + strlen(rt->playpath));
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_VIDEO_CHANNEL, RTMP_PT_INVOKE,
0, 29 + strlen(rt->playpath))) < 0)
return ret;
pkt.extra = rt->main_channel_id;
p = pkt.data;
@ -306,7 +340,9 @@ static void gen_play(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_destroy(&pkt);
// set client buffer time disguised in ping packet
ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, 1, 10);
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
1, 10)) < 0)
return ret;
p = pkt.data;
bytestream_put_be16(&p, 3);
@ -315,19 +351,25 @@ static void gen_play(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return 0;
}
/**
* Generate 'publish' call and send it to the server.
*/
static void gen_publish(URLContext *s, RTMPContext *rt)
static int gen_publish(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
av_log(s, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath);
ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE, 0,
30 + strlen(rt->playpath));
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
0, 30 + strlen(rt->playpath))) < 0)
return ret;
pkt.extra = rt->main_channel_id;
p = pkt.data;
@ -339,48 +381,65 @@ static void gen_publish(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return ret;
}
/**
* Generate ping reply and send it to the server.
*/
static void gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
static int gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
ppkt->timestamp + 1, 6)) < 0)
return ret;
ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, ppkt->timestamp + 1, 6);
p = pkt.data;
bytestream_put_be16(&p, 7);
bytestream_put_be32(&p, AV_RB32(ppkt->data+2));
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return 0;
}
/**
* Generate server bandwidth message and send it to the server.
*/
static void gen_server_bw(URLContext *s, RTMPContext *rt)
static int gen_server_bw(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_SERVER_BW,
0, 4)) < 0)
return ret;
ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_SERVER_BW, 0, 4);
p = pkt.data;
bytestream_put_be32(&p, 2500000);
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return 0;
}
/**
* Generate check bandwidth message and send it to the server.
*/
static void gen_check_bw(URLContext *s, RTMPContext *rt)
static int gen_check_bw(URLContext *s, RTMPContext *rt)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 21);
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
0, 21)) < 0)
return ret;
p = pkt.data;
ff_amf_write_string(&p, "_checkbw");
@ -389,21 +448,29 @@ static void gen_check_bw(URLContext *s, RTMPContext *rt)
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return ret;
}
/**
* Generate report on bytes read so far and send it to the server.
*/
static void gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
static int gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
{
RTMPPacket pkt;
uint8_t *p;
int ret;
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_BYTES_READ,
ts, 4)) < 0)
return ret;
ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_BYTES_READ, ts, 4);
p = pkt.data;
bytestream_put_be32(&p, rt->bytes_read);
ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return 0;
}
//TODO: Move HMAC code somewhere. Eventually.
@ -421,14 +488,16 @@ static void gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
* @param keylen digest key length
* @param dst buffer where calculated digest will be stored (32 bytes)
*/
static void rtmp_calc_digest(const uint8_t *src, int len, int gap,
const uint8_t *key, int keylen, uint8_t *dst)
static int rtmp_calc_digest(const uint8_t *src, int len, int gap,
const uint8_t *key, int keylen, uint8_t *dst)
{
struct AVSHA *sha;
uint8_t hmac_buf[64+32] = {0};
int i;
sha = av_mallocz(av_sha_size);
if (!sha)
return AVERROR(ENOMEM);
if (keylen < 64) {
memcpy(hmac_buf, key, keylen);
@ -457,6 +526,8 @@ static void rtmp_calc_digest(const uint8_t *src, int len, int gap,
av_sha_final(sha, dst);
av_free(sha);
return 0;
}
/**
@ -469,14 +540,18 @@ static void rtmp_calc_digest(const uint8_t *src, int len, int gap,
static int rtmp_handshake_imprint_with_digest(uint8_t *buf)
{
int i, digest_pos = 0;
int ret;
for (i = 8; i < 12; i++)
digest_pos += buf[i];
digest_pos = (digest_pos % 728) + 12;
rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
buf + digest_pos);
ret = rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
buf + digest_pos);
if (ret < 0)
return ret;
return digest_pos;
}
@ -491,14 +566,18 @@ static int rtmp_validate_digest(uint8_t *buf, int off)
{
int i, digest_pos = 0;
uint8_t digest[32];
int ret;
for (i = 0; i < 4; i++)
digest_pos += buf[i + off];
digest_pos = (digest_pos % 728) + off + 4;
rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
digest);
ret = rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
digest);
if (ret < 0)
return ret;
if (!memcmp(digest, buf + digest_pos, 32))
return digest_pos;
return 0;
@ -526,6 +605,7 @@ static int rtmp_handshake(URLContext *s, RTMPContext *rt)
int i;
int server_pos, client_pos;
uint8_t digest[32];
int ret;
av_log(s, AV_LOG_DEBUG, "Handshaking...\n");
@ -534,17 +614,19 @@ static int rtmp_handshake(URLContext *s, RTMPContext *rt)
for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
tosend[i] = av_lfg_get(&rnd) >> 24;
client_pos = rtmp_handshake_imprint_with_digest(tosend + 1);
if (client_pos < 0)
return client_pos;
ffurl_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE + 1);
i = ffurl_read_complete(rt->stream, serverdata, RTMP_HANDSHAKE_PACKET_SIZE + 1);
if (i != RTMP_HANDSHAKE_PACKET_SIZE + 1) {
av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
return -1;
return AVERROR(EIO);
}
i = ffurl_read_complete(rt->stream, clientdata, RTMP_HANDSHAKE_PACKET_SIZE);
if (i != RTMP_HANDSHAKE_PACKET_SIZE) {
av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
return -1;
return AVERROR(EIO);
}
av_log(s, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n",
@ -552,33 +634,48 @@ static int rtmp_handshake(URLContext *s, RTMPContext *rt)
if (rt->is_input && serverdata[5] >= 3) {
server_pos = rtmp_validate_digest(serverdata + 1, 772);
if (server_pos < 0)
return server_pos;
if (!server_pos) {
server_pos = rtmp_validate_digest(serverdata + 1, 8);
if (server_pos < 0)
return server_pos;
if (!server_pos) {
av_log(s, AV_LOG_ERROR, "Server response validating failed\n");
return -1;
return AVERROR(EIO);
}
}
rtmp_calc_digest(tosend + 1 + client_pos, 32, 0,
rtmp_server_key, sizeof(rtmp_server_key),
digest);
rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE-32, 0,
digest, 32,
digest);
ret = rtmp_calc_digest(tosend + 1 + client_pos, 32, 0, rtmp_server_key,
sizeof(rtmp_server_key), digest);
if (ret < 0)
return ret;
ret = rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
digest, 32, digest);
if (ret < 0)
return ret;
if (memcmp(digest, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) {
av_log(s, AV_LOG_ERROR, "Signature mismatch\n");
return -1;
return AVERROR(EIO);
}
for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++)
tosend[i] = av_lfg_get(&rnd) >> 24;
rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
rtmp_player_key, sizeof(rtmp_player_key),
digest);
rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
digest, 32,
tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);
ret = rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
rtmp_player_key, sizeof(rtmp_player_key),
digest);
if (ret < 0)
return ret;
ret = rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
digest, 32,
tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);
if (ret < 0)
return ret;
// write reply back to the server
ffurl_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE);
@ -599,6 +696,7 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
{
int i, t;
const uint8_t *data_end = pkt->data + pkt->data_size;
int ret;
#ifdef DEBUG
ff_rtmp_packet_dump(s, pkt);
@ -623,7 +721,8 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
case RTMP_PT_PING:
t = AV_RB16(pkt->data);
if (t == 6)
gen_pong(s, rt, pkt);
if ((ret = gen_pong(s, rt, pkt)) < 0)
return ret;
break;
case RTMP_PT_CLIENT_BW:
if (pkt->data_size < 4) {
@ -648,14 +747,18 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
switch (rt->state) {
case STATE_HANDSHAKED:
if (!rt->is_input) {
gen_release_stream(s, rt);
gen_fcpublish_stream(s, rt);
if ((ret = gen_release_stream(s, rt)) < 0)
return ret;
if ((ret = gen_fcpublish_stream(s, rt)) < 0)
return ret;
rt->state = STATE_RELEASING;
} else {
gen_server_bw(s, rt);
if ((ret = gen_server_bw(s, rt)) < 0)
return ret;
rt->state = STATE_CONNECTING;
}
gen_create_stream(s, rt);
if ((ret = gen_create_stream(s, rt)) < 0)
return ret;
break;
case STATE_FCPUBLISH:
rt->state = STATE_CONNECTING;
@ -679,9 +782,11 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
rt->main_channel_id = av_int2double(AV_RB64(pkt->data + 21));
}
if (rt->is_input) {
gen_play(s, rt);
if ((ret = gen_play(s, rt)) < 0)
return ret;
} else {
gen_publish(s, rt);
if ((ret = gen_publish(s, rt)) < 0)
return ret;
}
rt->state = STATE_READY;
break;
@ -711,7 +816,8 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
if (!t && !strcmp(tmpstr, "NetStream.Publish.Start")) rt->state = STATE_PUBLISHING;
} else if (!memcmp(pkt->data, "\002\000\010onBWDone", 11)) {
gen_check_bw(s, rt);
if ((ret = gen_check_bw(s, rt)) < 0)
return ret;
}
break;
}
@ -754,14 +860,15 @@ static int get_packet(URLContext *s, int for_header)
rt->bytes_read += ret;
if (rt->bytes_read - rt->last_bytes_read > rt->client_report_size) {
av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n");
gen_bytes_read(s, rt, rpkt.timestamp + 1);
if ((ret = gen_bytes_read(s, rt, rpkt.timestamp + 1)) < 0)
return ret;
rt->last_bytes_read = rt->bytes_read;
}
ret = rtmp_parse_result(s, rt, &rpkt);
if (ret < 0) {//serious error in current packet
ff_rtmp_packet_destroy(&rpkt);
return -1;
return ret;
}
if (rt->state == STATE_STOPPED) {
ff_rtmp_packet_destroy(&rpkt);
@ -825,20 +932,21 @@ static int get_packet(URLContext *s, int for_header)
static int rtmp_close(URLContext *h)
{
RTMPContext *rt = h->priv_data;
int ret = 0;
if (!rt->is_input) {
rt->flv_data = NULL;
if (rt->out_pkt.data_size)
ff_rtmp_packet_destroy(&rt->out_pkt);
if (rt->state > STATE_FCPUBLISH)
gen_fcunpublish_stream(h, rt);
ret = gen_fcunpublish_stream(h, rt);
}
if (rt->state > STATE_HANDSHAKED)
gen_delete_stream(h, rt);
ret = gen_delete_stream(h, rt);
av_freep(&rt->flv_data);
ffurl_close(rt->stream);
return 0;
return ret;
}
/**
@ -868,14 +976,14 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
port = RTMP_DEFAULT_PORT;
ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
if (ffurl_open(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
&s->interrupt_callback, NULL) < 0) {
if ((ret = ffurl_open(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
&s->interrupt_callback, NULL)) < 0) {
av_log(s , AV_LOG_ERROR, "Cannot open connection %s\n", buf);
goto fail;
}
rt->state = STATE_START;
if (rtmp_handshake(s, rt))
if ((ret = rtmp_handshake(s, rt)) < 0)
goto fail;
rt->chunk_size = 128;
@ -886,8 +994,8 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
rt->app = av_malloc(APP_MAX_LENGTH);
if (!rt->app) {
rtmp_close(s);
return AVERROR(ENOMEM);
ret = AVERROR(ENOMEM);
goto fail;
}
//extract "app" part from path
@ -922,8 +1030,8 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
if (!rt->playpath) {
rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH);
if (!rt->playpath) {
rtmp_close(s);
return AVERROR(ENOMEM);
ret = AVERROR(ENOMEM);
goto fail;
}
if (!strchr(fname, ':') &&
@ -938,12 +1046,20 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
if (!rt->tcurl) {
rt->tcurl = av_malloc(TCURL_MAX_LENGTH);
if (!rt->tcurl) {
ret = AVERROR(ENOMEM);
goto fail;
}
ff_url_join(rt->tcurl, TCURL_MAX_LENGTH, proto, NULL, hostname,
port, "/%s", rt->app);
}
if (!rt->flashver) {
rt->flashver = av_malloc(FLASHVER_MAX_LENGTH);
if (!rt->flashver) {
ret = AVERROR(ENOMEM);
goto fail;
}
if (rt->is_input) {
snprintf(rt->flashver, FLASHVER_MAX_LENGTH, "%s %d,%d,%d,%d",
RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2,
@ -960,7 +1076,8 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
proto, path, rt->app, rt->playpath);
gen_connect(s, rt);
if ((ret = gen_connect(s, rt)) < 0)
goto fail;
do {
ret = get_packet(s, 1);
@ -987,7 +1104,7 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
fail:
rtmp_close(s);
return AVERROR(EIO);
return ret;
}
static int rtmp_read(URLContext *s, uint8_t *buf, int size)
@ -1024,6 +1141,7 @@ static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
int pktsize, pkttype;
uint32_t ts;
const uint8_t *buf_temp = buf;
int ret;
do {
if (rt->skip_bytes) {
@ -1059,7 +1177,10 @@ static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
}
//this can be a big packet, it's better to send it right here
ff_rtmp_packet_create(&rt->out_pkt, RTMP_SOURCE_CHANNEL, pkttype, ts, pktsize);
if ((ret = ff_rtmp_packet_create(&rt->out_pkt, RTMP_SOURCE_CHANNEL,
pkttype, ts, pktsize)) < 0)
return ret;
rt->out_pkt.extra = rt->main_channel_id;
rt->flv_data = rt->out_pkt.data;


@ -39,8 +39,8 @@ fate-nellymoser-aref-encode: $(AREF)
fate-nellymoser-aref-encode: CMD = enc_dec_pcm flv wav s16le $(REF) -c:a nellymoser
fate-nellymoser-aref-encode: CMP = stddev
fate-nellymoser-aref-encode: REF = ./tests/data/acodec-16000-1.ref.wav
fate-nellymoser-aref-encode: CMP_SHIFT = -1172
fate-nellymoser-aref-encode: CMP_TARGET = 9617
fate-nellymoser-aref-encode: CMP_SHIFT = -244
fate-nellymoser-aref-encode: CMP_TARGET = 9612
fate-nellymoser-aref-encode: SIZE_TOLERANCE = 268
FATE_SAMPLES_AUDIO += fate-sierra-vmd-audio