Mirror of https://github.com/FFmpeg/FFmpeg.git
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  rtmp: Add support for SWFVerification
  api-example: use new video encoding API.
  x86: avcodec: Appropriately name files containing only init functions
  mpegvideo_mmx_template: drop some commented-out cruft
  libavresample: add mix level normalization option
  w32pthreads: Add missing #includes to make header compile standalone
  rtmp: Gracefully ignore _checkbw errors by tracking them
  rtmp: Do not send _checkbw calls as notifications
  prores: interlaced ProRes encoding

Conflicts:
	doc/examples/decoding_encoding.c
	libavcodec/proresenc_kostya.c
	libavcodec/w32pthreads.h
	libavcodec/x86/Makefile
	libavformat/version.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 9e89bc37ed
@@ -31,10 +31,10 @@

#include <math.h>

#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/audioconvert.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>

@@ -315,11 +315,11 @@ static void video_encode_example(const char *filename, int codec_id)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, out_size, x, y, outbuf_size;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *picture;
    uint8_t *outbuf;
    int had_output=0;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Encode video file %s\n", filename);

@@ -359,17 +359,25 @@ static void video_encode_example(const char *filename, int codec_id)
        exit(1);
    }

    /* alloc image and output buffer */
    outbuf_size = 100000 + 12*c->width*c->height;
    outbuf = malloc(outbuf_size);

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    av_image_alloc(picture->data, picture->linesize,
                   c->width, c->height, c->pix_fmt, 1);
    ret = av_image_alloc(picture->data, picture->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "could not alloc raw picture buffer\n");
        exit(1);
    }

    picture->format = c->pix_fmt;
    picture->width = c->width;
    picture->height = c->height;

    /* encode 1 second of video */
    for(i=0;i<25;i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
@@ -387,35 +395,46 @@ static void video_encode_example(const char *filename, int codec_id)
            }
        }

        picture->pts = i;

        /* encode the image */
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
        had_output |= out_size;
        printf("encoding frame %3d (size=%5d)\n", i, out_size);
        fwrite(outbuf, 1, out_size, f);
        ret = avcodec_encode_video2(c, &pkt, picture, &got_output);
        if (ret < 0) {
            fprintf(stderr, "error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("encoding frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for(; out_size || !had_output; i++) {
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
        had_output |= out_size;
        printf("write frame %3d (size=%5d)\n", i, out_size);
        fwrite(outbuf, 1, out_size, f);
        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* add sequence end code to have a real mpeg file */
    outbuf[0] = 0x00;
    outbuf[1] = 0x00;
    outbuf[2] = 0x01;
    outbuf[3] = 0xb7;
    fwrite(outbuf, 1, 4, f);
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);
    free(outbuf);

    avcodec_close(c);
    av_free(c);
    av_free(picture->data[0]);
    av_freep(&picture->data[0]);
    av_free(picture);
    printf("\n");
}
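Note: the gist of the api-example change above is that avcodec_encode_video() with a caller-supplied buffer is replaced by avcodec_encode_video2(), which fills an AVPacket and reports through got_output whether a packet was produced. A condensed sketch of that pattern (the helper name encode_and_write is illustrative, not part of the example):

#include <stdio.h>
#include <stdlib.h>
#include <libavcodec/avcodec.h>

/* Sketch of the new call pattern: encode one frame (or NULL to flush) and
 * write any produced packet to f. Returns got_output so the caller can
 * drain delayed frames with: while (encode_and_write(c, NULL, f)) ; */
static int encode_and_write(AVCodecContext *c, AVFrame *frame, FILE *f)
{
    AVPacket pkt;
    int ret, got_output;

    av_init_packet(&pkt);
    pkt.data = NULL; /* the encoder allocates the packet payload */
    pkt.size = 0;

    ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
    if (ret < 0) {
        fprintf(stderr, "error encoding frame\n");
        exit(1);
    }
    if (got_output) {
        fwrite(pkt.data, 1, pkt.size, f);
        av_free_packet(&pkt);
    }
    return got_output;
}

Flushing delayed frames then reduces to calling it with a NULL frame until it returns 0, which is what the for (got_output = 1; got_output; i++) loop above does.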
@@ -272,6 +272,12 @@ Name of live stream to subscribe to. By default no value will be sent.
It is only sent if the option is specified or if rtmp_live
is set to live.

@item rtmp_swfhash
SHA256 hash of the decompressed SWF file (32 bytes).

@item rtmp_swfsize
Size of the decompressed SWF file, required for SWFVerification.

@item rtmp_swfurl
URL of the SWF player for the media. By default no value will be sent.
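Note: a hypothetical usage sketch for the options documented above, passing them to the RTMP protocol through the options dictionary of avformat_open_input(); the hash, size and player URL are placeholders, and a binary option such as rtmp_swfhash is given as a hexadecimal string:

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Hypothetical sketch: enable SWFVerification for an RTMP input.
 * The hash/size/URL values are placeholders. */
static int open_rtmp_with_swf_verification(AVFormatContext **fmt, const char *url)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "rtmp_swfhash", "..." /* 64 hex chars = 32-byte SHA256 */, 0);
    av_dict_set(&opts, "rtmp_swfsize", "307200", 0);
    av_dict_set(&opts, "rtmp_swfurl", "http://example.com/player.swf", 0);

    ret = avformat_open_input(fmt, url, NULL, &opts);
    av_dict_free(&opts); /* entries not consumed by the protocol remain here */
    return ret;
}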
@@ -406,10 +406,15 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
    int total_size = 0;
    const uint16_t *src;
    int slice_width_factor = av_log2(mbs_per_slice);
    int num_cblocks, pwidth, linesize, line_offset;
    int num_cblocks, pwidth, linesize, line_add;
    int plane_factor, is_chroma;
    uint16_t *qmat;

    if (ctx->pictures_per_frame == 1)
        line_add = 0;
    else
        line_add = ctx->cur_picture_idx ^ !pic->top_field_first;

    if (ctx->force_quant) {
        qmat = ctx->quants[0];
    } else if (quant < MAX_STORED_Q) {
@@ -437,15 +442,14 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
            pwidth = avctx->width >> 1;
        }

        line_offset = ((ctx->cur_picture_idx ^ !pic->top_field_first) &
                       (ctx->pictures_per_frame - 1)) * pic->linesize[i];
        linesize = pic->linesize[i] * ctx->pictures_per_frame;
        src = (const uint16_t*)(pic->data[i] + yp * linesize + line_offset) + xp;
        src = (const uint16_t*)(pic->data[i] + yp * linesize +
                                line_add * pic->linesize[i]) + xp;

        get_slice_data(ctx, src, linesize, xp, yp,
                       pwidth, avctx->height / ctx->pictures_per_frame,
                       ctx->blocks[0], ctx->emu_buf, mbs_per_slice,
                       num_cblocks, is_chroma);
                       ctx->blocks[0], ctx->emu_buf,
                       mbs_per_slice, num_cblocks, is_chroma);
        sizes[i] = encode_slice_plane(ctx, pb, src, linesize,
                                      mbs_per_slice, ctx->blocks[0],
                                      num_cblocks, plane_factor,
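Note: the ProRes interlacing support above comes down to addressing a single field of an interleaved frame: the effective stride is multiplied by pictures_per_frame and line_add selects whether the slice reads the top- or bottom-field lines. A standalone sketch of the same addressing (function name is illustrative):

#include <stdint.h>

/* Illustration of the field addressing used above: picture 'pic_idx'
 * (0 or 1) of 'pictures_per_frame' reads every other line of the
 * interleaved frame, optionally offset by one line. */
static const uint16_t *field_line(const uint8_t *data, int frame_linesize,
                                  int pictures_per_frame, int pic_idx,
                                  int top_field_first, int y, int x)
{
    /* 0 for progressive input; otherwise pick the field matching the order */
    int line_add = pictures_per_frame == 1 ? 0 : pic_idx ^ !top_field_first;
    int linesize = frame_linesize * pictures_per_frame; /* skip the other field */

    return (const uint16_t *)(data + y * linesize + line_add * frame_linesize) + x;
}

For a top-field-first frame, picture 0 thus reads lines 0, 2, 4, … and picture 1 reads lines 1, 3, 5, ….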
@@ -579,8 +583,12 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
    int slice_bits[TRELLIS_WIDTH], slice_score[TRELLIS_WIDTH];
    int overquant;
    uint16_t *qmat;
    int linesize[4], line_offset;
    int linesize[4], line_add;

    if (ctx->pictures_per_frame == 1)
        line_add = 0;
    else
        line_add = ctx->cur_picture_idx ^ !pic->top_field_first;
    mbs = x + mbs_per_slice;

    for (i = 0; i < ctx->num_planes; i++) {
@@ -600,15 +608,14 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
            pwidth = avctx->width >> 1;
        }

        line_offset = ((ctx->cur_picture_idx ^ !pic->top_field_first) &
                       (ctx->pictures_per_frame - 1)) * pic->linesize[i];
        linesize[i] = pic->linesize[i] * ctx->pictures_per_frame;
        src = (const uint16_t*)(pic->data[i] + yp * linesize[i] + line_offset) + xp;
        src = (const uint16_t*)(pic->data[i] + yp * linesize[i] +
                                line_add * pic->linesize[i]) + xp;

        get_slice_data(ctx, src, linesize[i], xp, yp,
                       pwidth, avctx->height / ctx->pictures_per_frame,
                       td->blocks[i], td->emu_buf, mbs_per_slice,
                       num_cblocks[i], is_chroma[i]);
                       td->blocks[i], td->emu_buf,
                       mbs_per_slice, num_cblocks[i], is_chroma[i]);
    }

    for (q = min_quant; q < max_quant + 2; q++) {
@@ -767,9 +774,8 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
    bytestream_put_be16 (&buf, avctx->height);

    frame_flags = ctx->chroma_factor << 6;
    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT)
        frame_flags |= pic->top_field_first ? 0x04 : 0x08;
    }
    bytestream_put_byte (&buf, frame_flags);

    bytestream_put_byte (&buf, 0); // reserved
@@ -791,7 +797,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
    }
    bytestream_put_be16 (&tmp, buf - orig_buf); // write back frame header size

    for (ctx->cur_picture_idx = 0; ctx->cur_picture_idx < ctx->pictures_per_frame; ++ctx->cur_picture_idx) {
    for (ctx->cur_picture_idx = 0;
         ctx->cur_picture_idx < ctx->pictures_per_frame;
         ctx->cur_picture_idx++) {
        // picture header
        picture_size_pos = buf + 1;
        bytestream_put_byte (&buf, 0x40); // picture header size (in bits)
@@ -845,7 +853,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
    frame_size = buf - orig_buf;
    bytestream_put_be32(&orig_buf, frame_size);


    pkt->size = frame_size;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
@@ -927,7 +934,8 @@ static av_cold int encode_init(AVCodecContext *avctx)
    if (!ctx->force_quant) {
        if (!ctx->bits_per_mb) {
            for (i = 0; i < NUM_MB_LIMITS - 1; i++)
                if (prores_mb_limits[i] >= ctx->mb_width * ctx->mb_height * ctx->pictures_per_frame)
                if (prores_mb_limits[i] >= ctx->mb_width * ctx->mb_height *
                                           ctx->pictures_per_frame)
                    break;
            ctx->bits_per_mb = ctx->profile_info->br_tab[i];
        } else if (ctx->bits_per_mb < 128) {
@@ -991,12 +999,15 @@ static av_cold int encode_init(AVCodecContext *avctx)
    ctx->frame_size_upper_bound = ctx->pictures_per_frame *
                                  ctx->slices_per_picture *
                                  (2 + 2 * ctx->num_planes +
                                   (mps * ctx->bits_per_mb) / 8) + 200;
                                   (mps * ctx->bits_per_mb) / 8)
                                  + 200;

    avctx->codec_tag = ctx->profile_info->tag;

    av_log(avctx, AV_LOG_DEBUG, "profile %d, %d slices/pic, %d pics/frame, %d bits per MB\n",
           ctx->profile, ctx->slices_per_picture, ctx->pictures_per_frame, ctx->bits_per_mb);
    av_log(avctx, AV_LOG_DEBUG,
           "profile %d, %d slices, interlacing: %s, %d bits per MB\n",
           ctx->profile, ctx->slices_per_picture * ctx->pictures_per_frame,
           interlaced ? "yes" : "no", ctx->bits_per_mb);
    av_log(avctx, AV_LOG_DEBUG, "frame size upper bound: %d\n",
           ctx->frame_size_upper_bound);
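Note: with interlaced input the ProRes frame header now signals the field order in its flags byte, alongside the chroma subsampling factor in the top bits. A small sketch of how that byte is composed per the encode_frame() hunk above (helper name is illustrative):

#include <stdint.h>

/* Illustrative only: compose the ProRes frame-header flags byte as done in
 * encode_frame(): chroma factor in bits 6-7, field order in bits 2-3. */
static uint8_t prores_frame_flags(int chroma_factor, int interlaced,
                                  int top_field_first)
{
    uint8_t flags = chroma_factor << 6;

    if (interlaced)
        flags |= top_field_first ? 0x04 : 0x08; /* both bits 0 = progressive */

    return flags;
}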
@@ -40,6 +40,8 @@
#include <process.h>

#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"

typedef struct {
    void *handle;
@@ -5,7 +5,7 @@ OBJS-$(CONFIG_XMM_CLOBBER_TEST) += x86/w64xmmtest.o

MMX-OBJS += x86/dsputil_mmx.o \
            x86/fdct_mmx.o \
            x86/fmtconvert_mmx.o \
            x86/fmtconvert_init.o \
            x86/idct_mmx_xvid.o \
            x86/idct_sse2_xvid.o \
            x86/motion_est_mmx.o \
@@ -13,15 +13,15 @@ MMX-OBJS += x86/dsputil_mmx.o \
            x86/simple_idct_mmx.o \

MMX-OBJS-$(CONFIG_AAC_DECODER) += x86/sbrdsp_init.o
MMX-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp_mmx.o
MMX-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp_init.o
MMX-OBJS-$(CONFIG_CAVS_DECODER) += x86/cavsdsp_mmx.o
MMX-OBJS-$(CONFIG_DNXHD_ENCODER) += x86/dnxhd_mmx.o
MMX-OBJS-$(CONFIG_DWT) += x86/snowdsp_mmx.o \
                          x86/dwt.o
MMX-OBJS-$(CONFIG_ENCODERS) += x86/dsputilenc_mmx.o
MMX-OBJS-$(CONFIG_FFT) += x86/fft.o
MMX-OBJS-$(CONFIG_FFT) += x86/fft_init.o
MMX-OBJS-$(CONFIG_GPL) += x86/idct_mmx.o
MMX-OBJS-$(CONFIG_H264DSP) += x86/h264dsp_mmx.o
MMX-OBJS-$(CONFIG_H264DSP) += x86/h264dsp_init.o
MMX-OBJS-$(CONFIG_H264PRED) += x86/h264_intrapred_init.o
MMX-OBJS-$(CONFIG_LPC) += x86/lpc_mmx.o
MMX-OBJS-$(CONFIG_MPEGAUDIODSP) += x86/mpegaudiodec_mmx.o
@@ -360,13 +360,5 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
        block[0x3E] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
    }
 end:
    /*
    for(i=0; i<last_non_zero_p1; i++)
    {
        int j= zigzag_direct_noperm[i];
        block[block_permute_op(j)]= temp_block[j];
    }
    */

    return last_non_zero_p1 - 1;
}
@@ -32,15 +32,6 @@
#define HMAC_IPAD_VAL 0x36
#define HMAC_OPAD_VAL 0x5C

/**
 * A non-zero transaction id requires the server to send back
 * a _result or _error response.
 * Setting it to 0 marks the message as a notification not
 * requiring feedback.
 */

#define RTMP_NOTIFICATION 0

/**
 * emulated Flash client version - 9.0.124.2 on Linux
 * @{
@@ -91,7 +91,11 @@ typedef struct RTMPContext {
    int nb_invokes;            ///< keeps track of invoke messages
    char* tcurl;               ///< url of the target stream
    char* flashver;            ///< version of the flash plugin
    char* swfhash;             ///< SHA256 hash of the decompressed SWF file (32 bytes)
    int swfhash_len;           ///< length of the SHA256 hash
    int swfsize;               ///< size of the decompressed SWF file
    char* swfurl;              ///< url of the swf player
    char swfverification[42];  ///< hash of the SWF verification
    char* pageurl;             ///< url of the web page
    char* subscribe;           ///< name of live stream to subscribe
    int server_bw;             ///< server bandwidth
@@ -592,6 +596,27 @@ static int gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
    return rtmp_send_packet(rt, &pkt, 0);
}

/**
 * Generate SWF verification message and send it to the server.
 */
static int gen_swf_verification(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Sending SWF verification...\n");
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
                                     0, 44)) < 0)
        return ret;

    p = pkt.data;
    bytestream_put_be16(&p, 27);
    memcpy(p, rt->swfverification, 42);

    return rtmp_send_packet(rt, &pkt, 0);
}

/**
 * Generate server bandwidth message and send it to the server.
 */
@@ -626,10 +651,10 @@ static int gen_check_bw(URLContext *s, RTMPContext *rt)

    p = pkt.data;
    ff_amf_write_string(&p, "_checkbw");
    ff_amf_write_number(&p, RTMP_NOTIFICATION);
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);

    return rtmp_send_packet(rt, &pkt, 0);
    return rtmp_send_packet(rt, &pkt, 1);
}

/**
@@ -776,6 +801,30 @@ static int rtmp_validate_digest(uint8_t *buf, int off)
    return 0;
}

static int rtmp_calc_swf_verification(URLContext *s, RTMPContext *rt,
                                      uint8_t *buf)
{
    uint8_t *p;
    int ret;

    if (rt->swfhash_len != 32) {
        av_log(s, AV_LOG_ERROR,
               "Hash of the decompressed SWF file is not 32 bytes long.\n");
        return AVERROR(EINVAL);
    }

    p = &rt->swfverification[0];
    bytestream_put_byte(&p, 1);
    bytestream_put_byte(&p, 1);
    bytestream_put_be32(&p, rt->swfsize);
    bytestream_put_be32(&p, rt->swfsize);

    if ((ret = ff_rtmp_calc_digest(rt->swfhash, 32, 0, buf, 32, p)) < 0)
        return ret;

    return 0;
}

/**
 * Perform handshake with the server by means of exchanging pseudorandom data
 * signed with HMAC-SHA2 digest.
@@ -866,6 +915,14 @@ static int rtmp_handshake(URLContext *s, RTMPContext *rt)
        }
    }

    /* Generate SWFVerification token (SHA256 HMAC hash of decompressed SWF,
     * key are the last 32 bytes of the server handshake. */
    if (rt->swfsize) {
        if ((ret = rtmp_calc_swf_verification(s, rt, serverdata + 1 +
                                              RTMP_HANDSHAKE_PACKET_SIZE - 32)) < 0)
            return ret;
    }

    ret = ff_rtmp_calc_digest(tosend + 1 + client_pos, 32, 0,
                              rtmp_server_key, sizeof(rtmp_server_key),
                              digest);
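Note: rtmp_calc_swf_verification() above fills RTMPContext.swfverification with the 42-byte token that gen_swf_verification() later sends. Reading the bytestream_put_* calls, the layout appears to be:

    [0]      0x01
    [1]      0x01
    [2..5]   decompressed SWF size (big endian)
    [6..9]   decompressed SWF size, repeated (big endian)
    [10..41] HMAC-SHA256 digest of rtmp_swfhash, keyed with the last 32 bytes
             of the server handshake (the buf argument passed from rtmp_handshake())

gen_swf_verification() wraps this in a 44-byte user control ("ping") message: a big-endian event type of 27 followed by the token.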
@@ -1001,6 +1058,13 @@ static int handle_ping(URLContext *s, RTMPPacket *pkt)
    if (t == 6) {
        if ((ret = gen_pong(s, rt, pkt)) < 0)
            return ret;
    } else if (t == 26) {
        if (rt->swfsize) {
            if ((ret = gen_swf_verification(s, rt)) < 0)
                return ret;
        } else {
            av_log(s, AV_LOG_WARNING, "Ignoring SWFVerification request.\n");
        }
    }

    return 0;
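Note: the user control ("ping") event types involved in handle_ping() above, with illustrative names (only the numeric values appear in the code):

/* Illustrative names only; the code above uses the raw numeric values. */
enum {
    RTMP_UC_PING_REQUEST       = 6,  /* answered by gen_pong()                       */
    RTMP_UC_SWFVERIFY_REQUEST  = 26, /* answered by gen_swf_verification()           */
    RTMP_UC_SWFVERIFY_RESPONSE = 27, /* event type written by gen_swf_verification() */
};

Type 26 requests are answered with a type 27 message carrying the verification token, or ignored with a warning when rtmp_swfsize was not set.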
@@ -1055,15 +1119,27 @@ static int handle_server_bw(URLContext *s, RTMPPacket *pkt)
static int handle_invoke_error(URLContext *s, RTMPPacket *pkt)
{
    const uint8_t *data_end = pkt->data + pkt->data_size;
    char *tracked_method = NULL;
    int level = AV_LOG_ERROR;
    uint8_t tmpstr[256];
    int ret;

    if ((ret = find_tracked_method(s, pkt, 9, &tracked_method)) < 0)
        return ret;

    if (!ff_amf_get_field_value(pkt->data + 9, data_end,
                                "description", tmpstr, sizeof(tmpstr))) {
        av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
        return -1;
        if (tracked_method && !strcmp(tracked_method, "_checkbw")) {
            /* Ignore _checkbw errors. */
            level = AV_LOG_WARNING;
            ret = 0;
        } else
            ret = -1;
        av_log(s, level, "Server error: %s\n", tmpstr);
    }

    return 0;
    av_free(tracked_method);
    return ret;
}

static int handle_invoke_result(URLContext *s, RTMPPacket *pkt)
@@ -1705,6 +1781,8 @@ static const AVOption rtmp_options[] = {
    {"rtmp_pageurl", "URL of the web page in which the media was embedded. By default no value will be sent.", OFFSET(pageurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
    {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_subscribe", "Name of live stream to subscribe to. Defaults to rtmp_playpath.", OFFSET(subscribe), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
    {"rtmp_swfhash", "SHA256 hash of the decompressed SWF file (32 bytes).", OFFSET(swfhash), AV_OPT_TYPE_BINARY, .flags = DEC},
    {"rtmp_swfsize", "Size of the decompressed SWF file, required for SWFVerification.", OFFSET(swfsize), AV_OPT_TYPE_INT, {0}, 0, INT_MAX, DEC},
    {"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_tcurl", "URL of the target stream. Defaults to proto://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    { NULL },
@@ -31,7 +31,7 @@

#define LIBAVFORMAT_VERSION_MAJOR 54
#define LIBAVFORMAT_VERSION_MINOR 23
#define LIBAVFORMAT_VERSION_MICRO 100
#define LIBAVFORMAT_VERSION_MICRO 101

#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
                                               LIBAVFORMAT_VERSION_MINOR, \
@@ -335,7 +335,9 @@ int ff_audio_mix_init(AVAudioResampleContext *avr)
                                       avr->out_channel_layout,
                                       avr->center_mix_level,
                                       avr->surround_mix_level,
                                       avr->lfe_mix_level, 1, matrix_dbl,
                                       avr->lfe_mix_level,
                                       avr->normalize_mix_level,
                                       matrix_dbl,
                                       avr->in_channels,
                                       avr->matrix_encoding);
        if (ret < 0) {
@@ -45,6 +45,7 @@ struct AVAudioResampleContext {
    double center_mix_level;   /**< center mix level */
    double surround_mix_level; /**< surround mix level */
    double lfe_mix_level;      /**< lfe mix level */
    int normalize_mix_level;   /**< enable mix level normalization */
    int force_resampling;      /**< force resampling */
    int filter_size;           /**< length of each FIR filter in the resampling filterbank relative to the cutoff frequency */
    int phase_shift;           /**< log2 of the number of entries in the resampling polyphase filterbank */
@@ -47,6 +47,7 @@ static const AVOption options[] = {
    { "center_mix_level", "Center Mix Level", OFFSET(center_mix_level), AV_OPT_TYPE_DOUBLE, { M_SQRT1_2 }, -32.0, 32.0, PARAM },
    { "surround_mix_level", "Surround Mix Level", OFFSET(surround_mix_level), AV_OPT_TYPE_DOUBLE, { M_SQRT1_2 }, -32.0, 32.0, PARAM },
    { "lfe_mix_level", "LFE Mix Level", OFFSET(lfe_mix_level), AV_OPT_TYPE_DOUBLE, { 0.0 }, -32.0, 32.0, PARAM },
    { "normalize_mix_level", "Normalize Mix Level", OFFSET(normalize_mix_level), AV_OPT_TYPE_INT, { 1 }, 0, 1, PARAM },
    { "force_resampling", "Force Resampling", OFFSET(force_resampling), AV_OPT_TYPE_INT, { 0 }, 0, 1, PARAM },
    { "filter_size", "Resampling Filter Size", OFFSET(filter_size), AV_OPT_TYPE_INT, { 16 }, 0, 32, /* ??? */ PARAM },
    { "phase_shift", "Resampling Phase Shift", OFFSET(phase_shift), AV_OPT_TYPE_INT, { 10 }, 0, 30, /* ??? */ PARAM },
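Note: normalize_mix_level (default 1, matching the value previously hard-coded in the ff_audio_mix_init() call above) decides whether the downmix matrix is rescaled so that no output channel can clip. A minimal sketch of the idea, assuming a row-major out_ch x in_ch matrix; this illustrates the concept, not libavresample's actual matrix-building code:

#include <math.h>

/* Sketch only: scale a mixing matrix so that the largest sum of absolute
 * input coefficients for any output channel does not exceed 1.0. */
static void normalize_matrix(double *matrix, int out_ch, int in_ch)
{
    double maxcoef = 0.0;
    int i, o;

    for (o = 0; o < out_ch; o++) {
        double sum = 0.0;
        for (i = 0; i < in_ch; i++)
            sum += fabs(matrix[o * in_ch + i]);
        if (sum > maxcoef)
            maxcoef = sum;
    }
    if (maxcoef > 1.0)
        for (o = 0; o < out_ch; o++)
            for (i = 0; i < in_ch; i++)
                matrix[o * in_ch + i] /= maxcoef;
}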