2003-08-26 23:23:13 +03:00
/*
2006-10-23 11:57:54 +03:00
* MOV , 3 GP , MP4 muxer
2009-01-19 17:46:40 +02:00
* Copyright ( c ) 2003 Thomas Raivio
* Copyright ( c ) 2004 Gildas Bazin < gbazin at videolan dot org >
2009-03-15 12:53:12 +02:00
* Copyright ( c ) 2009 Baptiste Coudurier < baptiste dot coudurier at gmail dot com >
2003-08-26 23:23:13 +03:00
*
2006-10-07 18:30:46 +03:00
* This file is part of FFmpeg .
*
* FFmpeg is free software ; you can redistribute it and / or
2003-08-26 23:23:13 +03:00
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation ; either
2006-10-07 18:30:46 +03:00
* version 2.1 of the License , or ( at your option ) any later version .
2003-08-26 23:23:13 +03:00
*
2006-10-07 18:30:46 +03:00
* FFmpeg is distributed in the hope that it will be useful ,
2003-08-26 23:23:13 +03:00
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the GNU
* Lesser General Public License for more details .
*
* You should have received a copy of the GNU Lesser General Public
2006-10-07 18:30:46 +03:00
* License along with FFmpeg ; if not , write to the Free Software
2006-01-13 00:43:26 +02:00
* Foundation , Inc . , 51 Franklin Street , Fifth Floor , Boston , MA 02110 - 1301 USA
2003-08-26 23:23:13 +03:00
*/
2009-03-15 12:49:52 +02:00
2013-11-23 23:32:55 +03:00
# include <stdint.h>
2014-04-01 21:19:59 +03:00
# include <inttypes.h>
2013-11-23 23:32:55 +03:00
2010-05-18 22:38:37 +03:00
# include "movenc.h"
2003-08-26 23:23:13 +03:00
# include "avformat.h"
2011-02-24 08:36:02 +02:00
# include "avio_internal.h"
2006-07-12 03:09:34 +03:00
# include "riff.h"
2003-08-26 23:23:13 +03:00
# include "avio.h"
2006-08-01 17:58:15 +03:00
# include "isom.h"
2008-01-11 03:24:55 +02:00
# include "avc.h"
2014-10-07 15:57:19 +03:00
# include "libavcodec/ac3_parser.h"
2009-04-13 19:20:26 +03:00
# include "libavcodec/get_bits.h"
2009-04-12 11:35:26 +03:00
# include "libavcodec/put_bits.h"
2014-09-01 23:57:24 +03:00
# include "libavcodec/vc1_common.h"
2014-04-12 00:35:11 +03:00
# include "libavcodec/raw.h"
2010-05-18 22:47:24 +03:00
# include "internal.h"
# include "libavutil/avstring.h"
2011-11-27 17:04:16 +03:00
# include "libavutil/intfloat.h"
2011-06-04 14:58:23 +03:00
# include "libavutil/mathematics.h"
2015-03-05 12:40:13 +02:00
# include "libavutil/libm.h"
2011-05-21 14:57:04 +03:00
# include "libavutil/opt.h"
2011-05-22 13:46:29 +03:00
# include "libavutil/dict.h"
2014-04-12 00:35:11 +03:00
# include "libavutil/pixdesc.h"
2014-09-04 21:00:01 +03:00
# include "libavutil/timecode.h"
2015-03-05 12:40:13 +02:00
# include "libavutil/color_utils.h"
2014-03-03 17:53:41 +03:00
# include "hevc.h"
2011-05-21 14:58:43 +03:00
# include "rtpenc.h"
2011-12-03 21:32:45 +03:00
# include "mov_chan.h"
2003-08-26 23:23:13 +03:00
2011-05-21 14:57:04 +03:00
/* AVOption table for all MOV-family muxers; "movflags" entries are bit flags
 * ORed into MOVMuxContext.flags, the rest map directly onto context fields. */
static const AVOption options[] = {
    { "movflags", "MOV muxer flags", offsetof(MOVMuxContext, flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "rtphint", "Add RTP hint tracks", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_RTP_HINT}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "moov_size", "maximum moov size so it can be placed at the begin", offsetof(MOVMuxContext, reserved_moov_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, 0 },
    { "empty_moov", "Make the initial moov atom empty", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_EMPTY_MOOV}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "frag_keyframe", "Fragment at video keyframes", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FRAG_KEYFRAME}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "separate_moof", "Write separate moof/mdat atoms for each track", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_SEPARATE_MOOF}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "frag_custom", "Flush fragments on caller requests", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FRAG_CUSTOM}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "isml", "Create a live smooth streaming feed (for pushing to a publishing point)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_ISML}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "faststart", "Run a second pass to put the index (moov atom) at the beginning of the file", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FASTSTART}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "omit_tfhd_offset", "Omit the base data offset in tfhd atoms", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_OMIT_TFHD_OFFSET}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "disable_chpl", "Disable Nero chapter atom", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DISABLE_CHPL}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "default_base_moof", "Set the default-base-is-moof flag in tfhd atoms", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DEFAULT_BASE_MOOF}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "dash", "Write DASH compatible fragmented MP4", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DASH}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "frag_discont", "Signal that the next fragment is discontinuous from earlier ones", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FRAG_DISCONT}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "delay_moov", "Delay writing the initial moov until the first fragment is cut, or until the first fragment flush", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DELAY_MOOV}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "global_sidx", "Write a global sidx index at the start of the file", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_GLOBAL_SIDX}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "write_colr", "Write colr atom (Experimental, may be renamed or changed, do not use from scripts)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_WRITE_COLR}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    { "write_gama", "Write deprecated gama atom", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_WRITE_GAMA}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
    /* shared RTP hinting flags (expands to more AVOption entries) */
    FF_RTP_FLAG_OPTS(MOVMuxContext, rtp_flags),
    { "skip_iods", "Skip writing iods atom.", offsetof(MOVMuxContext, iods_skip), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "iods_audio_profile", "iods audio profile atom.", offsetof(MOVMuxContext, iods_audio_profile), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM },
    { "iods_video_profile", "iods video profile atom.", offsetof(MOVMuxContext, iods_video_profile), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM },
    { "frag_duration", "Maximum fragment duration", offsetof(MOVMuxContext, max_fragment_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "min_frag_duration", "Minimum fragment duration", offsetof(MOVMuxContext, min_fragment_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "frag_size", "Maximum fragment size", offsetof(MOVMuxContext, max_fragment_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "ism_lookahead", "Number of lookahead entries for ISM files", offsetof(MOVMuxContext, ism_lookahead), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "video_track_timescale", "set timescale of all video tracks", offsetof(MOVMuxContext, video_track_timescale), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "brand", "Override major brand", offsetof(MOVMuxContext, major_brand), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = AV_OPT_FLAG_ENCODING_PARAM },
    /* -1 = auto (decided per flavor/flags), 0 = never, 1 = always */
    { "use_editlist", "use edit list", offsetof(MOVMuxContext, use_editlist), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "fragment_index", "Fragment number of the next fragment", offsetof(MOVMuxContext, fragments), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "mov_gamma", "gamma value for gama atom", offsetof(MOVMuxContext, gamma), AV_OPT_TYPE_FLOAT, {.dbl = 0.0}, 0.0, 10, AV_OPT_FLAG_ENCODING_PARAM },
    { "frag_interleave", "Interleave samples within fragments (max number of consecutive samples, lower is tighter interleaving, but with more overhead)", offsetof(MOVMuxContext, frag_interleave), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "encryption_scheme", "Configures the encryption scheme, allowed values are none, cenc-aes-ctr", offsetof(MOVMuxContext, encryption_scheme_str), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = AV_OPT_FLAG_ENCODING_PARAM },
    { "encryption_key", "The media encryption key (hex)", offsetof(MOVMuxContext, encryption_key), AV_OPT_TYPE_BINARY, .flags = AV_OPT_FLAG_ENCODING_PARAM },
    { "encryption_kid", "The media encryption key identifier (hex)", offsetof(MOVMuxContext, encryption_kid), AV_OPT_TYPE_BINARY, .flags = AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};
2011-10-03 20:14:03 +03:00
/* Declares the AVClass for one muxer flavor (mov, mp4, 3gp, ...); all
 * flavors share the options[] table above. */
#define MOV_CLASS(flavor)\
static const AVClass flavor ## _muxer_class = {\
    .class_name = #flavor " muxer",\
    .item_name  = av_default_item_name,\
    .option     = options,\
    .version    = LIBAVUTIL_VERSION_INT,\
};
2013-08-22 14:37:16 +03:00
/* Forward declaration; the definition appears later in this file. */
static int get_moov_size(AVFormatContext *s);
2014-07-31 20:00:05 +03:00
static int utf8len ( const uint8_t * b )
{
int len = 0 ;
int val ;
while ( * b ) {
GET_UTF8 ( val , * b + + , return - 1 ; )
len + + ;
}
return len ;
}
2007-06-12 21:50:50 +03:00
//FIXME support 64 bit variant with wide placeholders
2012-01-30 17:19:15 +03:00
static int64_t update_size ( AVIOContext * pb , int64_t pos )
2003-08-26 23:23:13 +03:00
{
2011-03-03 21:11:45 +02:00
int64_t curpos = avio_tell ( pb ) ;
2011-02-28 15:57:54 +02:00
avio_seek ( pb , pos , SEEK_SET ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , curpos - pos ) ; /* rewrite size */
2011-02-28 15:57:54 +02:00
avio_seek ( pb , curpos , SEEK_SET ) ;
2003-09-10 02:03:04 +03:00
return curpos - pos ;
2003-08-26 23:23:13 +03:00
}
2013-08-20 17:46:09 +03:00
static int co64_required ( const MOVTrack * track )
2012-09-20 11:39:04 +03:00
{
2013-08-20 17:46:09 +03:00
if ( track - > entry > 0 & & track - > cluster [ track - > entry - 1 ] . pos + track - > data_offset > UINT32_MAX )
return 1 ;
2012-09-20 11:39:04 +03:00
return 0 ;
}
2003-11-03 23:51:07 +02:00
/* Chunk offset atom */
2011-02-20 12:04:12 +02:00
static int mov_write_stco_tag ( AVIOContext * pb , MOVTrack * track )
2003-08-26 23:23:13 +03:00
{
int i ;
2013-08-20 17:46:09 +03:00
int mode64 = co64_required ( track ) ; // use 32 bit size variant if possible
2011-03-03 21:11:45 +02:00
int64_t pos = avio_tell ( pb ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* size */
2012-09-20 11:39:04 +03:00
if ( mode64 )
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " co64 " ) ;
2012-09-20 11:39:04 +03:00
else
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " stco " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* version & flags */
2011-11-29 06:03:22 +03:00
avio_wb32 ( pb , track - > chunkCount ) ; /* entry count */
2013-07-07 14:59:47 +03:00
for ( i = 0 ; i < track - > entry ; i + + ) {
2013-07-08 05:08:07 +03:00
if ( ! track - > cluster [ i ] . chunkNum )
2011-11-29 06:03:22 +03:00
continue ;
2013-07-07 14:59:47 +03:00
if ( mode64 = = 1 )
movenc: Buffer the mdat for the initial moov fragment, too
This allows writing QuickTime-compatible fragmented mp4 (with
a non-empty moov atom) to a non-seekable output.
This buffers the mdat for the initial fragment just as it does
for all normal fragments, too. Previously, the resulting
atom structure was mdat,moov, moof,mdat ..., while it now
is moov,mdat, moof,mdat.
Signed-off-by: Martin Storsjö <martin@martin.st>
2012-02-02 13:50:26 +03:00
avio_wb64 ( pb , track - > cluster [ i ] . pos + track - > data_offset ) ;
2006-01-23 16:12:03 +02:00
else
movenc: Buffer the mdat for the initial moov fragment, too
This allows writing QuickTime-compatible fragmented mp4 (with
a non-empty moov atom) to a non-seekable output.
This buffers the mdat for the initial fragment just as it does
for all normal fragments, too. Previously, the resulting
atom structure was mdat,moov, moof,mdat ..., while it now
is moov,mdat, moof,mdat.
Signed-off-by: Martin Storsjö <martin@martin.st>
2012-02-02 13:50:26 +03:00
avio_wb32 ( pb , track - > cluster [ i ] . pos + track - > data_offset ) ;
2003-08-26 23:23:13 +03:00
}
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2003-08-26 23:23:13 +03:00
}
2003-11-03 23:51:07 +02:00
/* Sample size atom */
2011-02-20 12:04:12 +02:00
static int mov_write_stsz_tag ( AVIOContext * pb , MOVTrack * track )
2003-08-26 23:23:13 +03:00
{
2003-09-29 00:09:32 +03:00
int equalChunks = 1 ;
2003-11-03 23:51:07 +02:00
int i , j , entries = 0 , tst = - 1 , oldtst = - 1 ;
2003-08-26 23:23:13 +03:00
2011-03-03 21:11:45 +02:00
int64_t pos = avio_tell ( pb ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " stsz " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* version & flags */
2003-08-26 23:23:13 +03:00
2013-07-07 14:59:47 +03:00
for ( i = 0 ; i < track - > entry ; i + + ) {
tst = track - > cluster [ i ] . size / track - > cluster [ i ] . entries ;
if ( oldtst ! = - 1 & & tst ! = oldtst )
2003-11-03 23:51:07 +02:00
equalChunks = 0 ;
2003-09-29 00:09:32 +03:00
oldtst = tst ;
2006-06-24 21:09:20 +03:00
entries + = track - > cluster [ i ] . entries ;
2003-09-29 00:09:32 +03:00
}
2012-01-05 14:57:05 +03:00
if ( equalChunks & & track - > entry ) {
2013-07-07 14:59:47 +03:00
int sSize = track - > entry ? track - > cluster [ 0 ] . size / track - > cluster [ 0 ] . entries : 0 ;
2011-02-28 02:29:21 +02:00
sSize = FFMAX ( 1 , sSize ) ; // adpcm mono case could make sSize == 0
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , sSize ) ; // sample size
avio_wb32 ( pb , entries ) ; // sample count
2013-07-07 14:59:47 +03:00
} else {
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; // sample size
avio_wb32 ( pb , entries ) ; // sample count
2013-07-07 14:59:47 +03:00
for ( i = 0 ; i < track - > entry ; i + + ) {
for ( j = 0 ; j < track - > cluster [ i ] . entries ; j + + ) {
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , track - > cluster [ i ] . size /
2013-07-07 14:59:47 +03:00
track - > cluster [ i ] . entries ) ;
2003-11-03 23:51:07 +02:00
}
2003-08-26 23:23:13 +03:00
}
}
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2003-08-26 23:23:13 +03:00
}
2003-11-03 23:51:07 +02:00
/* Sample to chunk atom */
2011-02-20 12:04:12 +02:00
static int mov_write_stsc_tag ( AVIOContext * pb , MOVTrack * track )
2003-08-26 23:23:13 +03:00
{
2006-01-23 16:12:03 +02:00
int index = 0 , oldval = - 1 , i ;
2008-10-03 13:16:29 +03:00
int64_t entryPos , curpos ;
2003-09-29 00:09:32 +03:00
2011-03-03 21:11:45 +02:00
int64_t pos = avio_tell ( pb ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " stsc " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; // version & flags
2011-03-03 21:11:45 +02:00
entryPos = avio_tell ( pb ) ;
2011-11-29 06:03:22 +03:00
avio_wb32 ( pb , track - > chunkCount ) ; // entry count
2013-07-07 14:59:47 +03:00
for ( i = 0 ; i < track - > entry ; i + + ) {
2013-07-08 05:08:07 +03:00
if ( oldval ! = track - > cluster [ i ] . samples_in_chunk & & track - > cluster [ i ] . chunkNum ) {
2011-11-29 06:03:22 +03:00
avio_wb32 ( pb , track - > cluster [ i ] . chunkNum ) ; // first chunk
2012-01-30 17:19:15 +03:00
avio_wb32 ( pb , track - > cluster [ i ] . samples_in_chunk ) ; // samples per chunk
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0x1 ) ; // sample description index
2012-01-30 17:19:15 +03:00
oldval = track - > cluster [ i ] . samples_in_chunk ;
2003-09-29 00:09:32 +03:00
index + + ;
2003-08-26 23:23:13 +03:00
}
}
2011-03-03 21:11:45 +02:00
curpos = avio_tell ( pb ) ;
2011-02-28 15:57:54 +02:00
avio_seek ( pb , entryPos , SEEK_SET ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , index ) ; // rewrite size
2011-02-28 15:57:54 +02:00
avio_seek ( pb , curpos , SEEK_SET ) ;
2003-08-26 23:23:13 +03:00
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2003-08-26 23:23:13 +03:00
}
2003-11-03 23:51:07 +02:00
/* Sync sample atom */
2011-02-20 12:04:12 +02:00
static int mov_write_stss_tag ( AVIOContext * pb , MOVTrack * track , uint32_t flag )
2003-08-26 23:23:13 +03:00
{
2008-10-03 13:16:29 +03:00
int64_t curpos , entryPos ;
2006-01-23 16:12:03 +02:00
int i , index = 0 ;
2011-03-03 21:11:45 +02:00
int64_t pos = avio_tell ( pb ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; // size
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , flag = = MOV_SYNC_SAMPLE ? " stss " : " stps " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; // version & flags
2011-03-03 21:11:45 +02:00
entryPos = avio_tell ( pb ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , track - > entry ) ; // entry count
2013-07-07 14:59:47 +03:00
for ( i = 0 ; i < track - > entry ; i + + ) {
2009-05-15 09:11:53 +03:00
if ( track - > cluster [ i ] . flags & flag ) {
2013-07-07 14:59:47 +03:00
avio_wb32 ( pb , i + 1 ) ;
2003-09-29 00:09:32 +03:00
index + + ;
}
}
2011-03-03 21:11:45 +02:00
curpos = avio_tell ( pb ) ;
2011-02-28 15:57:54 +02:00
avio_seek ( pb , entryPos , SEEK_SET ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , index ) ; // rewrite size
2011-02-28 15:57:54 +02:00
avio_seek ( pb , curpos , SEEK_SET ) ;
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2003-08-26 23:23:13 +03:00
}
2011-02-20 12:04:12 +02:00
/* Write the AMR decoder configuration atom: "samr" in MOV, "damr" in 3GP/MP4. */
static int mov_write_amr_tag(AVIOContext *pb, MOVTrack *track)
{
    const int size = 0x11;

    avio_wb32(pb, size);
    ffio_wfourcc(pb, track->mode == MODE_MOV ? "samr" : "damr");
    ffio_wfourcc(pb, "FFMP"); /* vendor */
    avio_w8(pb, 0);           /* decoder version */

    avio_wb16(pb, 0x81FF);    /* Mode set (all modes for AMR_NB) */
    avio_w8(pb, 0x00);        /* Mode change period (no restriction) */
    avio_w8(pb, 0x01);        /* Frames per sample */

    return size;
}
2011-02-20 12:04:12 +02:00
/* Write the AC3SpecificBox ("dac3"): re-parses the fixed AC-3 syncframe
 * header fields from the stored extradata (vos_data, skipping the 4-byte
 * syncword/CRC) and repacks them into the 3-byte dac3 payload.
 * Returns 11 (the full atom size) or -1 if the extradata is too short. */
static int mov_write_ac3_tag(AVIOContext *pb, MOVTrack *track)
{
    GetBitContext gbc;
    PutBitContext pbc;
    uint8_t buf[3];
    int fscod, bsid, bsmod, acmod, lfeon, frmsizecod;

    if (track->vos_len < 7)
        return -1;

    avio_wb32(pb, 11);
    ffio_wfourcc(pb, "dac3");

    /* parse the bit stream information block of the first frame */
    init_get_bits(&gbc, track->vos_data + 4, (track->vos_len - 4) * 8);
    fscod      = get_bits(&gbc, 2);
    frmsizecod = get_bits(&gbc, 6);
    bsid       = get_bits(&gbc, 5);
    bsmod      = get_bits(&gbc, 3);
    acmod      = get_bits(&gbc, 3);
    /* skip the mix-level fields whose presence depends on acmod */
    if (acmod == 2) {
        skip_bits(&gbc, 2); // dsurmod
    } else {
        if ((acmod & 1) && acmod != 1)
            skip_bits(&gbc, 2); // cmixlev
        if (acmod & 4)
            skip_bits(&gbc, 2); // surmixlev
    }
    lfeon = get_bits1(&gbc);

    /* pack the dac3 payload */
    init_put_bits(&pbc, buf, sizeof(buf));
    put_bits(&pbc, 2, fscod);
    put_bits(&pbc, 5, bsid);
    put_bits(&pbc, 3, bsmod);
    put_bits(&pbc, 3, acmod);
    put_bits(&pbc, 1, lfeon);
    put_bits(&pbc, 5, frmsizecod >> 1); // bit_rate_code
    put_bits(&pbc, 5, 0);               // reserved
    flush_put_bits(&pbc);
    avio_write(pb, buf, sizeof(buf));

    return 11;
}
2014-10-07 15:57:19 +03:00
/* Per-track state for E-AC-3: accumulates packets until a full 6-block frame
 * is available and collects the fields of the EC3SpecificBox ("dec3" atom). */
struct eac3_info {
    /* packet being assembled from partial (sub-6-block) frames */
    AVPacket pkt;
    /* set once the dec3 information is complete; later packets are only merged */
    uint8_t ec3_done;
    /* audio blocks accumulated so far in pkt (a full frame has 6) */
    uint8_t num_blocks;

    /* Layout of the EC3SpecificBox */
    /* maximum bitrate */
    uint16_t data_rate;
    /* number of independent substreams */
    uint8_t  num_ind_sub;
    struct {
        /* sample rate code (see ff_ac3_sample_rate_tab) 2 bits */
        uint8_t fscod;
        /* bit stream identification 5 bits */
        uint8_t bsid;
        /* one bit reserved */
        /* audio service mixing (not supported yet) 1 bit */
        /* bit stream mode 3 bits */
        uint8_t bsmod;
        /* audio coding mode 3 bits */
        uint8_t acmod;
        /* sub woofer on 1 bit */
        uint8_t lfeon;
        /* 3 bits reserved */
        /* number of dependent substreams associated with this substream 4 bits */
        uint8_t num_dep_sub;
        /* channel locations of the dependent substream(s), if any, 9 bits */
        uint16_t chan_loc;
        /* if there is no dependent substream, then one bit reserved instead */
    } substream[1]; /* TODO: support 8 independent substreams */
};
2015-02-08 16:48:26 +02:00
# if CONFIG_AC3_PARSER
2014-10-07 15:57:19 +03:00
static int handle_eac3 ( MOVMuxContext * mov , AVPacket * pkt , MOVTrack * track )
{
GetBitContext gbc ;
AC3HeaderInfo tmp , * hdr = & tmp ;
struct eac3_info * info ;
int num_blocks ;
if ( ! track - > eac3_priv & & ! ( track - > eac3_priv = av_mallocz ( sizeof ( * info ) ) ) )
return AVERROR ( ENOMEM ) ;
info = track - > eac3_priv ;
init_get_bits ( & gbc , pkt - > data , pkt - > size * 8 ) ;
2015-10-17 00:23:32 +02:00
if ( avpriv_ac3_parse_header ( & gbc , & hdr ) < 0 ) {
2014-10-07 15:57:19 +03:00
/* drop the packets until we see a good one */
if ( ! track - > entry ) {
av_log ( mov , AV_LOG_WARNING , " Dropping invalid packet from start of the stream \n " ) ;
return 0 ;
}
return AVERROR_INVALIDDATA ;
}
info - > data_rate = FFMAX ( info - > data_rate , hdr - > bit_rate / 1000 ) ;
num_blocks = hdr - > num_blocks ;
if ( ! info - > ec3_done ) {
/* AC-3 substream must be the first one */
if ( hdr - > bitstream_id < = 10 & & hdr - > substreamid ! = 0 )
return AVERROR ( EINVAL ) ;
/* this should always be the case, given that our AC-3 parser
* concatenates dependent frames to their independent parent */
if ( hdr - > frame_type = = EAC3_FRAME_TYPE_INDEPENDENT ) {
/* substream ids must be incremental */
if ( hdr - > substreamid > info - > num_ind_sub + 1 )
return AVERROR ( EINVAL ) ;
if ( hdr - > substreamid = = info - > num_ind_sub + 1 ) {
//info->num_ind_sub++;
2016-04-10 21:58:15 +02:00
avpriv_request_sample ( track - > par , " Multiple independent substreams " ) ;
2014-10-07 15:57:19 +03:00
return AVERROR_PATCHWELCOME ;
} else if ( hdr - > substreamid < info - > num_ind_sub | |
hdr - > substreamid = = 0 & & info - > substream [ 0 ] . bsid ) {
info - > ec3_done = 1 ;
goto concatenate ;
}
}
/* fill the info needed for the "dec3" atom */
info - > substream [ hdr - > substreamid ] . fscod = hdr - > sr_code ;
info - > substream [ hdr - > substreamid ] . bsid = hdr - > bitstream_id ;
info - > substream [ hdr - > substreamid ] . bsmod = hdr - > bitstream_mode ;
info - > substream [ hdr - > substreamid ] . acmod = hdr - > channel_mode ;
info - > substream [ hdr - > substreamid ] . lfeon = hdr - > lfe_on ;
/* Parse dependent substream(s), if any */
if ( pkt - > size ! = hdr - > frame_size ) {
int cumul_size = hdr - > frame_size ;
int parent = hdr - > substreamid ;
while ( cumul_size ! = pkt - > size ) {
int i ;
init_get_bits ( & gbc , pkt - > data + cumul_size , ( pkt - > size - cumul_size ) * 8 ) ;
2015-10-17 00:23:32 +02:00
if ( avpriv_ac3_parse_header ( & gbc , & hdr ) < 0 )
2014-10-07 15:57:19 +03:00
return AVERROR_INVALIDDATA ;
if ( hdr - > frame_type ! = EAC3_FRAME_TYPE_DEPENDENT )
return AVERROR ( EINVAL ) ;
cumul_size + = hdr - > frame_size ;
info - > substream [ parent ] . num_dep_sub + + ;
/* header is parsed up to lfeon, but custom channel map may be needed */
/* skip bsid */
skip_bits ( & gbc , 5 ) ;
/* skip volume control params */
for ( i = 0 ; i < ( hdr - > channel_mode ? 1 : 2 ) ; i + + ) {
skip_bits ( & gbc , 5 ) ; // skip dialog normalization
if ( get_bits1 ( & gbc ) ) {
skip_bits ( & gbc , 8 ) ; // skip compression gain word
}
}
/* get the dependent stream channel map, if exists */
if ( get_bits1 ( & gbc ) )
info - > substream [ parent ] . chan_loc | = ( get_bits ( & gbc , 16 ) > > 5 ) & 0x1f ;
else
info - > substream [ parent ] . chan_loc | = hdr - > channel_mode ;
}
}
}
concatenate :
2014-10-13 10:35:11 +03:00
if ( ! info - > num_blocks & & num_blocks = = 6 )
return pkt - > size ;
else if ( info - > num_blocks + num_blocks > 6 )
return AVERROR_INVALIDDATA ;
if ( ! info - > num_blocks ) {
int ret ;
if ( ( ret = av_copy_packet ( & info - > pkt , pkt ) ) < 0 )
return ret ;
info - > num_blocks = num_blocks ;
return 0 ;
} else {
int ret ;
if ( ( ret = av_grow_packet ( & info - > pkt , pkt - > size ) ) < 0 )
return ret ;
memcpy ( info - > pkt . data + info - > pkt . size - pkt - > size , pkt - > data , pkt - > size ) ;
info - > num_blocks + = num_blocks ;
info - > pkt . duration + = pkt - > duration ;
if ( ( ret = av_copy_packet_side_data ( & info - > pkt , pkt ) ) < 0 )
return ret ;
if ( info - > num_blocks ! = 6 )
return 0 ;
2015-10-27 15:35:30 +02:00
av_packet_unref ( pkt ) ;
2014-10-13 10:35:11 +03:00
if ( ( ret = av_copy_packet ( pkt , & info - > pkt ) ) < 0 )
return ret ;
2015-10-27 15:35:30 +02:00
av_packet_unref ( & info - > pkt ) ;
2014-10-13 10:35:11 +03:00
info - > num_blocks = 0 ;
2014-10-07 15:57:19 +03:00
}
return pkt - > size ;
}
2015-02-08 16:48:26 +02:00
# endif
2014-10-07 15:57:19 +03:00
/* Write the EC3SpecificBox ("dec3") from the state gathered by handle_eac3(),
 * then release the per-track E-AC-3 state.
 * Returns the payload size written, or a negative AVERROR.
 *
 * Fix: the previous allocation of 2 + 4 * (num_ind_sub + 1) bytes reserved
 * only 32 bits per substream while up to 34 bits are written when a substream
 * has dependent substreams (9-bit chan_loc), overflowing the PutBit buffer;
 * the old `size--` bookkeeping also dropped the final partially-filled byte.
 * Allocate for the worst case and derive the byte count from the bits
 * actually written. */
static int mov_write_eac3_tag(AVIOContext *pb, MOVTrack *track)
{
    PutBitContext pbc;
    uint8_t *buf;
    struct eac3_info *info;
    int size, i;

    if (!track->eac3_priv)
        return AVERROR(EINVAL);

    info = track->eac3_priv;
    /* worst case: 16 header bits + 34 bits per independent substream */
    size = 2 + ((34 * (info->num_ind_sub + 1) + 7) >> 3);
    buf = av_malloc(size);
    if (!buf) {
        size = AVERROR(ENOMEM);
        goto end;
    }

    init_put_bits(&pbc, buf, size);
    put_bits(&pbc, 13, info->data_rate);
    put_bits(&pbc, 3, info->num_ind_sub);
    for (i = 0; i <= info->num_ind_sub; i++) {
        put_bits(&pbc, 2, info->substream[i].fscod);
        put_bits(&pbc, 5, info->substream[i].bsid);
        put_bits(&pbc, 1, 0); /* reserved */
        put_bits(&pbc, 1, 0); /* asvc */
        put_bits(&pbc, 3, info->substream[i].bsmod);
        put_bits(&pbc, 3, info->substream[i].acmod);
        put_bits(&pbc, 1, info->substream[i].lfeon);
        put_bits(&pbc, 5, 0); /* reserved */
        put_bits(&pbc, 4, info->substream[i].num_dep_sub);
        if (!info->substream[i].num_dep_sub)
            put_bits(&pbc, 1, 0); /* reserved */
        else
            put_bits(&pbc, 9, info->substream[i].chan_loc);
    }
    flush_put_bits(&pbc);
    /* bytes actually used (flush_put_bits pads to a whole byte) */
    size = put_bits_count(&pbc) >> 3;

    avio_wb32(pb, size + 8);
    ffio_wfourcc(pb, "dec3");
    avio_write(pb, buf, size);

    av_free(buf);

end:
    av_packet_unref(&info->pkt);
    av_freep(&track->eac3_priv);
    return size;
}
2008-04-24 16:59:39 +03:00
/**
 * This function writes extradata "as is".
 * Extradata must be formatted like a valid atom (with size and tag).
 */
static int mov_write_extradata_tag(AVIOContext *pb, MOVTrack *track)
{
    avio_write(pb, track->par->extradata, track->par->extradata_size);
    return track->par->extradata_size; /* number of bytes written */
}
2011-02-20 12:04:12 +02:00
/* Write an 'enda' atom marking PCM samples as little-endian.
 * Returns the fixed atom size (10 bytes). */
static int mov_write_enda_tag(AVIOContext *pb)
{
    const int size = 10;

    avio_wb32(pb, size);
    ffio_wfourcc(pb, "enda");
    avio_wb16(pb, 1); /* 1 = samples stored little endian */
    return size;
}
2012-10-02 13:18:04 +03:00
/* Write an 'enda' atom marking PCM samples as big-endian.
 * Returns the fixed atom size (10 bytes). */
static int mov_write_enda_tag_be(AVIOContext *pb)
{
    const int size = 10;

    avio_wb32(pb, size);
    ffio_wfourcc(pb, "enda");
    avio_wb16(pb, 0); /* 0 = samples stored big endian */
    return size;
}
2012-01-30 17:19:15 +03:00
/* Write an MPEG-4 Systems descriptor header: one tag byte followed by the
 * size encoded as four 7-bit groups, MSB first, with the continuation bit
 * (0x80) set on every group but the last. */
static void put_descr(AVIOContext *pb, int tag, unsigned int size)
{
    int shift;

    avio_w8(pb, tag);
    for (shift = 21; shift > 0; shift -= 7)
        avio_w8(pb, (size >> shift) | 0x80);
    avio_w8(pb, size & 0x7F);
}
2011-04-17 02:19:10 +03:00
/* Average bitrate of the track in bits per second, computed from the total
 * payload size of all written clusters and the track duration expressed in
 * the track timescale. Returns 0 when no duration is known yet. */
static unsigned compute_avg_bitrate(MOVTrack *track)
{
    uint64_t total_size = 0;
    int i;

    if (!track->track_duration)
        return 0;
    for (i = 0; i < track->entry; i++)
        total_size += track->cluster[i].size;
    return total_size * 8 * track->timescale / track->track_duration;
}
2011-02-20 12:04:12 +02:00
/* Write an 'esds' atom (MPEG-4 Elementary Stream Descriptor): ES descriptor,
 * DecoderConfig descriptor (object type, stream type, buffer size, bitrates),
 * optional DecoderSpecific info taken from the VOS data, and an SL descriptor.
 * Returns the atom size as patched in by update_size(). */
static int mov_write_esds_tag(AVIOContext *pb, MOVTrack *track) // Basic
{
    AVCPBProperties *props;
    int64_t pos = avio_tell(pb);
    int decoder_specific_info_len = track->vos_len ? 5 + track->vos_len : 0;
    unsigned avg_bitrate;

    avio_wb32(pb, 0);    // size placeholder, filled by update_size()
    ffio_wfourcc(pb, "esds");
    avio_wb32(pb, 0);    // version + flags

    // ES descriptor
    put_descr(pb, 0x03, 3 + 5 + 13 + decoder_specific_info_len + 5 + 1);
    avio_wb16(pb, track->track_id);
    avio_w8(pb, 0x00);   // flags (= no flags)

    // DecoderConfig descriptor
    put_descr(pb, 0x04, 13 + decoder_specific_info_len);

    // Object type indication: MP2/MP3 above 24 kHz is signalled as 11172-3
    if ((track->par->codec_id == AV_CODEC_ID_MP2 ||
         track->par->codec_id == AV_CODEC_ID_MP3) &&
        track->par->sample_rate > 24000)
        avio_w8(pb, 0x6B); // 11172-3
    else
        avio_w8(pb, ff_codec_get_tag(ff_mp4_obj_type, track->par->codec_id));

    // streamtype: 6 bits (4 = video, 5 = audio), 1 upstream bit,
    // and 1 reserved bit that must be set to 1
    if (track->par->codec_id == AV_CODEC_ID_DVD_SUBTITLE)
        avio_w8(pb, (0x38 << 2) | 1); // flags (= NeroSubpicStream)
    else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO)
        avio_w8(pb, 0x15);            // flags (= Audiostream)
    else
        avio_w8(pb, 0x11);            // flags (= Visualstream)

    props = (AVCPBProperties *)av_stream_get_side_data(track->st,
                                                       AV_PKT_DATA_CPB_PROPERTIES,
                                                       NULL);

    avio_wb24(pb, props ? props->buffer_size / 8 : 0); // Buffersize DB

    avg_bitrate = compute_avg_bitrate(track);
    avio_wb32(pb, props ? FFMAX3(props->max_bitrate, props->avg_bitrate, avg_bitrate) : FFMAX(track->par->bit_rate, avg_bitrate)); // maxbitrate (FIXME should be max rate in any 1 sec window)
    avio_wb32(pb, avg_bitrate);

    if (track->vos_len) {
        // DecoderSpecific info descriptor
        put_descr(pb, 0x05, track->vos_len);
        avio_write(pb, track->vos_data, track->vos_len);
    }

    // SL descriptor
    put_descr(pb, 0x06, 1);
    avio_w8(pb, 0x02);
    return update_size(pb, pos);
}
2012-08-08 00:57:21 +03:00
static int mov_pcm_le_gt16 ( enum AVCodecID codec_id )
2009-11-29 04:27:08 +02:00
{
2012-08-07 23:45:46 +03:00
return codec_id = = AV_CODEC_ID_PCM_S24LE | |
codec_id = = AV_CODEC_ID_PCM_S32LE | |
codec_id = = AV_CODEC_ID_PCM_F32LE | |
codec_id = = AV_CODEC_ID_PCM_F64LE ;
2009-11-29 04:27:08 +02:00
}
2012-10-02 13:18:04 +03:00
static int mov_pcm_be_gt16 ( enum AVCodecID codec_id )
{
return codec_id = = AV_CODEC_ID_PCM_S24BE | |
codec_id = = AV_CODEC_ID_PCM_S32BE | |
codec_id = = AV_CODEC_ID_PCM_F32BE | |
codec_id = = AV_CODEC_ID_PCM_F64BE ;
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
/* Write a Microsoft-style wave-format box: the byteswapped codec tag followed
 * by a WAVEFORMAT(EX) header produced by ff_put_wav_header().
 * Returns the atom size, or a negative AVERROR on failure. */
static int mov_write_ms_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    int ret;

    avio_wb32(pb, 0);          /* size placeholder, filled by update_size() */
    avio_wl32(pb, track->tag); /* store it byteswapped */
    /* ff_put_wav_header() reads codec_tag; derive it from the MOV tag. */
    track->par->codec_tag = av_bswap16(track->tag >> 16);
    ret = ff_put_wav_header(s, pb, track->par, 0);
    if (ret < 0)
        return ret;
    return update_size(pb, pos);
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
/* Write a 'wfex' atom containing a full WAVEFORMATEX header for the stream.
 * Returns the atom size, or a negative AVERROR on failure. */
static int mov_write_wfex_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    int ret;

    avio_wb32(pb, 0); /* size placeholder, filled by update_size() */
    ffio_wfourcc(pb, "wfex");
    ret = ff_put_wav_header(s, pb, track->st->codecpar,
                            FF_PUT_WAV_HEADER_FORCE_WAVEFORMATEX);
    if (ret < 0)
        return ret;
    return update_size(pb, pos);
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
/* Write a QuickTime 'chan' (AudioChannelLayout) atom describing the track's
 * channel layout. Skipped (returning 0) when no layout tag can be derived or
 * when multichannel audio is being written as separate mono tracks.
 * Returns the atom size, or 0 when nothing was written. */
static int mov_write_chan_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track)
{
    uint32_t layout_tag, bitmap;
    int64_t pos = avio_tell(pb);

    layout_tag = ff_mov_get_channel_layout_tag(track->par->codec_id,
                                               track->par->channel_layout,
                                               &bitmap);
    if (!layout_tag) {
        av_log(s, AV_LOG_WARNING, "not writing 'chan' tag due to "
               "lack of channel information\n");
        return 0;
    }

    if (track->multichannel_as_mono)
        return 0;

    avio_wb32(pb, 0);          // Size placeholder, filled by update_size()
    ffio_wfourcc(pb, "chan");  // Type
    avio_w8(pb, 0);            // Version
    avio_wb24(pb, 0);          // Flags
    avio_wb32(pb, layout_tag); // mChannelLayoutTag
    avio_wb32(pb, bitmap);     // mChannelBitmap
    avio_wb32(pb, 0);          // mNumberChannelDescriptions
    return update_size(pb, pos);
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
/* Write a 'wave' atom wrapping the codec-specific audio configuration:
 * an optional 'frma' child, then one codec-dependent child atom (esds, enda,
 * amr/ac3/eac3 info, raw extradata, or an MS wave header), terminated by the
 * mandatory null atom. Returns the atom size via update_size(). */
static int mov_write_wave_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    enum AVCodecID codec_id = track->par->codec_id;

    avio_wb32(pb, 0); /* size placeholder, filled by update_size() */
    ffio_wfourcc(pb, "wave");

    if (codec_id != AV_CODEC_ID_QDM2) {
        avio_wb32(pb, 12); /* size */
        ffio_wfourcc(pb, "frma");
        avio_wl32(pb, track->tag);
    }

    if (codec_id == AV_CODEC_ID_AAC) {
        /* useless atom needed by mplayer, ipod, not needed by quicktime */
        avio_wb32(pb, 12); /* size */
        ffio_wfourcc(pb, "mp4a");
        avio_wb32(pb, 0);
        mov_write_esds_tag(pb, track);
    } else if (mov_pcm_le_gt16(codec_id)) {
        mov_write_enda_tag(pb);
    } else if (mov_pcm_be_gt16(codec_id)) {
        mov_write_enda_tag_be(pb);
    } else if (codec_id == AV_CODEC_ID_AMR_NB) {
        mov_write_amr_tag(pb, track);
    } else if (codec_id == AV_CODEC_ID_AC3) {
        mov_write_ac3_tag(pb, track);
    } else if (codec_id == AV_CODEC_ID_EAC3) {
        mov_write_eac3_tag(pb, track);
    } else if (codec_id == AV_CODEC_ID_ALAC ||
               codec_id == AV_CODEC_ID_QDM2) {
        mov_write_extradata_tag(pb, track);
    } else if (codec_id == AV_CODEC_ID_ADPCM_MS ||
               codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) {
        mov_write_ms_tag(s, pb, track);
    }

    avio_wb32(pb, 8); /* size */
    avio_wb32(pb, 0); /* null (terminator) tag */

    return update_size(pb, pos);
}
2012-01-21 03:16:34 +03:00
/**
 * Fill the 7-byte dvc1 payload (VC1DecSpecStruc + VC1AdvDecSpecStruc +
 * framerate) by parsing the advanced-profile sequence header out of the
 * track's VOS (extradata) buffer.
 *
 * @param track  track whose vos_data holds the VC-1 bitstream headers
 * @param buf    caller-provided 7-byte output buffer
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR(ENOSYS) if no advanced-profile sequence header is found
 */
static int mov_write_dvc1_structs(MOVTrack *track, uint8_t *buf)
{
    const uint8_t *end = track->vos_data + track->vos_len;
    const uint8_t *start, *next;
    uint8_t *unescaped;
    int unescaped_size;
    int seq_found = 0, level = 0, interlace = 0;
    int packet_seq   = track->vc1_info.packet_seq;
    int packet_entry = track->vc1_info.packet_entry;
    int slices       = track->vc1_info.slices;
    PutBitContext pbc;

    if (track->start_dts == AV_NOPTS_VALUE) {
        /* No packets written yet, vc1_info isn't authoritative yet. */
        /* Assume inline sequence and entry headers. */
        packet_seq = packet_entry = 1;
        av_log(NULL, AV_LOG_WARNING,
               "moov atom written before any packets, unable to write correct "
               "dvc1 atom. Set the delay_moov flag to fix this.\n");
    }

    unescaped = av_mallocz(track->vos_len + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!unescaped)
        return AVERROR(ENOMEM);

    /* Walk the start-code-delimited units in the extradata, looking for
     * the (advanced-profile) sequence header. */
    start = find_next_marker(track->vos_data, end);
    for (next = start; next < end; start = next) {
        GetBitContext gb;
        int size;

        next = find_next_marker(start + 4, end);
        size = next - start - 4;
        if (size <= 0)
            continue;

        unescaped_size = vc1_unescape_buffer(start + 4, size, unescaped);
        init_get_bits(&gb, unescaped, 8 * unescaped_size);

        if (AV_RB32(start) != VC1_CODE_SEQHDR)
            continue;

        if (get_bits(&gb, 2) != PROFILE_ADVANCED)
            goto fail; /* dvc1 can only describe advanced profile */

        seq_found = 1;
        level     = get_bits(&gb, 3);
        /* chromaformat, frmrtq_postproc, bitrtq_postproc, postprocflag,
         * width, height */
        skip_bits_long(&gb, 2 + 3 + 5 + 1 + 2 * 12);
        skip_bits(&gb, 1);             /* broadcast */
        interlace = get_bits1(&gb);
        skip_bits(&gb, 4);             /* tfcntrflag, finterpflag, reserved, psf */
    }
    if (!seq_found)
        goto fail;

    init_put_bits(&pbc, buf, 7);
    /* VC1DecSpecStruc */
    put_bits(&pbc, 4, 12);             /* profile - advanced */
    put_bits(&pbc, 3, level);
    put_bits(&pbc, 1, 0);              /* reserved */
    /* VC1AdvDecSpecStruc */
    put_bits(&pbc, 3, level);
    put_bits(&pbc, 1, 0);              /* cbr */
    put_bits(&pbc, 6, 0);              /* reserved */
    put_bits(&pbc, 1, !interlace);     /* no interlace */
    put_bits(&pbc, 1, !packet_seq);    /* no multiple seq */
    put_bits(&pbc, 1, !packet_entry);  /* no multiple entry */
    put_bits(&pbc, 1, !slices);        /* no slice code */
    put_bits(&pbc, 1, 0);              /* no bframe */
    put_bits(&pbc, 1, 0);              /* reserved */

    /* framerate: integral fps if known, 0xffffffff otherwise */
    if (track->st->avg_frame_rate.num > 0 && track->st->avg_frame_rate.den > 0)
        put_bits32(&pbc, track->st->avg_frame_rate.num /
                         track->st->avg_frame_rate.den);
    else
        put_bits32(&pbc, 0xffffffff);

    flush_put_bits(&pbc);

    av_free(unescaped);
    return 0;

fail:
    av_free(unescaped);
    return AVERROR(ENOSYS);
}
/* Write the 'dvc1' atom: the 7-byte decoder spec struct followed by the
 * raw VC-1 sequence/entry headers from the track extradata. */
static int mov_write_dvc1_tag(AVIOContext *pb, MOVTrack *track)
{
    uint8_t buf[7] = { 0 };
    int ret = mov_write_dvc1_structs(track, buf);

    if (ret < 0)
        return ret;

    avio_wb32(pb, track->vos_len + 8 + sizeof(buf));   /* atom size */
    ffio_wfourcc(pb, "dvc1");
    avio_write(pb, buf, sizeof(buf));
    avio_write(pb, track->vos_data, track->vos_len);

    return 0;
}
2011-02-20 12:04:12 +02:00
/* Write a 'glbl' atom carrying the codec extradata verbatim.
 * Returns the number of bytes written (8-byte header + payload). */
static int mov_write_glbl_tag(AVIOContext *pb, MOVTrack *track)
{
    int size = 8 + track->vos_len;

    avio_wb32(pb, size);
    ffio_wfourcc(pb, "glbl");
    avio_write(pb, track->vos_data, track->vos_len);

    return size;
}
2009-11-29 04:03:24 +02:00
/**
* Compute flags for ' lpcm ' tag .
* See CoreAudioTypes and AudioStreamBasicDescription at Apple .
*/
2012-08-05 12:11:04 +03:00
static int mov_get_lpcm_flags ( enum AVCodecID codec_id )
2009-11-29 04:03:24 +02:00
{
switch ( codec_id ) {
2012-08-05 12:11:04 +03:00
case AV_CODEC_ID_PCM_F32BE :
case AV_CODEC_ID_PCM_F64BE :
2009-11-29 04:03:24 +02:00
return 11 ;
2012-08-05 12:11:04 +03:00
case AV_CODEC_ID_PCM_F32LE :
case AV_CODEC_ID_PCM_F64LE :
2009-11-29 04:03:24 +02:00
return 9 ;
2012-08-05 12:11:04 +03:00
case AV_CODEC_ID_PCM_U8 :
2009-11-29 04:03:24 +02:00
return 10 ;
2012-08-05 12:11:04 +03:00
case AV_CODEC_ID_PCM_S16BE :
case AV_CODEC_ID_PCM_S24BE :
case AV_CODEC_ID_PCM_S32BE :
2009-11-29 04:03:24 +02:00
return 14 ;
2012-08-05 12:11:04 +03:00
case AV_CODEC_ID_PCM_S8 :
case AV_CODEC_ID_PCM_S16LE :
case AV_CODEC_ID_PCM_S24LE :
case AV_CODEC_ID_PCM_S32LE :
2009-11-29 04:03:24 +02:00
return 12 ;
default :
return 0 ;
}
}
2012-02-27 01:17:13 +03:00
/* Duration of one cluster, i.e. the DTS delta to the following cluster
 * (or to the end of the track for the last cluster). Returns 0 for an
 * out-of-range index. */
static int get_cluster_duration(MOVTrack *track, int cluster_idx)
{
    int64_t dur;

    if (cluster_idx >= track->entry)
        return 0;

    if (cluster_idx + 1 == track->entry)
        dur = track->track_duration + track->start_dts;
    else
        dur = track->cluster[cluster_idx + 1].dts;
    dur -= track->cluster[cluster_idx].dts;

    av_assert0(dur >= 0);
    av_assert0(dur <= INT_MAX);

    return dur;
}
2012-02-27 00:25:46 +03:00
/* Samples-per-packet value for the version-2 SoundDescription:
 * 1 for raw PCM, the common cluster duration when it is constant across
 * all clusters, and 0 otherwise (unknown / variable). */
static int get_samples_per_packet(MOVTrack *track)
{
    int i, first_duration;

    // return track->par->frame_size;

    /* use 1 for raw PCM */
    if (!track->audio_vbr)
        return 1;

    /* check to see if duration is constant for all clusters */
    if (!track->entry)
        return 0;
    first_duration = get_cluster_duration(track, 0);
    for (i = 1; i < track->entry; i++)
        if (get_cluster_duration(track, i) != first_duration)
            return 0;

    return first_duration;
}
2016-04-10 21:58:15 +02:00
/**
 * Write the audio sample description entry ('stsd' child) for a track:
 * the SoundDescription header (version 0, 1 or 2 depending on mode and
 * codec), the optional V1/V2 extensions, and the codec-specific child
 * atoms (wave/esds/damr/dac3/dec3/alac/wfex/glbl), plus 'chan' and the
 * encryption 'sinf' when applicable.
 */
static int mov_write_audio_tag(AVFormatContext *s, AVIOContext *pb,
                               MOVMuxContext *mov, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    AVCodecParameters *par = track->par;
    uint32_t tag = track->tag;
    int version = 0;

    if (track->mode == MODE_MOV) {
        if (track->timescale > UINT16_MAX) {
            /* V0/V1 store the sample rate in 16.16 fixed point; rates that
             * do not fit force the V2 'lpcm' form. */
            if (mov_get_lpcm_flags(par->codec_id))
                tag = AV_RL32("lpcm");
            version = 2;
        } else if (track->audio_vbr ||
                   mov_pcm_le_gt16(par->codec_id) ||
                   mov_pcm_be_gt16(par->codec_id) ||
                   par->codec_id == AV_CODEC_ID_ADPCM_MS ||
                   par->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV ||
                   par->codec_id == AV_CODEC_ID_QDM2) {
            version = 1;
        }
    }

    avio_wb32(pb, 0); /* size, patched by update_size() */
    if (mov->encryption_scheme != MOV_ENC_NONE)
        ffio_wfourcc(pb, "enca");
    else
        avio_wl32(pb, tag); // store it byteswapped
    avio_wb32(pb, 0); /* Reserved */
    avio_wb16(pb, 0); /* Reserved */
    avio_wb16(pb, 1); /* Data-reference index, XXX  == 1 */

    /* SoundDescription */
    avio_wb16(pb, version); /* Version */
    avio_wb16(pb, 0);       /* Revision level */
    avio_wb32(pb, 0);       /* Reserved */

    if (version == 2) {
        /* CoreAudio-style V2 sound description (see
         * AudioStreamBasicDescription). */
        avio_wb16(pb, 3);          /* always 3 in V2 */
        avio_wb16(pb, 16);         /* always 16 in V2 */
        avio_wb16(pb, 0xfffe);     /* sample rate placeholder */
        avio_wb16(pb, 0);
        avio_wb32(pb, 0x00010000);
        avio_wb32(pb, 72);         /* sizeof struct only */
        avio_wb64(pb, av_double2int(par->sample_rate)); /* rate as double */
        avio_wb32(pb, par->channels);
        avio_wb32(pb, 0x7F000000); /* always 0x7F000000 */
        avio_wb32(pb, av_get_bits_per_sample(par->codec_id));
        avio_wb32(pb, mov_get_lpcm_flags(par->codec_id));
        avio_wb32(pb, track->sample_size);
        avio_wb32(pb, get_samples_per_packet(track));
    } else {
        if (track->mode == MODE_MOV) {
            avio_wb16(pb, par->channels);
            if (par->codec_id == AV_CODEC_ID_PCM_U8 ||
                par->codec_id == AV_CODEC_ID_PCM_S8)
                avio_wb16(pb, 8); /* bits per sample */
            else if (par->codec_id == AV_CODEC_ID_ADPCM_G726)
                avio_wb16(pb, par->bits_per_coded_sample);
            else
                avio_wb16(pb, 16);
            avio_wb16(pb, track->audio_vbr ? -2 : 0); /* compression ID */
        } else { /* reserved for mp4/3gp */
            avio_wb16(pb, 2);
            avio_wb16(pb, 16);
            avio_wb16(pb, 0);
        }

        avio_wb16(pb, 0); /* packet size (= 0) */
        /* 16.16 field can only hold rates up to UINT16_MAX */
        avio_wb16(pb, par->sample_rate <= UINT16_MAX ? par->sample_rate : 0);
        avio_wb16(pb, 0); /* Reserved */
    }

    if (version == 1) { /* SoundDescription V1 extended info */
        if (mov_pcm_le_gt16(par->codec_id) || mov_pcm_be_gt16(par->codec_id))
            avio_wb32(pb, 1); /* must be 1 for uncompressed formats */
        else
            avio_wb32(pb, par->frame_size);                    /* Samples per packet */
        avio_wb32(pb, track->sample_size / par->channels);     /* Bytes per packet */
        avio_wb32(pb, track->sample_size);                     /* Bytes per frame */
        avio_wb32(pb, 2);                                      /* Bytes per sample */
    }

    /* Codec-specific child atoms. */
    if (track->mode == MODE_MOV &&
        (par->codec_id == AV_CODEC_ID_AAC           ||
         par->codec_id == AV_CODEC_ID_AC3           ||
         par->codec_id == AV_CODEC_ID_EAC3          ||
         par->codec_id == AV_CODEC_ID_AMR_NB        ||
         par->codec_id == AV_CODEC_ID_ALAC          ||
         par->codec_id == AV_CODEC_ID_ADPCM_MS      ||
         par->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV ||
         par->codec_id == AV_CODEC_ID_QDM2          ||
         (mov_pcm_le_gt16(par->codec_id) && version == 1) ||
         (mov_pcm_be_gt16(par->codec_id) && version == 1)))
        mov_write_wave_tag(s, pb, track);
    else if (track->tag == MKTAG('m', 'p', '4', 'a'))
        mov_write_esds_tag(pb, track);
    else if (par->codec_id == AV_CODEC_ID_AMR_NB)
        mov_write_amr_tag(pb, track);
    else if (par->codec_id == AV_CODEC_ID_AC3)
        mov_write_ac3_tag(pb, track);
    else if (par->codec_id == AV_CODEC_ID_EAC3)
        mov_write_eac3_tag(pb, track);
    else if (par->codec_id == AV_CODEC_ID_ALAC)
        mov_write_extradata_tag(pb, track);
    else if (par->codec_id == AV_CODEC_ID_WMAPRO)
        mov_write_wfex_tag(s, pb, track);
    else if (track->vos_len > 0)
        mov_write_glbl_tag(pb, track);

    if (track->mode == MODE_MOV && par->codec_type == AVMEDIA_TYPE_AUDIO)
        mov_write_chan_tag(s, pb, track);

    if (mov->encryption_scheme != MOV_ENC_NONE)
        ff_mov_cenc_write_sinf_tag(track, pb, mov->encryption_kid);

    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/* Write the fixed-size 'd263' (H.263 decoder info) atom.
 * Returns the number of bytes written. */
static int mov_write_d263_tag(AVIOContext *pb)
{
    avio_wb32(pb, 0xf);       /* size */
    ffio_wfourcc(pb, "d263");
    ffio_wfourcc(pb, "FFMP"); /* vendor */
    avio_w8(pb, 0);           /* decoder version */
    /* FIXME use AVCodecContext level/profile, when encoder will set values */
    avio_w8(pb, 0xa);         /* level */
    avio_w8(pb, 0);           /* profile */
    return 0xf;
}
2011-02-20 12:04:12 +02:00
/* Write the ISO/IEC 14496-15 "avcC" box containing the H.264 decoder
 * configuration record built from the track's stored global headers. */
static int mov_write_avcc_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t start = avio_tell(pb);

    avio_wb32(pb, 0); /* size placeholder, patched by update_size() */
    ffio_wfourcc(pb, "avcC");
    ff_isom_write_avcc(pb, track->vos_data, track->vos_len);

    return update_size(pb, start);
}
2014-03-03 17:53:41 +03:00
/* Write the "hvcC" box containing the HEVC decoder configuration record
 * built from the track's stored global headers. */
static int mov_write_hvcc_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t start = avio_tell(pb);

    avio_wb32(pb, 0); /* size placeholder, patched by update_size() */
    ffio_wfourcc(pb, "hvcC");
    ff_isom_write_hvcc(pb, track->vos_data, track->vos_len, 0);

    return update_size(pb, start);
}
2007-10-08 14:27:18 +03:00
/* also used by all avid codecs (dv, imx, meridien) and their variants */
2011-02-20 12:04:12 +02:00
static int mov_write_avid_tag ( AVIOContext * pb , MOVTrack * track )
2007-10-08 14:27:18 +03:00
{
int i ;
2015-02-24 12:00:07 +02:00
int interlaced ;
int cid ;
if ( track - > vos_data & & track - > vos_len > 0x29 ) {
if ( track - > vos_data [ 0 ] = = 0x00 & &
track - > vos_data [ 1 ] = = 0x00 & &
track - > vos_data [ 2 ] = = 0x02 & &
track - > vos_data [ 3 ] = = 0x80 & &
( track - > vos_data [ 4 ] = = 0x01 | | track - > vos_data [ 4 ] = = 0x02 ) ) {
/* looks like a DNxHD bit stream */
interlaced = ( track - > vos_data [ 5 ] & 2 ) ;
cid = AV_RB32 ( track - > vos_data + 0x28 ) ;
} else {
av_log ( NULL , AV_LOG_WARNING , " Could not locate DNxHD bit stream in vos_data \n " ) ;
return 0 ;
}
} else {
av_log ( NULL , AV_LOG_WARNING , " Could not locate DNxHD bit stream, vos_data too small \n " ) ;
return 0 ;
}
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 24 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " ACLR " ) ;
ffio_wfourcc ( pb , " ACLR " ) ;
ffio_wfourcc ( pb , " 0001 " ) ;
2016-04-10 21:58:15 +02:00
if ( track - > par - > color_range = = AVCOL_RANGE_MPEG | | /* Legal range (16-235) */
track - > par - > color_range = = AVCOL_RANGE_UNSPECIFIED ) {
2014-07-09 00:09:57 +03:00
avio_wb32 ( pb , 1 ) ; /* Corresponds to 709 in official encoder */
} else { /* Full range (0-255) */
avio_wb32 ( pb , 2 ) ; /* Corresponds to RGB in official encoder */
}
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* unknown */
2007-10-08 14:27:18 +03:00
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 24 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " APRG " ) ;
ffio_wfourcc ( pb , " APRG " ) ;
ffio_wfourcc ( pb , " 0001 " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 1 ) ; /* unknown */
avio_wb32 ( pb , 0 ) ; /* unknown */
2007-10-08 14:27:18 +03:00
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 120 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " ARES " ) ;
ffio_wfourcc ( pb , " ARES " ) ;
ffio_wfourcc ( pb , " 0001 " ) ;
2015-02-24 12:00:07 +02:00
avio_wb32 ( pb , cid ) ; /* dnxhd cid, some id ? */
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
avio_wb32 ( pb , track - > par - > width ) ;
2007-10-08 14:27:18 +03:00
/* values below are based on samples created with quicktime and avid codecs */
2015-02-24 12:00:07 +02:00
if ( interlaced ) {
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
avio_wb32 ( pb , track - > par - > height / 2 ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 2 ) ; /* unknown */
avio_wb32 ( pb , 0 ) ; /* unknown */
avio_wb32 ( pb , 4 ) ; /* unknown */
2007-10-08 14:27:18 +03:00
} else {
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
avio_wb32 ( pb , track - > par - > height ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 1 ) ; /* unknown */
avio_wb32 ( pb , 0 ) ; /* unknown */
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( track - > par - > height = = 1080 )
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 5 ) ; /* unknown */
2007-12-03 12:44:25 +02:00
else
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 6 ) ; /* unknown */
2007-10-08 14:27:18 +03:00
}
/* padding */
for ( i = 0 ; i < 10 ; i + + )
2011-02-21 20:28:17 +02:00
avio_wb64 ( pb , 0 ) ;
2007-10-08 14:27:18 +03:00
return 0 ;
}
2015-01-11 13:11:36 +02:00
/* Write the "DpxE" box; the payload value is taken from the codec extradata
 * when it carries a DpxE marker, otherwise it defaults to 1. */
static int mov_write_dpxe_tag(AVIOContext *pb, MOVTrack *track)
{
    const AVCodecParameters *par = track->par;
    uint32_t value = 1; /* default when no DpxE marker is present */

    if (par->extradata_size >= 12 && !memcmp(par->extradata + 4, "DpxE", 4))
        value = par->extradata[11];

    avio_wb32(pb, 12);
    ffio_wfourcc(pb, "DpxE");
    avio_wb32(pb, value);

    return 0;
}
2009-04-08 01:19:58 +03:00
/* Pick the MP4 sample-description tag for the track, or 0 when the codec
 * has no registered MP4 object type and thus cannot be stored in MP4. */
static int mp4_get_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->par->codec_tag;

    if (!ff_codec_get_tag(ff_mp4_obj_type, track->par->codec_id))
        return 0;

    switch (track->par->codec_id) {
    case AV_CODEC_ID_H264:         tag = MKTAG('a','v','c','1'); break;
    case AV_CODEC_ID_HEVC:         tag = MKTAG('h','e','v','1'); break;
    case AV_CODEC_ID_AC3:          tag = MKTAG('a','c','-','3'); break;
    case AV_CODEC_ID_EAC3:         tag = MKTAG('e','c','-','3'); break;
    case AV_CODEC_ID_DIRAC:        tag = MKTAG('d','r','a','c'); break;
    case AV_CODEC_ID_MOV_TEXT:     tag = MKTAG('t','x','3','g'); break;
    case AV_CODEC_ID_VC1:          tag = MKTAG('v','c','-','1'); break;
    case AV_CODEC_ID_DVD_SUBTITLE: tag = MKTAG('m','p','4','s'); break;
    default:
        /* Generic fallbacks by media type. */
        if (track->par->codec_type == AVMEDIA_TYPE_VIDEO)
            tag = MKTAG('m','p','4','v');
        else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO)
            tag = MKTAG('m','p','4','a');
        break;
    }

    return tag;
}
2009-04-08 01:34:05 +03:00
/* Codec-id / tag pairs allowed in the iPod flavour of MP4.  Scanned front to
 * back by ff_codec_get_tag(), so order matters: MOV_TEXT appears twice and
 * maps to 'tx3g' first; the second 'text' entry presumably exists so the
 * legacy tag is also recognised — NOTE(review): confirm against callers. */
static const AVCodecTag codec_ipod_tags[] = {
    { AV_CODEC_ID_H264,     MKTAG('a','v','c','1') },
    { AV_CODEC_ID_MPEG4,    MKTAG('m','p','4','v') },
    { AV_CODEC_ID_AAC,      MKTAG('m','p','4','a') },
    { AV_CODEC_ID_ALAC,     MKTAG('a','l','a','c') },
    { AV_CODEC_ID_AC3,      MKTAG('a','c','-','3') },
    { AV_CODEC_ID_MOV_TEXT, MKTAG('t','x','3','g') },
    { AV_CODEC_ID_MOV_TEXT, MKTAG('t','e','x','t') },
    { AV_CODEC_ID_NONE, 0 },
};
2009-04-08 01:19:58 +03:00
/* Pick the sample-description tag for the iPod MP4 flavour and warn when the
 * output file extension is one QuickTime/iPod may refuse to play. */
static int ipod_get_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->par->codec_tag;
    int keep_subtitle_tag =
        track->par->codec_type == AVMEDIA_TYPE_SUBTITLE &&
        (tag == MKTAG('t','x','3','g') ||
         tag == MKTAG('t','e','x','t'));

    // keep original tag for subs, ipod supports both formats
    if (!keep_subtitle_tag)
        tag = ff_codec_get_tag(codec_ipod_tags, track->par->codec_id);

    if (!av_match_ext(s->filename, "m4a") &&
        !av_match_ext(s->filename, "m4b") &&
        !av_match_ext(s->filename, "m4v"))
        av_log(s, AV_LOG_WARNING, "Warning, extension is not .m4a, .m4v nor .m4b "
               "Quicktime/Ipod might not play the file\n");

    return tag;
}
/* Pick the DV family sample-description tag from the frame geometry, pixel
 * format and stream time base; returns 0 for unsupported frame heights. */
static int mov_get_dv_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    const AVCodecParameters *par = track->par;
    int tag;

    if (par->width == 720) { /* SD */
        if (par->height == 480) { /* NTSC */
            tag = par->format == AV_PIX_FMT_YUV422P ? MKTAG('d','v','5','n')
                                                    : MKTAG('d','v','c',' ');
        } else { /* PAL */
            if      (par->format == AV_PIX_FMT_YUV422P) tag = MKTAG('d','v','5','p');
            else if (par->format == AV_PIX_FMT_YUV420P) tag = MKTAG('d','v','c','p');
            else                                        tag = MKTAG('d','v','p','p');
        }
    } else if (par->height == 720) { /* HD 720 line */
        tag = track->st->time_base.den == 50 ? MKTAG('d','v','h','q')
                                             : MKTAG('d','v','h','p');
    } else if (par->height == 1080) { /* HD 1080 line */
        tag = track->st->time_base.den == 25 ? MKTAG('d','v','h','5')
                                             : MKTAG('d','v','h','6');
    } else {
        av_log(s, AV_LOG_ERROR, "unsupported height for dv codec\n");
        return 0;
    }

    return tag;
}
2013-10-07 18:35:13 +03:00
/* Return the frame rate used for tag/timecode decisions for this stream.
 * Starts from avg_frame_rate; while the deprecated AVStream.codec is still
 * compiled in (FF_API_LAVF_AVCTX), the inverse of its time_base is preferred
 * for historical compatibility, falling back to avg_frame_rate when that
 * value is not a valid timecode rate. */
static AVRational find_fps(AVFormatContext *s, AVStream *st)
{
    AVRational rate = st->avg_frame_rate;
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
    /* Deprecated path: derive the rate from the codec context time base. */
    rate = av_inv_q(st->codec->time_base);
    if (av_timecode_check_frame_rate(rate) < 0) {
        av_log(s, AV_LOG_DEBUG, "timecode: tbc=%d/%d invalid, fallback on %d/%d\n",
               rate.num, rate.den, st->avg_frame_rate.num, st->avg_frame_rate.den);
        rate = st->avg_frame_rate;
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    return rate;
}
2013-10-07 18:36:23 +03:00
/* Pick the XDCAM sample-description tag for MPEG-2 material from the pixel
 * format, frame geometry, field order and frame rate.  Falls back to the
 * generic 'm2v1' tag when nothing matches. */
static int mov_get_mpeg2_xdcam_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->par->codec_tag;
    int interlaced = track->par->field_order > AV_FIELD_PROGRESSIVE;
    AVStream *st = track->st;
    /* NOTE(review): the rational fps is truncated to int here, so fractional
     * NTSC rates (e.g. 29.97) truncate to 29 and do not hit the 30 entries
     * below — confirm whether that is the intended matching behaviour. */
    int rate = av_q2d(find_fps(s, st));

    if (!tag)
        tag = MKTAG('m', '2', 'v', '1'); //fallback tag

    if (track->par->format == AV_PIX_FMT_YUV420P) {
        if (track->par->width == 1280 && track->par->height == 720) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('x','d','v','4');
                else if (rate == 25) tag = MKTAG('x','d','v','5');
                else if (rate == 30) tag = MKTAG('x','d','v','1');
                else if (rate == 50) tag = MKTAG('x','d','v','a');
                else if (rate == 60) tag = MKTAG('x','d','v','9');
            }
        } else if (track->par->width == 1440 && track->par->height == 1080) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('x','d','v','6');
                else if (rate == 25) tag = MKTAG('x','d','v','7');
                else if (rate == 30) tag = MKTAG('x','d','v','8');
            } else {
                if      (rate == 25) tag = MKTAG('x','d','v','3');
                else if (rate == 30) tag = MKTAG('x','d','v','2');
            }
        } else if (track->par->width == 1920 && track->par->height == 1080) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('x','d','v','d');
                else if (rate == 25) tag = MKTAG('x','d','v','e');
                else if (rate == 30) tag = MKTAG('x','d','v','f');
            } else {
                if      (rate == 25) tag = MKTAG('x','d','v','c');
                else if (rate == 30) tag = MKTAG('x','d','v','b');
            }
        }
    } else if (track->par->format == AV_PIX_FMT_YUV422P) {
        if (track->par->width == 1280 && track->par->height == 720) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('x','d','5','4');
                else if (rate == 25) tag = MKTAG('x','d','5','5');
                else if (rate == 30) tag = MKTAG('x','d','5','1');
                else if (rate == 50) tag = MKTAG('x','d','5','a');
                else if (rate == 60) tag = MKTAG('x','d','5','9');
            }
        } else if (track->par->width == 1920 && track->par->height == 1080) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('x','d','5','d');
                else if (rate == 25) tag = MKTAG('x','d','5','e');
                else if (rate == 30) tag = MKTAG('x','d','5','f');
            } else {
                if      (rate == 25) tag = MKTAG('x','d','5','c');
                else if (rate == 30) tag = MKTAG('x','d','5','b');
            }
        }
    }
    return tag;
}
2014-09-28 02:18:43 +03:00
/*
 * Choose a codec tag (fourcc) for an H.264 track.
 *
 * Keeps the stream's existing codec_tag when set, otherwise starts from the
 * generic 'avci' fallback and refines it to a Panasonic AVC-Intra style tag
 * ('ai5?' for 10-bit 4:2:0, 'ai1?' for 10-bit 4:2:2 — presumably the class
 * 50 / class 100 families; confirm against the AVC-Intra registry) based on
 * pixel format, frame size, field order and frame rate.
 */
static int mov_get_h264_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->par->codec_tag;
    /* anything beyond AV_FIELD_PROGRESSIVE counts as interlaced */
    int interlaced = track->par->field_order > AV_FIELD_PROGRESSIVE;
    AVStream *st = track->st;
    /* NOTE(review): truncating double->int, so e.g. 29.97 fps compares as 29
     * here, not 30 — confirm this matches what find_fps() callers expect */
    int rate = av_q2d(find_fps(s, st));

    if (!tag)
        tag = MKTAG('a', 'v', 'c', 'i'); // fallback tag

    if (track->par->format == AV_PIX_FMT_YUV420P10) {
        if (track->par->width == 960 && track->par->height == 720) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('a','i','5','p');
                else if (rate == 25) tag = MKTAG('a','i','5','q');
                else if (rate == 30) tag = MKTAG('a','i','5','p');
                else if (rate == 50) tag = MKTAG('a','i','5','q');
                else if (rate == 60) tag = MKTAG('a','i','5','p');
            }
        } else if (track->par->width == 1440 && track->par->height == 1080) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('a','i','5','3');
                else if (rate == 25) tag = MKTAG('a','i','5','2');
                else if (rate == 30) tag = MKTAG('a','i','5','3');
            } else {
                if      (rate == 50) tag = MKTAG('a','i','5','5');
                else if (rate == 60) tag = MKTAG('a','i','5','6');
            }
        }
    } else if (track->par->format == AV_PIX_FMT_YUV422P10) {
        if (track->par->width == 1280 && track->par->height == 720) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('a','i','1','p');
                else if (rate == 25) tag = MKTAG('a','i','1','q');
                else if (rate == 30) tag = MKTAG('a','i','1','p');
                else if (rate == 50) tag = MKTAG('a','i','1','q');
                else if (rate == 60) tag = MKTAG('a','i','1','p');
            }
        } else if (track->par->width == 1920 && track->par->height == 1080) {
            if (!interlaced) {
                if      (rate == 24) tag = MKTAG('a','i','1','3');
                else if (rate == 25) tag = MKTAG('a','i','1','2');
                else if (rate == 30) tag = MKTAG('a','i','1','3');
            } else {
                if      (rate == 25) tag = MKTAG('a','i','1','5');
                else if (rate == 50) tag = MKTAG('a','i','1','5');
                else if (rate == 60) tag = MKTAG('a','i','1','6');
            }
        /* parentheses added to make the &&/|| grouping explicit; same
         * evaluation as before (&& binds tighter), but no -Wparentheses */
        } else if ((track->par->width == 4096 && track->par->height == 2160) ||
                   (track->par->width == 3840 && track->par->height == 2160) ||
                   (track->par->width == 2048 && track->par->height == 1080)) {
            tag = MKTAG('a', 'i', 'v', 'x');
        }
    }
    return tag;
}
2009-04-08 01:34:05 +03:00
/*
 * Table mapping raw-video pixel formats to MOV codec tags.
 *
 * bps is the bits_per_coded_sample value stored on the track when the entry
 * is selected (0 for the packed-YUV entries, where no value is exported).
 * Several formats share the generic 'raw ' tag and are distinguished only by
 * bps; table order matters because lookup stops at the first entry whose tag
 * also matches the caller-provided codec_tag (see mov_get_rawvideo_codec_tag).
 */
static const struct {
    enum AVPixelFormat pix_fmt;
    uint32_t tag;
    unsigned bps;
} mov_pix_fmt_tags[] = {
    { AV_PIX_FMT_YUYV422,  MKTAG('y','u','v','2'),  0 },
    { AV_PIX_FMT_YUYV422,  MKTAG('y','u','v','s'),  0 },
    { AV_PIX_FMT_UYVY422,  MKTAG('2','v','u','y'),  0 },
    { AV_PIX_FMT_RGB555BE, MKTAG('r','a','w',' '), 16 },
    { AV_PIX_FMT_RGB555LE, MKTAG('L','5','5','5'), 16 },
    { AV_PIX_FMT_RGB565LE, MKTAG('L','5','6','5'), 16 },
    { AV_PIX_FMT_RGB565BE, MKTAG('B','5','6','5'), 16 },
    { AV_PIX_FMT_GRAY16BE, MKTAG('b','1','6','g'), 16 },
    { AV_PIX_FMT_RGB24,    MKTAG('r','a','w',' '), 24 },
    { AV_PIX_FMT_BGR24,    MKTAG('2','4','B','G'), 24 },
    { AV_PIX_FMT_ARGB,     MKTAG('r','a','w',' '), 32 },
    { AV_PIX_FMT_BGRA,     MKTAG('B','G','R','A'), 32 },
    { AV_PIX_FMT_RGBA,     MKTAG('R','G','B','A'), 32 },
    { AV_PIX_FMT_ABGR,     MKTAG('A','B','G','R'), 32 },
    { AV_PIX_FMT_RGB48BE,  MKTAG('b','4','8','r'), 48 },
};
2009-04-08 01:19:58 +03:00
/*
 * Pick a MOV codec tag for a rawvideo track from its pixel format.
 *
 * Walks mov_pix_fmt_tags; for every entry matching the track's pixel format
 * it records the entry's tag and bits_per_coded_sample, but only stops early
 * when the track's pre-existing codec_tag equals that entry's tag — so a tag
 * supplied by the caller/demuxer wins over plain table order, and otherwise
 * the last matching entry is used.
 *
 * Afterwards, if the selected tag is the generic 'raw ' one, cross-checks
 * that bits_per_coded_sample maps back (via avpriv_pix_fmt_bps_mov) to the
 * track's actual pixel format and logs an error when it does not, since such
 * a file would be unreadable.
 */
static int mov_get_rawvideo_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->par->codec_tag;
    int i;
    enum AVPixelFormat pix_fmt;

    for (i = 0; i < FF_ARRAY_ELEMS(mov_pix_fmt_tags); i++) {
        if (track->par->format == mov_pix_fmt_tags[i].pix_fmt) {
            tag = mov_pix_fmt_tags[i].tag;
            track->par->bits_per_coded_sample = mov_pix_fmt_tags[i].bps;
            /* keep scanning unless the explicit codec_tag confirms this entry */
            if (track->par->codec_tag == mov_pix_fmt_tags[i].tag)
                break;
        }
    }

    pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_mov,
                                  track->par->bits_per_coded_sample);
    if (tag == MKTAG('r','a','w',' ') &&
        track->par->format != pix_fmt &&
        track->par->format != AV_PIX_FMT_NONE)
        av_log(s, AV_LOG_ERROR, "%s rawvideo cannot be written to mov, output file will be unreadable\n",
               av_get_pix_fmt_name(track->par->format));
    return tag;
}
/*
 * Determine the codec tag (fourcc) to write for a MOV track.
 *
 * An already-set codec_tag is kept unless strict compliance is at least
 * FF_COMPLIANCE_NORMAL and the codec is one whose tag must be derived from
 * stream properties (DV, rawvideo, H.263, H.264, MPEG-2, or PCM — detected
 * via a nonzero av_get_bits_per_sample()).  When a tag must be chosen:
 * codec-specific helpers handle DV/rawvideo/MPEG-2(XDCAM)/H.264, then the
 * QuickTime tag tables are consulted per media type, and finally Microsoft
 * (BMP/WAV) tags are used as a last resort with a warning, since QuickTime
 * players may not accept them.
 */
static int mov_get_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    int tag = track->par->codec_tag;

    if (!tag || (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL &&
                 (track->par->codec_id == AV_CODEC_ID_DVVIDEO ||
                  track->par->codec_id == AV_CODEC_ID_RAWVIDEO ||
                  track->par->codec_id == AV_CODEC_ID_H263 ||
                  track->par->codec_id == AV_CODEC_ID_H264 ||
                  track->par->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
                  av_get_bits_per_sample(track->par->codec_id)))) { // pcm audio
        if (track->par->codec_id == AV_CODEC_ID_DVVIDEO)
            tag = mov_get_dv_codec_tag(s, track);
        else if (track->par->codec_id == AV_CODEC_ID_RAWVIDEO)
            tag = mov_get_rawvideo_codec_tag(s, track);
        else if (track->par->codec_id == AV_CODEC_ID_MPEG2VIDEO)
            tag = mov_get_mpeg2_xdcam_codec_tag(s, track);
        else if (track->par->codec_id == AV_CODEC_ID_H264)
            tag = mov_get_h264_codec_tag(s, track);
        else if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) {
            tag = ff_codec_get_tag(ff_codec_movvideo_tags, track->par->codec_id);
            if (!tag) { // if no mac fcc found, try with Microsoft tags
                tag = ff_codec_get_tag(ff_codec_bmp_tags, track->par->codec_id);
                if (tag)
                    av_log(s, AV_LOG_WARNING, "Using MS style video codec tag, "
                           "the file may be unplayable!\n");
            }
        } else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) {
            tag = ff_codec_get_tag(ff_codec_movaudio_tags, track->par->codec_id);
            if (!tag) { // if no mac fcc found, try with Microsoft tags
                int ms_tag = ff_codec_get_tag(ff_codec_wav_tags, track->par->codec_id);
                if (ms_tag) {
                    /* wrap the 16-bit WAVE id into a 'ms??' style fourcc */
                    tag = MKTAG('m', 's', ((ms_tag >> 8) & 0xff), (ms_tag & 0xff));
                    av_log(s, AV_LOG_WARNING, "Using MS style audio codec tag, "
                           "the file may be unplayable!\n");
                }
            }
        } else if (track->par->codec_type == AVMEDIA_TYPE_SUBTITLE)
            tag = ff_codec_get_tag(ff_codec_movsubtitle_tags, track->par->codec_id);
    }

    return tag;
}
2009-04-08 01:34:05 +03:00
/*
 * Codec tags permitted in 3GP files (AV_CODEC_ID_NONE terminated).
 */
static const AVCodecTag codec_3gp_tags[] = {
    { AV_CODEC_ID_H263,     MKTAG('s','2','6','3') },
    { AV_CODEC_ID_H264,     MKTAG('a','v','c','1') },
    { AV_CODEC_ID_MPEG4,    MKTAG('m','p','4','v') },
    { AV_CODEC_ID_AAC,      MKTAG('m','p','4','a') },
    { AV_CODEC_ID_AMR_NB,   MKTAG('s','a','m','r') },
    { AV_CODEC_ID_AMR_WB,   MKTAG('s','a','w','b') },
    { AV_CODEC_ID_MOV_TEXT, MKTAG('t','x','3','g') },
    { AV_CODEC_ID_NONE, 0 },
};
2012-09-17 17:16:16 +03:00
/*
 * Codec tags permitted in F4V (Flash video in MP4) files
 * (AV_CODEC_ID_NONE terminated).
 */
static const AVCodecTag codec_f4v_tags[] = { // XXX: add GIF/PNG/JPEG?
    { AV_CODEC_ID_MP3,  MKTAG('.','m','p','3') },
    { AV_CODEC_ID_AAC,  MKTAG('m','p','4','a') },
    { AV_CODEC_ID_H264, MKTAG('a','v','c','1') },
    { AV_CODEC_ID_VP6A, MKTAG('V','P','6','A') },
    { AV_CODEC_ID_VP6F, MKTAG('V','P','6','F') },
    { AV_CODEC_ID_NONE, 0 },
};
2009-04-08 01:19:58 +03:00
/**
 * Pick the sample description fourcc for a track, depending on which
 * container flavor (MP4/PSP/ISM/iPod/3GP/F4V/MOV) is being written.
 * @return the chosen tag, or 0 if the codec has no mapping
 */
static int mov_find_codec_tag(AVFormatContext *s, MOVTrack *track)
{
    if (track->mode == MODE_MP4 || track->mode == MODE_PSP)
        return mp4_get_codec_tag(s, track);

    if (track->mode == MODE_ISM) {
        int tag = mp4_get_codec_tag(s, track);
        /* ISM additionally allows WMA Pro, which has no MP4 mapping. */
        if (!tag && track->par->codec_id == AV_CODEC_ID_WMAPRO)
            tag = MKTAG('w', 'm', 'a', ' ');
        return tag;
    }

    if (track->mode == MODE_IPOD)
        return ipod_get_codec_tag(s, track);
    if (track->mode & MODE_3GP)
        return ff_codec_get_tag(codec_3gp_tags, track->par->codec_id);
    if (track->mode == MODE_F4V)
        return ff_codec_get_tag(codec_f4v_tags, track->par->codec_id);

    return mov_get_codec_tag(s, track);
}
2008-03-16 15:36:36 +02:00
/** Write uuid atom.
* Needed to make file play in iPods running newest firmware
* goes after avcC atom in moov . trak . mdia . minf . stbl . stsd . avc1
*/
2011-02-20 12:04:12 +02:00
/** Write uuid atom.
 * Needed to make the file play in iPods running newest firmware;
 * goes after the avcC atom in moov.trak.mdia.minf.stbl.stsd.avc1.
 * @return number of bytes written (always 28)
 */
static int mov_write_uuid_tag_ipod(AVIOContext *pb)
{
    /* The 16-byte UUID, as four big-endian 32-bit words. */
    static const uint32_t ipod_uuid[4] = {
        0x6b6840f2, 0x5f244fc5, 0xba39a51b, 0xcf0323f3,
    };
    int i;

    avio_wb32(pb, 28);
    ffio_wfourcc(pb, "uuid");
    for (i = 0; i < 4; i++)
        avio_wb32(pb, ipod_uuid[i]);
    avio_wb32(pb, 0x0); /* 4-byte zero payload */
    return 28;
}
2011-12-14 05:49:06 +03:00
/* 'fiel' atom values indexed by AVFieldOrder; each entry packs the
 * field count in the high byte and the field-detail code in the low
 * byte (presumably per the QTFF image description spec — verify). */
static const uint16_t fiel_data[] = {
    0x0000, 0x0100, 0x0201, 0x0206, 0x0209, 0x020e
};
2016-04-10 21:58:15 +02:00
/**
 * Write a 'fiel' (field handling) atom for the given field order.
 * @return bytes written (10), or 0 when the order has no table entry
 */
static int mov_write_fiel_tag(AVIOContext *pb, MOVTrack *track, int field_order)
{
    unsigned mov_field_order = 0;

    if (field_order >= FF_ARRAY_ELEMS(fiel_data))
        return 0;
    mov_field_order = fiel_data[field_order];

    avio_wb32(pb, 10);
    ffio_wfourcc(pb, "fiel");
    avio_wb16(pb, mov_field_order);
    return 10;
}
2011-02-20 12:04:12 +02:00
/**
 * Write the subtitle sample description box for the track, including
 * an esds atom (DVD subtitles) or raw extradata where present.
 * @return the atom size in bytes
 */
static int mov_write_subtitle_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t start = avio_tell(pb);

    avio_wb32(pb, 0);          /* size, patched by update_size() */
    avio_wl32(pb, track->tag); // store it byteswapped
    avio_wb32(pb, 0);          /* Reserved */
    avio_wb16(pb, 0);          /* Reserved */
    avio_wb16(pb, 1);          /* Data-reference index */

    if (track->par->codec_id == AV_CODEC_ID_DVD_SUBTITLE)
        mov_write_esds_tag(pb, track);
    else if (track->par->extradata_size)
        avio_write(pb, track->par->extradata, track->par->extradata_size);

    return update_size(pb, start);
}
2011-02-20 12:04:12 +02:00
/**
 * Write a 'pasp' (pixel aspect ratio) atom, reducing the track's
 * sample aspect ratio to lowest terms first.
 * @return number of bytes written (always 16)
 */
static int mov_write_pasp_tag(AVIOContext *pb, MOVTrack *track)
{
    AVRational sar;

    av_reduce(&sar.num, &sar.den,
              track->par->sample_aspect_ratio.num,
              track->par->sample_aspect_ratio.den, INT_MAX);

    avio_wb32(pb, 16);
    ffio_wfourcc(pb, "pasp");
    avio_wb32(pb, sar.num);
    avio_wb32(pb, sar.den);
    return 16;
}
2015-03-05 12:40:13 +02:00
/**
 * Write a QuickTime 'gama' atom carrying the stream gamma as 16.16
 * fixed point. If @p gamma is not positive, it is derived from the
 * track's color transfer characteristic. Nothing is written when the
 * gamma value cannot be determined.
 * @return number of bytes written (12), or 0 if no atom was written
 */
static int mov_write_gama_tag(AVIOContext *pb, MOVTrack *track, double gamma)
{
    uint32_t gama = 0;
    if (gamma <= 0.0)
    {
        gamma = avpriv_get_gamma_from_trc(track->par->color_trc);
    }
    av_log(pb, AV_LOG_DEBUG, "gamma value %g\n", gamma);
    if (gamma > 1e-6) {
        gama = (uint32_t)lrint((double)(1 << 16) * gamma);
        /* Fixed: gama is uint32_t, so "%d" was an undefined-behavior
         * format mismatch; use the matching PRIu32 conversion. */
        av_log(pb, AV_LOG_DEBUG, "writing gama value %"PRIu32"\n", gama);
        av_assert0(track->mode == MODE_MOV); /* 'gama' exists in MOV only */
        avio_wb32(pb, 12);
        ffio_wfourcc(pb, "gama");
        avio_wb32(pb, gama);
        return 12;
    }
    else {
        av_log(pb, AV_LOG_WARNING, "gamma value unknown, unable to write gama atom\n");
    }
    return 0;
}
2015-01-26 17:39:24 +02:00
/**
 * Write a 'colr' (color parameters) atom — 'nclc' flavor for MOV,
 * 'nclx' (with a full-range flag byte) for MP4. When all color
 * properties are unspecified, they are first guessed from the
 * picture size, mutating track->par.
 * @return the number of bytes written (18 for MOV, 19 for MP4)
 */
static int mov_write_colr_tag(AVIOContext *pb, MOVTrack *track)
{
    // Ref (MOV): https://developer.apple.com/library/mac/technotes/tn2162/_index.html#//apple_ref/doc/uid/DTS40013070-CH1-TNTAG9
    // Ref (MP4): ISO/IEC 14496-12:2012
    uint16_t pri_code, trc_code, spc_code;

    if (track->par->color_primaries == AVCOL_PRI_UNSPECIFIED &&
        track->par->color_trc       == AVCOL_TRC_UNSPECIFIED &&
        track->par->color_space     == AVCOL_SPC_UNSPECIFIED) {
        /* NOTE(review): the SD branches test track->height while the HD
         * ones test track->par->height — kept as-is, but confirm the two
         * are always equal here. */
        if ((track->par->width >= 1920 && track->par->height >= 1080)
         || (track->par->width == 1280 && track->par->height ==  720)) {
            av_log(NULL, AV_LOG_WARNING, "color primaries unspecified, assuming bt709\n");
            track->par->color_primaries = AVCOL_PRI_BT709;
        } else if (track->par->width == 720 && track->height == 576) {
            av_log(NULL, AV_LOG_WARNING, "color primaries unspecified, assuming bt470bg\n");
            track->par->color_primaries = AVCOL_PRI_BT470BG;
        } else if (track->par->width == 720 &&
                   (track->height == 486 || track->height == 480)) {
            av_log(NULL, AV_LOG_WARNING, "color primaries unspecified, assuming smpte170\n");
            track->par->color_primaries = AVCOL_PRI_SMPTE170M;
        } else {
            av_log(NULL, AV_LOG_WARNING, "color primaries unspecified, unable to assume anything\n");
        }
        /* Derive transfer and matrix from the guessed primaries. */
        switch (track->par->color_primaries) {
        case AVCOL_PRI_BT709:
            track->par->color_trc   = AVCOL_TRC_BT709;
            track->par->color_space = AVCOL_SPC_BT709;
            break;
        case AVCOL_PRI_SMPTE170M:
        case AVCOL_PRI_BT470BG:
            track->par->color_trc   = AVCOL_TRC_BT709;
            track->par->color_space = AVCOL_SPC_SMPTE170M;
            break;
        }
    }

    /* We should only ever be called by MOV or MP4. */
    av_assert0(track->mode == MODE_MOV || track->mode == MODE_MP4);

    /* Map the AV* enums onto the on-disk 16-bit codes. */
    switch (track->par->color_primaries) {
    case AVCOL_PRI_BT709:     pri_code = 1; break;
    case AVCOL_PRI_SMPTE170M:
    case AVCOL_PRI_SMPTE240M: pri_code = 6; break;
    case AVCOL_PRI_BT470BG:   pri_code = 5; break;
    default:                  pri_code = 2;
    }
    switch (track->par->color_trc) {
    case AVCOL_TRC_BT709:     trc_code = 1; break;
    case AVCOL_TRC_SMPTE170M: trc_code = 1; break; // remapped
    case AVCOL_TRC_SMPTE240M: trc_code = 7; break;
    default:                  trc_code = 2;
    }
    switch (track->par->color_space) {
    case AVCOL_SPC_BT709:     spc_code = 1; break;
    case AVCOL_SPC_BT470BG:
    case AVCOL_SPC_SMPTE170M: spc_code = 6; break;
    case AVCOL_SPC_SMPTE240M: spc_code = 7; break;
    default:                  spc_code = 2;
    }

    avio_wb32(pb, 18 + (track->mode == MODE_MP4));
    ffio_wfourcc(pb, "colr");
    ffio_wfourcc(pb, track->mode == MODE_MP4 ? "nclx" : "nclc");
    avio_wb16(pb, pri_code);
    avio_wb16(pb, trc_code);
    avio_wb16(pb, spc_code);

    if (track->mode == MODE_MP4) {
        int full_range = track->par->color_range == AVCOL_RANGE_JPEG;
        avio_w8(pb, full_range << 7);
        return 19;
    }
    return 18;
}
2013-10-07 19:15:25 +03:00
/**
 * Fill @p compressor_name (a buffer of @p len bytes) with the name to
 * embed in the video sample description. In MOV mode, the stream's
 * "encoder" metadata entry wins; otherwise, MPEG-2 video at an XDCAM
 * resolution gets a synthesized "XDCAM ..." name. The buffer is left
 * untouched in all other cases.
 */
static void find_compressor(char *compressor_name, int len, MOVTrack *track)
{
    AVDictionaryEntry *encoder;
    int xdcam_res = (track->par->width == 1280 && track->par->height ==  720)
                 || (track->par->width == 1440 && track->par->height == 1080)
                 || (track->par->width == 1920 && track->par->height == 1080);

    if (track->mode == MODE_MOV &&
        (encoder = av_dict_get(track->st->metadata, "encoder", NULL, 0))) {
        /* Fixed: honor the caller-supplied buffer size instead of the
         * hard-coded 32 used previously (behavior-identical for the
         * existing caller, which passes len == 32). */
        av_strlcpy(compressor_name, encoder->value, len);
    } else if (track->par->codec_id == AV_CODEC_ID_MPEG2VIDEO && xdcam_res) {
        int interlaced = track->par->field_order > AV_FIELD_PROGRESSIVE;
        AVStream *st = track->st;
        int rate = av_q2d(find_fps(NULL, st));
        av_strlcatf(compressor_name, len, "XDCAM ");
        if (track->par->format == AV_PIX_FMT_YUV422P) {
            av_strlcatf(compressor_name, len, "HD422 ");
        } else if (track->par->width == 1440) {
            av_strlcatf(compressor_name, len, "HD ");
        } else
            av_strlcatf(compressor_name, len, "EX ");
        av_strlcatf(compressor_name, len, "%d%c", track->par->height, interlaced ? 'i' : 'p');
        av_strlcatf(compressor_name, len, "%d", rate * (interlaced + 1));
    }
}
2015-01-26 17:39:24 +02:00
static int mov_write_video_tag ( AVIOContext * pb , MOVMuxContext * mov , MOVTrack * track )
2003-08-26 23:23:13 +03:00
{
2011-03-03 21:11:45 +02:00
int64_t pos = avio_tell ( pb ) ;
2011-06-06 15:13:02 +03:00
char compressor_name [ 32 ] = { 0 } ;
2015-02-16 12:40:36 +02:00
int avid = 0 ;
2004-04-07 15:47:33 +03:00
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* size */
2015-12-07 12:01:09 +02:00
if ( mov - > encryption_scheme ! = MOV_ENC_NONE ) {
ffio_wfourcc ( pb , " encv " ) ;
} else {
avio_wl32 ( pb , track - > tag ) ; // store it byteswapped
}
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* Reserved */
avio_wb16 ( pb , 0 ) ; /* Reserved */
avio_wb16 ( pb , 1 ) ; /* Data-reference index */
2003-09-29 00:09:32 +03:00
2011-02-21 20:28:17 +02:00
avio_wb16 ( pb , 0 ) ; /* Codec stream version */
avio_wb16 ( pb , 0 ) ; /* Codec stream revision (=0) */
2006-08-07 17:18:43 +03:00
if ( track - > mode = = MODE_MOV ) {
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " FFMP " ) ; /* Vendor */
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( track - > par - > codec_id = = AV_CODEC_ID_RAWVIDEO ) {
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* Temporal Quality */
avio_wb32 ( pb , 0x400 ) ; /* Spatial Quality = lossless*/
2006-08-07 17:19:33 +03:00
} else {
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0x200 ) ; /* Temporal Quality = normal */
avio_wb32 ( pb , 0x200 ) ; /* Spatial Quality = normal */
2006-08-07 17:19:33 +03:00
}
2006-08-07 17:18:43 +03:00
} else {
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* Reserved */
avio_wb32 ( pb , 0 ) ; /* Reserved */
avio_wb32 ( pb , 0 ) ; /* Reserved */
2006-08-07 17:18:43 +03:00
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
avio_wb16 ( pb , track - > par - > width ) ; /* Video width */
2011-02-21 20:28:17 +02:00
avio_wb16 ( pb , track - > height ) ; /* Video height */
avio_wb32 ( pb , 0x00480000 ) ; /* Horizontal resolution 72dpi */
avio_wb32 ( pb , 0x00480000 ) ; /* Vertical resolution 72dpi */
avio_wb32 ( pb , 0 ) ; /* Data size (= 0) */
avio_wb16 ( pb , 1 ) ; /* Frame count (= 1) */
2005-12-17 20:14:38 +02:00
2006-08-07 17:18:43 +03:00
/* FIXME not sure, ISO 14496-1 draft where it shall be set to 0 */
2013-10-07 19:15:25 +03:00
find_compressor ( compressor_name , 32 , track ) ;
2011-02-21 20:28:17 +02:00
avio_w8 ( pb , strlen ( compressor_name ) ) ;
avio_write ( pb , compressor_name , 31 ) ;
2005-12-17 20:14:38 +02:00
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( track - > mode = = MODE_MOV & & track - > par - > bits_per_coded_sample )
2016-04-10 21:58:15 +02:00
avio_wb16 ( pb , track - > par - > bits_per_coded_sample |
( track - > par - > format = = AV_PIX_FMT_GRAY8 ? 0x20 : 0 ) ) ;
2008-01-31 13:31:56 +02:00
else
2011-02-21 20:28:17 +02:00
avio_wb16 ( pb , 0x18 ) ; /* Reserved */
2016-02-26 06:07:28 +02:00
2016-04-10 21:58:15 +02:00
if ( track - > mode = = MODE_MOV & & track - > par - > format = = AV_PIX_FMT_PAL8 ) {
int pal_size = 1 < < track - > par - > bits_per_coded_sample ;
2016-02-26 06:07:28 +02:00
int i ;
avio_wb16 ( pb , 0 ) ; /* Color table ID */
avio_wb32 ( pb , 0 ) ; /* Color table seed */
avio_wb16 ( pb , 0x8000 ) ; /* Color table flags */
2016-03-03 01:27:53 +02:00
avio_wb16 ( pb , pal_size - 1 ) ; /* Color table size (zero-relative) */
for ( i = 0 ; i < pal_size ; i + + ) {
uint32_t rgb = track - > palette [ i ] ;
2016-02-26 06:07:28 +02:00
uint16_t r = ( rgb > > 16 ) & 0xff ;
uint16_t g = ( rgb > > 8 ) & 0xff ;
uint16_t b = rgb & 0xff ;
avio_wb16 ( pb , 0 ) ;
avio_wb16 ( pb , ( r < < 8 ) | r ) ;
avio_wb16 ( pb , ( g < < 8 ) | g ) ;
avio_wb16 ( pb , ( b < < 8 ) | b ) ;
}
} else
avio_wb16 ( pb , 0xffff ) ; /* Reserved */
2013-07-07 14:59:47 +03:00
if ( track - > tag = = MKTAG ( ' m ' , ' p ' , ' 4 ' , ' v ' ) )
2003-09-29 00:09:32 +03:00
mov_write_esds_tag ( pb , track ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
else if ( track - > par - > codec_id = = AV_CODEC_ID_H263 )
2003-09-29 00:09:32 +03:00
mov_write_d263_tag ( pb ) ;
2016-04-10 21:58:15 +02:00
else if ( track - > par - > codec_id = = AV_CODEC_ID_AVUI | |
track - > par - > codec_id = = AV_CODEC_ID_SVQ3 ) {
2012-05-15 08:41:23 +03:00
mov_write_extradata_tag ( pb , track ) ;
2012-07-25 10:26:17 +03:00
avio_wb32 ( pb , 0 ) ;
2016-04-10 21:58:15 +02:00
} else if ( track - > par - > codec_id = = AV_CODEC_ID_DNXHD ) {
2008-03-20 20:42:44 +02:00
mov_write_avid_tag ( pb , track ) ;
2015-02-16 12:40:36 +02:00
avid = 1 ;
2016-04-10 21:58:15 +02:00
} else if ( track - > par - > codec_id = = AV_CODEC_ID_HEVC )
2014-03-03 17:53:41 +03:00
mov_write_hvcc_tag ( pb , track ) ;
2016-04-10 21:58:15 +02:00
else if ( track - > par - > codec_id = = AV_CODEC_ID_H264 & & ! TAG_IS_AVCI ( track - > tag ) ) {
2006-03-11 20:18:17 +02:00
mov_write_avcc_tag ( pb , track ) ;
2013-07-07 14:59:47 +03:00
if ( track - > mode = = MODE_IPOD )
2008-03-16 15:36:36 +02:00
mov_write_uuid_tag_ipod ( pb ) ;
2016-04-10 21:58:15 +02:00
} else if ( track - > par - > codec_id = = AV_CODEC_ID_VC1 & & track - > vos_len > 0 )
2012-01-21 03:16:34 +03:00
mov_write_dvc1_tag ( pb , track ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
else if ( track - > par - > codec_id = = AV_CODEC_ID_VP6F | |
track - > par - > codec_id = = AV_CODEC_ID_VP6A ) {
2012-09-17 17:16:16 +03:00
/* Don't write any potential extradata here - the cropping
* is signalled via the normal width / height fields . */
2016-04-10 21:58:15 +02:00
} else if ( track - > par - > codec_id = = AV_CODEC_ID_R10K ) {
if ( track - > par - > codec_tag = = MKTAG ( ' R ' , ' 1 ' , ' 0 ' , ' k ' ) )
2015-01-11 13:11:36 +02:00
mov_write_dpxe_tag ( pb , track ) ;
2012-09-17 17:16:16 +03:00
} else if ( track - > vos_len > 0 )
2007-12-19 18:00:08 +02:00
mov_write_glbl_tag ( pb , track ) ;
2003-09-29 00:09:32 +03:00
2016-04-10 21:58:15 +02:00
if ( track - > par - > codec_id ! = AV_CODEC_ID_H264 & &
track - > par - > codec_id ! = AV_CODEC_ID_MPEG4 & &
track - > par - > codec_id ! = AV_CODEC_ID_DNXHD ) {
int field_order = track - > par - > field_order ;
# if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
if ( field_order ! = track - > st - > codec - > field_order & & track - > st - > codec - > field_order ! = AV_FIELD_UNKNOWN )
field_order = track - > st - > codec - > field_order ;
FF_ENABLE_DEPRECATION_WARNINGS
# endif
if ( field_order ! = AV_FIELD_UNKNOWN )
mov_write_fiel_tag ( pb , track , field_order ) ;
}
2013-02-14 17:08:37 +03:00
2015-03-05 12:40:13 +02:00
if ( mov - > flags & FF_MOV_FLAG_WRITE_GAMA ) {
if ( track - > mode = = MODE_MOV )
mov_write_gama_tag ( pb , track , mov - > gamma ) ;
else
av_log ( mov - > fc , AV_LOG_WARNING , " Not writing 'gama' atom. Format is not MOV. \n " ) ;
}
2015-02-26 15:47:01 +02:00
if ( mov - > flags & FF_MOV_FLAG_WRITE_COLR ) {
if ( track - > mode = = MODE_MOV | | track - > mode = = MODE_MP4 )
mov_write_colr_tag ( pb , track ) ;
else
av_log ( mov - > fc , AV_LOG_WARNING , " Not writing 'colr' atom. Format is not MOV or MP4. \n " ) ;
}
2015-01-26 17:39:24 +02:00
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( track - > par - > sample_aspect_ratio . den & & track - > par - > sample_aspect_ratio . num & &
track - > par - > sample_aspect_ratio . den ! = track - > par - > sample_aspect_ratio . num ) {
2010-07-09 00:57:20 +03:00
mov_write_pasp_tag ( pb , track ) ;
}
2015-12-07 12:01:09 +02:00
if ( mov - > encryption_scheme ! = MOV_ENC_NONE ) {
ff_mov_cenc_write_sinf_tag ( track , pb , mov - > encryption_kid ) ;
}
2015-02-16 12:40:36 +02:00
/* extra padding for avid stsd */
/* https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/QTFFChap2/qtff2.html#//apple_ref/doc/uid/TP40000939-CH204-61112 */
if ( avid )
avio_wb32 ( pb , 0 ) ;
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2003-08-26 23:23:13 +03:00
}
2011-02-20 12:04:12 +02:00
/* Write the 'rtp ' sample description for a hint track, including the
 * mandatory 'tims' sub-atom carrying the RTP timestamp timescale.
 * Returns the number of bytes written (via update_size). */
static int mov_write_rtp_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size (patched by update_size at the end) */
    ffio_wfourcc(pb, "rtp ");
    avio_wb32(pb, 0); /* Reserved */
    avio_wb16(pb, 0); /* Reserved */
    avio_wb16(pb, 1); /* Data-reference index */

    avio_wb16(pb, 1); /* Hint track version */
    avio_wb16(pb, 1); /* Highest compatible version */
    avio_wb32(pb, track->max_packet_size); /* Max packet size */

    /* 'tims' sub-atom: fixed 12-byte box holding the RTP timescale */
    avio_wb32(pb, 12); /* size */
    ffio_wfourcc(pb, "tims");
    avio_wb32(pb, track->timescale);

    return update_size(pb, pos);
}
2014-08-10 22:01:33 +03:00
/* Write a 'name' (source reference) atom carrying the reel name for a
 * timecode track. The string length is stored in a 16-bit field, so
 * names of UINT16_MAX bytes or more are rejected; in that case a bare
 * zero 16-bit size is emitted instead (matching the caller's fallback)
 * and AVERROR(EINVAL) is returned. */
static int mov_write_source_reference_tag(AVIOContext *pb, MOVTrack *track, const char *reel_name)
{
    uint64_t str_size = strlen(reel_name);
    int64_t pos = avio_tell(pb);

    if (str_size >= UINT16_MAX) {
        av_log(NULL, AV_LOG_ERROR, "reel_name length %"PRIu64" is too large\n", str_size);
        avio_wb16(pb, 0); /* zero size placeholder so the stream stays parseable */
        return AVERROR(EINVAL);
    }

    avio_wb32(pb, 0);                    /* size */
    ffio_wfourcc(pb, "name");            /* Data format */
    avio_wb16(pb, str_size);             /* string size */
    avio_wb16(pb, track->language);      /* langcode */
    avio_write(pb, reel_name, str_size); /* reel name */
    return update_size(pb, pos);
}
2012-03-05 10:51:08 +03:00
/* Write the 'tmcd' (timecode) sample description atom.
 *
 * Frame duration and frames-per-second are derived from the stream's
 * avg_frame_rate; when that is unset, the deprecated AVStream.codec
 * time_base is used as a fallback (only while FF_API_LAVF_AVCTX is
 * enabled), otherwise an error is returned.
 *
 * Returns the atom size on success, a negative AVERROR on failure.
 *
 * NOTE: the original carried a dead "#if 1 ... #else ... #endif" pair
 * whose #else arm (an alternative extradata-based writer) could never
 * be compiled; the dead branch has been removed. */
static int mov_write_tmcd_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    int frame_duration;
    int nb_frames;
    AVDictionaryEntry *t = NULL;

    if (!track->st->avg_frame_rate.num || !track->st->avg_frame_rate.den) {
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
        /* Fallback for callers still populating the deprecated codec context. */
        frame_duration = av_rescale(track->timescale, track->st->codec->time_base.num, track->st->codec->time_base.den);
        nb_frames      = ROUNDED_DIV(track->st->codec->time_base.den, track->st->codec->time_base.num);
FF_ENABLE_DEPRECATION_WARNINGS
#else
        av_log(NULL, AV_LOG_ERROR, "avg_frame_rate not set for tmcd track.\n");
        return AVERROR(EINVAL);
#endif
    } else {
        frame_duration = av_rescale(track->timescale, track->st->avg_frame_rate.num, track->st->avg_frame_rate.den);
        nb_frames      = ROUNDED_DIV(track->st->avg_frame_rate.den, track->st->avg_frame_rate.num);
    }

    /* The frame count is stored in a single byte below. */
    if (nb_frames > 255) {
        av_log(NULL, AV_LOG_ERROR, "fps %d is too large\n", nb_frames);
        return AVERROR(EINVAL);
    }

    avio_wb32(pb, 0);                     /* size */
    ffio_wfourcc(pb, "tmcd");             /* Data format */
    avio_wb32(pb, 0);                     /* Reserved */
    avio_wb32(pb, 1);                     /* Data reference index */
    avio_wb32(pb, 0);                     /* Flags */
    avio_wb32(pb, track->timecode_flags); /* Flags (timecode) */
    avio_wb32(pb, track->timescale);      /* Timescale */
    avio_wb32(pb, frame_duration);        /* Frame duration */
    avio_w8(pb, nb_frames);               /* Number of frames */
    avio_w8(pb, 0);                       /* Reserved */

    /* Optional source-reference ('name') atom carrying the reel name;
     * not written for MP4, which has no provision for it. */
    t = av_dict_get(track->st->metadata, "reel_name", NULL, 0);
    if (t && utf8len(t->value) && track->mode != MODE_MP4)
        mov_write_source_reference_tag(pb, track, t->value);
    else
        avio_wb16(pb, 0); /* zero size */

    return update_size(pb, pos);
}
2016-04-10 21:58:15 +02:00
/* Write the 'stsd' (sample description) atom: a single entry whose type
 * is chosen from the track's codec parameters. Video/audio/subtitle
 * tracks dispatch on codec_type; hint ('rtp ') and timecode ('tmcd')
 * tracks are identified by codec_tag instead, since they carry no
 * regular media type. */
static int mov_write_stsd_tag(AVFormatContext *s, AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size (patched by update_size) */
    ffio_wfourcc(pb, "stsd");
    avio_wb32(pb, 0); /* version & flags */
    avio_wb32(pb, 1); /* entry count */
    if (track->par->codec_type == AVMEDIA_TYPE_VIDEO)
        mov_write_video_tag(pb, mov, track);
    else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO)
        mov_write_audio_tag(s, pb, mov, track);
    else if (track->par->codec_type == AVMEDIA_TYPE_SUBTITLE)
        mov_write_subtitle_tag(pb, track);
    else if (track->par->codec_tag == MKTAG('r','t','p',' '))
        mov_write_rtp_tag(pb, track);
    else if (track->par->codec_tag == MKTAG('t','m','c','d'))
        mov_write_tmcd_tag(pb, track);
    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/* Write the 'ctts' (composition time to sample) atom. Consecutive
 * samples sharing the same CTS offset are run-length compressed into
 * a single (count, duration) pair. Returns the atom size, or
 * AVERROR(ENOMEM) if the temporary table cannot be allocated. */
static int mov_write_ctts_tag(AVIOContext *pb, MOVTrack *track)
{
    MOVStts *rle;
    uint32_t num_entries;
    uint32_t atom_size;
    int i;

    /* Worst case: every sample has a distinct CTS offset. */
    rle = av_malloc_array((track->entry + 1), sizeof(*rle));
    if (!rle)
        return AVERROR(ENOMEM);

    /* Seed the run-length table with the first sample ... */
    rle[0].count    = 1;
    rle[0].duration = track->cluster[0].cts;
    num_entries     = 0;

    /* ... then either extend the current run or start a new one. */
    for (i = 1; i < track->entry; i++) {
        if (track->cluster[i].cts == rle[num_entries].duration) {
            rle[num_entries].count++; /* compress */
        } else {
            num_entries++;
            rle[num_entries].duration = track->cluster[i].cts;
            rle[num_entries].count    = 1;
        }
    }
    num_entries++; /* account for the final open run */

    atom_size = 16 + (num_entries * 8);
    avio_wb32(pb, atom_size);    /* size */
    ffio_wfourcc(pb, "ctts");
    avio_wb32(pb, 0);            /* version & flags */
    avio_wb32(pb, num_entries);  /* entry count */
    for (i = 0; i < num_entries; i++) {
        avio_wb32(pb, rle[i].count);
        avio_wb32(pb, rle[i].duration);
    }

    av_free(rle);
    return atom_size;
}
2003-11-03 23:51:07 +02:00
/* Time to sample atom */
/* Write the 'stts' atom. For constant-bitrate audio a single
 * (sample_count, 1) entry suffices; otherwise per-cluster durations
 * are run-length compressed. Note the `entries = -1` idiom: the loop
 * pre-increments `entries` before writing the first table slot, so
 * the unsigned wrap from -1 to 0 is intentional. */
static int mov_write_stts_tag(AVIOContext *pb, MOVTrack *track)
{
    MOVStts *stts_entries = NULL;
    uint32_t entries = -1; /* pre-increment idiom; first entry lands at index 0 */
    uint32_t atom_size;
    int i;

    if (track->par->codec_type == AVMEDIA_TYPE_AUDIO && !track->audio_vbr) {
        /* CBR audio: every sample lasts exactly one timescale unit. */
        stts_entries = av_malloc(sizeof(*stts_entries)); /* one entry */
        if (!stts_entries)
            return AVERROR(ENOMEM);
        stts_entries[0].count    = track->sample_count;
        stts_entries[0].duration = 1;
        entries                  = 1;
    } else {
        if (track->entry) {
            stts_entries = av_malloc_array(track->entry, sizeof(*stts_entries)); /* worst case */
            if (!stts_entries)
                return AVERROR(ENOMEM);
        }
        for (i = 0; i < track->entry; i++) {
            int duration = get_cluster_duration(track, i);
            if (i && duration == stts_entries[entries].duration) {
                stts_entries[entries].count++; /* compress */
            } else {
                entries++;
                stts_entries[entries].duration = duration;
                stts_entries[entries].count    = 1;
            }
        }
        entries++; /* last one */
    }

    atom_size = 16 + (entries * 8);
    avio_wb32(pb, atom_size); /* size */
    ffio_wfourcc(pb, "stts");
    avio_wb32(pb, 0);         /* version & flags */
    avio_wb32(pb, entries);   /* entry count */
    for (i = 0; i < entries; i++) {
        avio_wb32(pb, stts_entries[i].count);
        avio_wb32(pb, stts_entries[i].duration);
    }
    av_free(stts_entries);
    return atom_size;
}
2011-02-20 12:04:12 +02:00
/* Write a minimal 'dref' (data reference) atom containing a single
 * self-contained 'url ' entry (flag 1 = data is in this file).
 * Fixed size: 28 bytes. */
static int mov_write_dref_tag(AVIOContext *pb)
{
    avio_wb32(pb, 28); /* size */
    ffio_wfourcc(pb, "dref");
    avio_wb32(pb, 0); /* version & flags */
    avio_wb32(pb, 1); /* entry count */

    avio_wb32(pb, 0xc); /* size */
    //FIXME add the alis and rsrc atom
    ffio_wfourcc(pb, "url ");
    avio_wb32(pb, 1); /* version & flags */

    return 28;
}
2016-04-10 21:58:15 +02:00
/* Write the 'stbl' (sample table) atom and all of its children in the
 * required order: stsd, stts, optional stss/stps (sync samples),
 * optional ctts (composition offsets), then stsc/stsz/stco, plus the
 * CENC atoms when AES-CTR encryption is enabled. Only the ctts writer
 * can fail (allocation); its error is propagated. */
static int mov_write_stbl_tag(AVFormatContext *s, AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    int ret;
    avio_wb32(pb, 0); /* size (patched by update_size) */
    ffio_wfourcc(pb, "stbl");
    mov_write_stsd_tag(s, pb, mov, track);
    mov_write_stts_tag(pb, track);
    /* Sync-sample table only when some, but not all, samples are keyframes
     * (an absent stss means "every sample is a sync sample"). */
    if ((track->par->codec_type == AVMEDIA_TYPE_VIDEO ||
         track->par->codec_tag == MKTAG('r','t','p',' ')) &&
        track->has_keyframes && track->has_keyframes < track->entry)
        mov_write_stss_tag(pb, track, MOV_SYNC_SAMPLE);
    /* Partial sync samples ('stps') are a MOV-only concept. */
    if (track->mode == MODE_MOV && track->flags & MOV_TRACK_STPS)
        mov_write_stss_tag(pb, track, MOV_PARTIAL_SYNC_SAMPLE);
    if (track->par->codec_type == AVMEDIA_TYPE_VIDEO &&
        track->flags & MOV_TRACK_CTTS && track->entry) {
        if ((ret = mov_write_ctts_tag(pb, track)) < 0)
            return ret;
    }
    mov_write_stsc_tag(pb, track);
    mov_write_stsz_tag(pb, track);
    mov_write_stco_tag(pb, track);
    if (mov->encryption_scheme == MOV_ENC_CENC_AES_CTR) {
        ff_mov_cenc_write_stbl_atoms(&track->cenc, pb);
    }
    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/* Write the 'dinf' (data information) atom, which wraps the fixed
 * 'dref' data-reference table. */
static int mov_write_dinf_tag(AVIOContext *pb)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size (patched by update_size) */
    ffio_wfourcc(pb, "dinf");
    mov_write_dref_tag(pb);
    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/* Write an empty 'nmhd' (null media header) atom: 12-byte box with a
 * zero version/flags field. */
static int mov_write_nmhd_tag(AVIOContext *pb)
{
    avio_wb32(pb, 12);
    ffio_wfourcc(pb, "nmhd");
    avio_wb32(pb, 0);
    return 12;
}
2012-03-05 10:51:08 +03:00
/* Write the 'tcmi' (timecode media information) atom describing how a
 * timecode is rendered: black text on a white background in 12pt
 * Lucida Grande, stored as a Pascal string (length byte + bytes).
 * Returns the number of bytes written.
 *
 * Improvement over the original: strlen(font) was evaluated twice
 * (length byte and write); it is now hoisted into font_len. */
static int mov_write_tcmi_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    const char *font = "Lucida Grande";
    int font_len = strlen(font);
    avio_wb32(pb, 0);              /* size (patched by update_size) */
    ffio_wfourcc(pb, "tcmi");      /* timecode media information atom */
    avio_wb32(pb, 0);              /* version & flags */
    avio_wb16(pb, 0);              /* text font */
    avio_wb16(pb, 0);              /* text face */
    avio_wb16(pb, 12);             /* text size */
    avio_wb16(pb, 0);              /* (unknown, not in the QT specs...) */
    avio_wb16(pb, 0x0000);         /* text color (red) */
    avio_wb16(pb, 0x0000);         /* text color (green) */
    avio_wb16(pb, 0x0000);         /* text color (blue) */
    avio_wb16(pb, 0xffff);         /* background color (red) */
    avio_wb16(pb, 0xffff);         /* background color (green) */
    avio_wb16(pb, 0xffff);         /* background color (blue) */
    avio_w8(pb, font_len);         /* font len (part of the pascal string) */
    avio_write(pb, font, font_len); /* font name */
    return update_size(pb, pos);
}
/**
 * Write the 'gmhd' (generic media header) atom, containing a 'gmin'
 * (generic media info) box plus optional 'text' and 'tmcd' sub-atoms.
 *
 * @return number of bytes written (via update_size()).
 */
static int mov_write_gmhd_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0);           /* size placeholder, patched below */
    ffio_wfourcc(pb, "gmhd");
    avio_wb32(pb, 0x18);        /* gmin size */
    ffio_wfourcc(pb, "gmin");   /* generic media info */
    avio_wb32(pb, 0);           /* version & flags */
    avio_wb16(pb, 0x40);        /* graphics mode = */
    avio_wb16(pb, 0x8000);      /* opColor (r?) */
    avio_wb16(pb, 0x8000);      /* opColor (g?) */
    avio_wb16(pb, 0x8000);      /* opColor (b?) */
    avio_wb16(pb, 0);           /* balance */
    avio_wb16(pb, 0);           /* reserved */

    /*
     * This special text atom is required for
     * Apple Quicktime chapters. The contents
     * don't appear to be documented, so the
     * bytes are copied verbatim.
     */
    /* Skipped for CEA-608 closed-caption ('c608') tracks. */
    if (track->tag != MKTAG('c','6','0','8')) {
        avio_wb32(pb, 0x2C);    /* size */
        ffio_wfourcc(pb, "text");
        avio_wb16(pb, 0x01);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x01);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x00);
        avio_wb32(pb, 0x00004000);
        avio_wb16(pb, 0x0000);
    }

    /* Timecode tracks additionally get a nested 'tmcd' atom wrapping
     * the timecode media information ('tcmi'). */
    if (track->par->codec_tag == MKTAG('t','m','c','d')) {
        int64_t tmcd_pos = avio_tell(pb);
        avio_wb32(pb, 0);       /* size */
        ffio_wfourcc(pb, "tmcd");
        mov_write_tcmi_tag(pb, track);
        update_size(pb, tmcd_pos);
    }
    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/**
 * Write the 'smhd' (sound media header) atom, a fixed 16-byte atom used
 * for audio tracks.
 *
 * @return number of bytes written (always 16).
 */
static int mov_write_smhd_tag(AVIOContext *pb)
{
    avio_wb32(pb, 16);        /* size */
    ffio_wfourcc(pb, "smhd");
    avio_wb32(pb, 0);         /* version & flags */
    avio_wb16(pb, 0);         /* reserved (balance, normally = 0) */
    avio_wb16(pb, 0);         /* reserved */
    return 16;
}
2011-02-20 12:04:12 +02:00
/**
 * Write the 'vmhd' (video media header) atom, a fixed 20-byte atom used
 * for video tracks.
 *
 * @return number of bytes written (always 0x14 = 20).
 */
static int mov_write_vmhd_tag(AVIOContext *pb)
{
    avio_wb32(pb, 0x14);      /* size (always 0x14) */
    ffio_wfourcc(pb, "vmhd");
    avio_wb32(pb, 0x01);      /* version & flags */
    avio_wb64(pb, 0);         /* reserved (graphics mode = copy) */
    return 0x14;
}
2014-04-21 16:46:22 +03:00
/**
 * Check whether a track carries closed captions ('c708' CEA-708 or
 * 'c608' CEA-608 sample tags).
 *
 * @return non-zero if the track is a closed-caption track.
 */
static int is_clcp_track(MOVTrack *track)
{
    uint32_t tag = track->tag;
    return tag == MKTAG('c','7','0','8') || tag == MKTAG('c','6','0','8');
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
/**
 * Write the 'hdlr' (handler reference) atom, declaring the media handler
 * type ("vide", "soun", "clcp", ...) and a human-readable handler name.
 *
 * @param track may be NULL; a NULL track writes the data-handler
 *              ("dhlr"/"url ") variant used inside 'minf'.
 * @return number of bytes written (via update_size()).
 */
static int mov_write_hdlr_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track)
{
    const char *hdlr, *descr = NULL, *hdlr_type = NULL;
    int64_t pos = avio_tell(pb);

    /* Defaults: data handler. These also act as the dummy values written
     * for unrecognized track types below. */
    hdlr      = "dhlr";
    hdlr_type = "url ";
    descr     = "DataHandler";

    if (track) {
        /* Component type is only written for MOV; MP4-family brands use
         * a zeroed pre_defined field instead. */
        hdlr = (track->mode == MODE_MOV) ? "mhlr" : "\0\0\0\0";
        if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) {
            hdlr_type = "vide";
            descr     = "VideoHandler";
        } else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) {
            hdlr_type = "soun";
            descr     = "SoundHandler";
        } else if (track->par->codec_type == AVMEDIA_TYPE_SUBTITLE) {
            if (is_clcp_track(track)) {
                hdlr_type = "clcp";
                descr     = "ClosedCaptionHandler";
            } else {
                /* Subtitle handler type depends on the sample tag. */
                if (track->tag == MKTAG('t','x','3','g')) {
                    hdlr_type = "sbtl";
                } else if (track->tag == MKTAG('m','p','4','s')) {
                    hdlr_type = "subp";
                } else {
                    hdlr_type = "text";
                }
                descr = "SubtitleHandler";
            }
        } else if (track->par->codec_tag == MKTAG('r','t','p',' ')) {
            hdlr_type = "hint";
            descr     = "HintHandler";
        } else if (track->par->codec_tag == MKTAG('t','m','c','d')) {
            hdlr_type = "tmcd";
            descr     = "TimeCodeHandler";
        } else {
            /* Unknown codec tag: warn and fall through with the dummy
             * defaults set above. */
            char tag_buf[32];
            av_get_codec_tag_string(tag_buf, sizeof(tag_buf),
                                    track->par->codec_tag);
            av_log(s, AV_LOG_WARNING,
                   "Unknown hldr_type for %s / 0x%04X, writing dummy values\n",
                   tag_buf, track->par->codec_tag);
        }
        if (track->st) {
            // hdlr.name is used by some players to identify the content title
            // of the track. So if an alternate handler description is
            // specified, use it.
            AVDictionaryEntry *t;
            t = av_dict_get(track->st->metadata, "handler", NULL, 0);
            if (t && utf8len(t->value))
                descr = t->value;
        }
    }

    avio_wb32(pb, 0);            /* size placeholder, patched below */
    ffio_wfourcc(pb, "hdlr");
    avio_wb32(pb, 0);            /* Version & flags */
    avio_write(pb, hdlr, 4);     /* handler */
    ffio_wfourcc(pb, hdlr_type); /* handler type */
    avio_wb32(pb, 0);            /* reserved */
    avio_wb32(pb, 0);            /* reserved */
    avio_wb32(pb, 0);            /* reserved */
    /* MOV stores the name as a Pascal string (length prefix, no NUL);
     * ISO-BMFF brands store it as a NUL-terminated C string. */
    if (!track || track->mode == MODE_MOV)
        avio_w8(pb, strlen(descr));         /* pascal string */
    avio_write(pb, descr, strlen(descr));   /* handler description */
    if (track && track->mode != MODE_MOV)
        avio_w8(pb, 0);                     /* c string */
    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/**
 * Write the 'hmhd' (hint media header) atom for RTP hint tracks.
 *
 * @return number of bytes written (always 28).
 */
static int mov_write_hmhd_tag(AVIOContext *pb)
{
    /* This atom must be present, but leaving the values at zero
     * seems harmless. */
    avio_wb32(pb, 28);        /* size */
    ffio_wfourcc(pb, "hmhd");
    avio_wb32(pb, 0);         /* version, flags */
    avio_wb16(pb, 0);         /* maxPDUsize */
    avio_wb16(pb, 0);         /* avgPDUsize */
    avio_wb32(pb, 0);         /* maxbitrate */
    avio_wb32(pb, 0);         /* avgbitrate */
    avio_wb32(pb, 0);         /* reserved */
    return 28;
}
2016-04-10 21:58:15 +02:00
/**
 * Write the 'minf' (media information) atom: the media-type-specific
 * header (vmhd/smhd/gmhd/nmhd/hmhd), an optional data handler, the data
 * information atom and the sample table.
 *
 * @return number of bytes written (via update_size()), or a negative
 *         AVERROR code if writing the sample table fails.
 */
static int mov_write_minf_tag(AVFormatContext *s, AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    int ret;
    avio_wb32(pb, 0);         /* size placeholder, patched below */
    ffio_wfourcc(pb, "minf");
    /* Select the media header matching the track's media type. */
    if (track->par->codec_type == AVMEDIA_TYPE_VIDEO)
        mov_write_vmhd_tag(pb);
    else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO)
        mov_write_smhd_tag(pb);
    else if (track->par->codec_type == AVMEDIA_TYPE_SUBTITLE) {
        /* QuickTime text and closed-caption tracks use the generic media
         * header; other subtitle formats use the null media header. */
        if (track->tag == MKTAG('t','e','x','t') || is_clcp_track(track)) {
            mov_write_gmhd_tag(pb, track);
        } else {
            mov_write_nmhd_tag(pb);
        }
    } else if (track->tag == MKTAG('r','t','p',' ')) {
        mov_write_hmhd_tag(pb);
    } else if (track->tag == MKTAG('t','m','c','d')) {
        /* MP4 timecode tracks take nmhd; MOV takes gmhd. */
        if (track->mode == MODE_MP4)
            mov_write_nmhd_tag(pb);
        else
            mov_write_gmhd_tag(pb, track);
    }
    if (track->mode == MODE_MOV) /* FIXME: Why do it for MODE_MOV only ? */
        mov_write_hdlr_tag(s, pb, NULL);
    mov_write_dinf_tag(pb);
    if ((ret = mov_write_stbl_tag(s, pb, mov, track)) < 0)
        return ret;
    return update_size(pb, pos);
}
2014-10-29 22:51:29 +02:00
static int mov_write_mdhd_tag ( AVIOContext * pb , MOVMuxContext * mov ,
MOVTrack * track )
2003-08-26 23:23:13 +03:00
{
2012-01-30 17:19:15 +03:00
int version = track - > track_duration < INT32_MAX ? 0 : 1 ;
2006-03-26 16:34:51 +03:00
2012-02-17 15:40:24 +03:00
if ( track - > mode = = MODE_ISM )
version = 1 ;
2011-02-21 20:28:17 +02:00
( version = = 1 ) ? avio_wb32 ( pb , 44 ) : avio_wb32 ( pb , 32 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " mdhd " ) ;
2011-02-21 20:28:17 +02:00
avio_w8 ( pb , version ) ;
avio_wb24 ( pb , 0 ) ; /* flags */
2006-03-26 16:34:51 +03:00
if ( version = = 1 ) {
2011-02-21 20:28:17 +02:00
avio_wb64 ( pb , track - > time ) ;
avio_wb64 ( pb , track - > time ) ;
2006-03-26 16:34:51 +03:00
} else {
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , track - > time ) ; /* creation time */
avio_wb32 ( pb , track - > time ) ; /* modification time */
2006-03-26 16:34:51 +03:00
}
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , track - > timescale ) ; /* time scale (sample rate for audio) */
2014-10-29 22:51:29 +02:00
if ( ! track - > entry & & mov - > mode = = MODE_ISM )
2012-01-28 01:28:19 +03:00
( version = = 1 ) ? avio_wb64 ( pb , UINT64_C ( 0xffffffffffffffff ) ) : avio_wb32 ( pb , 0xffffffff ) ;
2014-10-29 22:51:29 +02:00
else if ( ! track - > entry )
( version = = 1 ) ? avio_wb64 ( pb , 0 ) : avio_wb32 ( pb , 0 ) ;
2012-01-28 01:28:19 +03:00
else
( version = = 1 ) ? avio_wb64 ( pb , track - > track_duration ) : avio_wb32 ( pb , track - > track_duration ) ; /* duration */
2011-02-21 20:28:17 +02:00
avio_wb16 ( pb , track - > language ) ; /* language */
avio_wb16 ( pb , 0 ) ; /* reserved (quality) */
2007-05-30 03:08:32 +03:00
2013-07-07 14:59:47 +03:00
if ( version ! = 0 & & track - > mode = = MODE_MOV ) {
2007-05-30 03:08:32 +03:00
av_log ( NULL , AV_LOG_ERROR ,
2013-07-07 14:59:47 +03:00
" FATAL error, file duration too long for timebase, this file will not be \n "
" playable with quicktime. Choose a different timebase or a different \n "
" container format \n " ) ;
2007-05-30 03:08:32 +03:00
}
2003-08-26 23:23:13 +03:00
return 32 ;
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
static int mov_write_mdia_tag ( AVFormatContext * s , AVIOContext * pb ,
MOVMuxContext * mov , MOVTrack * track )
2003-08-26 23:23:13 +03:00
{
2011-03-03 21:11:45 +02:00
int64_t pos = avio_tell ( pb ) ;
2015-01-25 15:32:22 +02:00
int ret ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " mdia " ) ;
2014-10-29 22:51:29 +02:00
mov_write_mdhd_tag ( pb , mov , track ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
mov_write_hdlr_tag ( s , pb , track ) ;
2016-04-10 21:58:15 +02:00
if ( ( ret = mov_write_minf_tag ( s , pb , mov , track ) ) < 0 )
2015-01-25 15:32:22 +02:00
return ret ;
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2003-08-26 23:23:13 +03:00
}
2012-07-14 00:35:54 +03:00
/* transformation matrix
| a b u |
| c d v |
| tx ty w | */
static void write_matrix ( AVIOContext * pb , int16_t a , int16_t b , int16_t c ,
int16_t d , int16_t tx , int16_t ty )
{
avio_wb32 ( pb , a < < 16 ) ; /* 16.16 format */
avio_wb32 ( pb , b < < 16 ) ; /* 16.16 format */
avio_wb32 ( pb , 0 ) ; /* u in 2.30 format */
avio_wb32 ( pb , c < < 16 ) ; /* 16.16 format */
avio_wb32 ( pb , d < < 16 ) ; /* 16.16 format */
avio_wb32 ( pb , 0 ) ; /* v in 2.30 format */
avio_wb32 ( pb , tx < < 16 ) ; /* 16.16 format */
avio_wb32 ( pb , ty < < 16 ) ; /* 16.16 format */
avio_wb32 ( pb , 1 < < 30 ) ; /* w in 2.30 format */
}
2014-07-04 17:26:06 +03:00
static int mov_write_tkhd_tag ( AVIOContext * pb , MOVMuxContext * mov ,
MOVTrack * track , AVStream * st )
2003-08-26 23:23:13 +03:00
{
2012-01-30 17:19:15 +03:00
int64_t duration = av_rescale_rnd ( track - > track_duration , MOV_TIMESCALE ,
2009-11-29 04:46:49 +02:00
track - > timescale , AV_ROUND_UP ) ;
2006-03-26 16:34:51 +03:00
int version = duration < INT32_MAX ? 0 : 1 ;
2014-07-04 17:53:31 +03:00
int flags = MOV_TKHD_FLAG_IN_MOVIE ;
2012-07-14 00:35:54 +03:00
int rotation = 0 ;
2014-07-04 17:26:06 +03:00
int group = 0 ;
2015-03-13 00:14:08 +02:00
uint32_t * display_matrix = NULL ;
int display_matrix_size , i ;
2014-07-04 17:53:31 +03:00
2014-07-04 17:26:06 +03:00
if ( st ) {
if ( mov - > per_stream_grouping )
group = st - > index ;
else
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
group = st - > codecpar - > codec_type ;
2015-03-13 00:14:08 +02:00
display_matrix = ( uint32_t * ) av_stream_get_side_data ( st , AV_PKT_DATA_DISPLAYMATRIX ,
& display_matrix_size ) ;
2015-03-21 01:24:31 +02:00
if ( display_matrix & & display_matrix_size < 9 * sizeof ( * display_matrix ) )
2015-03-13 00:14:08 +02:00
display_matrix = NULL ;
2014-07-04 17:26:06 +03:00
}
2006-03-26 16:34:51 +03:00
2014-07-04 17:53:31 +03:00
if ( track - > flags & MOV_TRACK_ENABLED )
flags | = MOV_TKHD_FLAG_ENABLED ;
2012-02-17 15:40:24 +03:00
if ( track - > mode = = MODE_ISM )
version = 1 ;
2011-02-21 20:28:17 +02:00
( version = = 1 ) ? avio_wb32 ( pb , 104 ) : avio_wb32 ( pb , 92 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " tkhd " ) ;
2011-02-21 20:28:17 +02:00
avio_w8 ( pb , version ) ;
2014-07-04 17:53:31 +03:00
avio_wb24 ( pb , flags ) ;
2006-03-26 16:34:51 +03:00
if ( version = = 1 ) {
2011-02-21 20:28:17 +02:00
avio_wb64 ( pb , track - > time ) ;
avio_wb64 ( pb , track - > time ) ;
2006-03-26 16:34:51 +03:00
} else {
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , track - > time ) ; /* creation time */
avio_wb32 ( pb , track - > time ) ; /* modification time */
2006-03-26 16:34:51 +03:00
}
2012-01-30 17:19:15 +03:00
avio_wb32 ( pb , track - > track_id ) ; /* track-id */
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* reserved */
2014-10-29 22:51:29 +02:00
if ( ! track - > entry & & mov - > mode = = MODE_ISM )
2012-01-28 01:28:19 +03:00
( version = = 1 ) ? avio_wb64 ( pb , UINT64_C ( 0xffffffffffffffff ) ) : avio_wb32 ( pb , 0xffffffff ) ;
2014-10-29 22:51:29 +02:00
else if ( ! track - > entry )
( version = = 1 ) ? avio_wb64 ( pb , 0 ) : avio_wb32 ( pb , 0 ) ;
2012-01-28 01:28:19 +03:00
else
( version = = 1 ) ? avio_wb64 ( pb , duration ) : avio_wb32 ( pb , duration ) ;
2003-08-26 23:23:13 +03:00
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* reserved */
avio_wb32 ( pb , 0 ) ; /* reserved */
2011-09-16 17:06:45 +03:00
avio_wb16 ( pb , 0 ) ; /* layer */
2014-07-04 17:26:06 +03:00
avio_wb16 ( pb , group ) ; /* alternate group) */
2003-08-26 23:23:13 +03:00
/* Volume, only for audio */
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( track - > par - > codec_type = = AVMEDIA_TYPE_AUDIO )
2011-02-21 20:28:17 +02:00
avio_wb16 ( pb , 0x0100 ) ;
2003-08-26 23:23:13 +03:00
else
2011-02-21 20:28:17 +02:00
avio_wb16 ( pb , 0 ) ;
avio_wb16 ( pb , 0 ) ; /* reserved */
2003-08-26 23:23:13 +03:00
/* Matrix structure */
2012-07-14 00:35:54 +03:00
if ( st & & st - > metadata ) {
AVDictionaryEntry * rot = av_dict_get ( st - > metadata , " rotate " , NULL , 0 ) ;
rotation = ( rot & & rot - > value ) ? atoi ( rot - > value ) : 0 ;
}
2015-03-13 00:14:08 +02:00
if ( display_matrix ) {
for ( i = 0 ; i < 9 ; i + + )
avio_wb32 ( pb , display_matrix [ i ] ) ;
2015-03-20 02:10:06 +02:00
} else if ( rotation = = 90 ) {
2016-04-10 21:58:15 +02:00
write_matrix ( pb , 0 , 1 , - 1 , 0 , track - > par - > height , 0 ) ;
2012-07-14 00:35:54 +03:00
} else if ( rotation = = 180 ) {
2016-04-10 21:58:15 +02:00
write_matrix ( pb , - 1 , 0 , 0 , - 1 , track - > par - > width , track - > par - > height ) ;
2012-07-14 00:35:54 +03:00
} else if ( rotation = = 270 ) {
2016-04-10 21:58:15 +02:00
write_matrix ( pb , 0 , - 1 , 1 , 0 , 0 , track - > par - > width ) ;
2012-07-14 00:35:54 +03:00
} else {
write_matrix ( pb , 1 , 0 , 0 , 1 , 0 , 0 ) ;
}
2003-08-26 23:23:13 +03:00
/* Track width and height, for visual only */
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( st & & ( track - > par - > codec_type = = AVMEDIA_TYPE_VIDEO | |
track - > par - > codec_type = = AVMEDIA_TYPE_SUBTITLE ) ) {
2016-05-29 04:00:27 +02:00
int64_t track_width_1616 ;
2013-07-07 14:59:47 +03:00
if ( track - > mode = = MODE_MOV ) {
2016-05-29 04:00:27 +02:00
track_width_1616 = track - > par - > width * 0x10000ULL ;
2011-02-25 17:41:55 +02:00
} else {
2016-05-29 04:00:27 +02:00
track_width_1616 = av_rescale ( st - > sample_aspect_ratio . num ,
2016-04-10 21:58:15 +02:00
track - > par - > width * 0x10000LL ,
2015-05-29 12:36:55 +02:00
st - > sample_aspect_ratio . den ) ;
2015-05-29 12:54:37 +02:00
if ( ! track_width_1616 | |
2016-04-10 21:58:15 +02:00
track - > height ! = track - > par - > height | |
2015-05-29 12:54:37 +02:00
track_width_1616 > UINT32_MAX )
2016-05-29 03:51:42 +02:00
track_width_1616 = track - > par - > width * 0x10000ULL ;
2011-02-25 17:41:55 +02:00
}
2016-05-29 04:00:27 +02:00
if ( track_width_1616 > UINT32_MAX ) {
av_log ( mov - > fc , AV_LOG_WARNING , " track width is too large \n " ) ;
track_width_1616 = 0 ;
}
avio_wb32 ( pb , track_width_1616 ) ;
if ( track - > height > 0xFFFF ) {
av_log ( mov - > fc , AV_LOG_WARNING , " track height is too large \n " ) ;
avio_wb32 ( pb , 0 ) ;
} else
avio_wb32 ( pb , track - > height * 0x10000U ) ;
2013-07-07 14:59:47 +03:00
} else {
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ;
avio_wb32 ( pb , 0 ) ;
2003-08-26 23:23:13 +03:00
}
return 0x5c ;
}
2011-02-26 00:41:00 +02:00
static int mov_write_tapt_tag ( AVIOContext * pb , MOVTrack * track )
2011-02-25 17:41:55 +02:00
{
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
int32_t width = av_rescale ( track - > par - > sample_aspect_ratio . num , track - > par - > width ,
track - > par - > sample_aspect_ratio . den ) ;
2011-02-25 17:41:55 +02:00
2011-03-03 21:11:45 +02:00
int64_t pos = avio_tell ( pb ) ;
2011-02-25 17:41:55 +02:00
avio_wb32 ( pb , 0 ) ; /* size */
2011-02-26 00:41:00 +02:00
ffio_wfourcc ( pb , " tapt " ) ;
2011-02-25 17:41:55 +02:00
avio_wb32 ( pb , 20 ) ;
2011-02-26 00:41:00 +02:00
ffio_wfourcc ( pb , " clef " ) ;
2011-02-25 17:41:55 +02:00
avio_wb32 ( pb , 0 ) ;
2012-10-29 16:56:57 +03:00
avio_wb32 ( pb , width < < 16 ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
avio_wb32 ( pb , track - > par - > height < < 16 ) ;
2012-10-29 16:56:57 +03:00
avio_wb32 ( pb , 20 ) ;
ffio_wfourcc ( pb , " prof " ) ;
avio_wb32 ( pb , 0 ) ;
2011-02-25 17:41:55 +02:00
avio_wb32 ( pb , width < < 16 ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
avio_wb32 ( pb , track - > par - > height < < 16 ) ;
2011-02-25 17:41:55 +02:00
avio_wb32 ( pb , 20 ) ;
2011-02-26 00:41:00 +02:00
ffio_wfourcc ( pb , " enof " ) ;
2011-02-25 17:41:55 +02:00
avio_wb32 ( pb , 0 ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
avio_wb32 ( pb , track - > par - > width < < 16 ) ;
avio_wb32 ( pb , track - > par - > height < < 16 ) ;
2011-02-25 17:41:55 +02:00
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2011-12-11 20:07:04 +03:00
}
2011-02-25 17:41:55 +02:00
2005-01-27 16:48:15 +02:00
// This box seems important for the psp playback ... without it the movie seems to hang
2014-10-12 00:03:25 +03:00
static int mov_write_edts_tag ( AVIOContext * pb , MOVMuxContext * mov ,
MOVTrack * track )
2005-01-27 16:48:15 +02:00
{
2012-01-30 17:19:15 +03:00
int64_t duration = av_rescale_rnd ( track - > track_duration , MOV_TIMESCALE ,
2011-05-02 20:25:28 +03:00
track - > timescale , AV_ROUND_UP ) ;
int version = duration < INT32_MAX ? 0 : 1 ;
int entry_size , entry_count , size ;
2014-11-03 20:34:27 +02:00
int64_t delay , start_ct = track - > start_cts ;
2015-01-03 03:37:18 +02:00
int64_t start_dts = track - > start_dts ;
if ( track - > entry ) {
if ( start_dts ! = track - > cluster [ 0 ] . dts | | start_ct ! = track - > cluster [ 0 ] . cts ) {
av_log ( mov - > fc , AV_LOG_DEBUG ,
" EDTS using dts:% " PRId64 " cts:%d instead of dts:% " PRId64 " cts:% " PRId64 " tid:%d \n " ,
track - > cluster [ 0 ] . dts , track - > cluster [ 0 ] . cts ,
start_dts , start_ct , track - > track_id ) ;
start_dts = track - > cluster [ 0 ] . dts ;
start_ct = track - > cluster [ 0 ] . cts ;
}
}
delay = av_rescale_rnd ( start_dts + start_ct , MOV_TIMESCALE ,
2011-05-02 20:25:28 +03:00
track - > timescale , AV_ROUND_DOWN ) ;
version | = delay < INT32_MAX ? 0 : 1 ;
entry_size = ( version = = 1 ) ? 20 : 12 ;
entry_count = 1 + ( delay > 0 ) ;
size = 24 + entry_count * entry_size ;
/* write the atom data */
avio_wb32 ( pb , size ) ;
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " edts " ) ;
2011-05-02 20:25:28 +03:00
avio_wb32 ( pb , size - 8 ) ;
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " elst " ) ;
2011-05-02 20:25:28 +03:00
avio_w8 ( pb , version ) ;
avio_wb24 ( pb , 0 ) ; /* flags */
2005-01-27 16:48:15 +02:00
2011-05-02 20:25:28 +03:00
avio_wb32 ( pb , entry_count ) ;
if ( delay > 0 ) { /* add an empty edit to delay presentation */
2014-10-11 23:56:24 +03:00
/* In the positive delay case, the delay includes the cts
* offset , and the second edit list entry below trims out
* the same amount from the actual content . This makes sure
* that the offsetted last sample is included in the edit
* list duration as well . */
2011-05-02 20:25:28 +03:00
if ( version = = 1 ) {
avio_wb64 ( pb , delay ) ;
avio_wb64 ( pb , - 1 ) ;
} else {
avio_wb32 ( pb , delay ) ;
avio_wb32 ( pb , - 1 ) ;
}
avio_wb32 ( pb , 0x00010000 ) ;
2012-09-29 07:31:18 +03:00
} else {
2012-09-29 07:31:18 +03:00
/* Avoid accidentally ending up with start_ct = -1 which has got a
* special meaning . Normally start_ct should end up positive or zero
2016-03-29 00:07:47 +02:00
* here , but use FFMIN in case dts is a small positive integer
2012-09-29 07:31:18 +03:00
* rounded to 0 when represented in MOV_TIMESCALE units . */
2015-01-03 03:37:18 +02:00
av_assert0 ( av_rescale_rnd ( start_dts , MOV_TIMESCALE , track - > timescale , AV_ROUND_DOWN ) < = 0 ) ;
start_ct = - FFMIN ( start_dts , 0 ) ;
2014-10-11 23:56:24 +03:00
/* Note, this delay is calculated from the pts of the first sample,
* ensuring that we don ' t reduce the duration for cases with
* dts < 0 pts = 0. */
2012-09-29 07:31:18 +03:00
duration + = delay ;
2011-05-02 20:25:28 +03:00
}
2005-01-27 16:48:15 +02:00
2014-10-12 00:03:25 +03:00
/* For fragmented files, we don't know the full length yet. Setting
* duration to 0 allows us to only specify the offset , including
* the rest of the content ( from all future fragments ) without specifying
* an explicit duration . */
if ( mov - > flags & FF_MOV_FLAG_FRAGMENT )
duration = 0 ;
2011-05-02 20:25:28 +03:00
/* duration */
if ( version = = 1 ) {
avio_wb64 ( pb , duration ) ;
avio_wb64 ( pb , start_ct ) ;
} else {
avio_wb32 ( pb , duration ) ;
avio_wb32 ( pb , start_ct ) ;
}
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0x00010000 ) ;
2011-05-02 20:25:28 +03:00
return size ;
2005-01-27 16:48:15 +02:00
}
2011-02-20 12:04:12 +02:00
static int mov_write_tref_tag ( AVIOContext * pb , MOVTrack * track )
2010-05-05 11:41:10 +03:00
{
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 20 ) ; // size
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " tref " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 12 ) ; // size (subatom)
avio_wl32 ( pb , track - > tref_tag ) ;
avio_wb32 ( pb , track - > tref_id ) ;
2010-05-05 11:41:10 +03:00
return 20 ;
}
2005-01-27 16:48:15 +02:00
// goes at the end of each track! ... Critical for PSP playback ("Incompatible data" without it)
2011-02-20 12:04:12 +02:00
static int mov_write_uuid_tag_psp ( AVIOContext * pb , MOVTrack * mov )
2005-01-27 16:48:15 +02:00
{
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0x34 ) ; /* size ... reports as 28 in mp4box! */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " uuid " ) ;
ffio_wfourcc ( pb , " USMT " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0x21d24fce ) ;
avio_wb32 ( pb , 0xbb88695c ) ;
avio_wb32 ( pb , 0xfac9c740 ) ;
avio_wb32 ( pb , 0x1c ) ; // another size here!
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " MTDT " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0x00010012 ) ;
avio_wb32 ( pb , 0x0a ) ;
avio_wb32 ( pb , 0x55c40000 ) ;
avio_wb32 ( pb , 0x1 ) ;
avio_wb32 ( pb , 0x0 ) ;
2005-01-27 16:48:15 +02:00
return 0x34 ;
}
2012-11-11 22:44:28 +03:00
static int mov_write_udta_sdp ( AVIOContext * pb , MOVTrack * track )
2010-05-18 22:47:24 +03:00
{
2012-11-11 22:44:28 +03:00
AVFormatContext * ctx = track - > rtp_ctx ;
2010-05-18 22:47:24 +03:00
char buf [ 1000 ] = " " ;
int len ;
2012-11-11 22:44:28 +03:00
ff_sdp_write_media ( buf , sizeof ( buf ) , ctx - > streams [ 0 ] , track - > src_track ,
NULL , NULL , 0 , 0 , ctx ) ;
av_strlcatf ( buf , sizeof ( buf ) , " a=control:streamid=%d \r \n " , track - > track_id ) ;
2010-05-18 22:47:24 +03:00
len = strlen ( buf ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , len + 24 ) ;
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " udta " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , len + 16 ) ;
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " hnti " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , len + 8 ) ;
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " sdp " ) ;
2011-02-21 20:28:17 +02:00
avio_write ( pb , buf , len ) ;
2010-05-18 22:47:24 +03:00
return len + 24 ;
}
2014-07-31 20:00:05 +03:00
/* Write one per-track metadata atom of type `tag` carrying the value of
 * stream metadata key `str`; returns the atom size, or 0 if absent/empty. */
static int mov_write_track_metadata(AVIOContext *pb, AVStream *st,
                                    const char *tag, const char *str)
{
    int64_t pos = avio_tell(pb);
    AVDictionaryEntry *entry = av_dict_get(st->metadata, str, NULL, 0);

    if (!entry || !utf8len(entry->value))
        return 0;

    avio_wb32(pb, 0);                                    /* size, patched below */
    ffio_wfourcc(pb, tag);                               /* type */
    avio_write(pb, entry->value, strlen(entry->value));  /* UTF8 string value */
    return update_size(pb, pos);
}
/* Write a per-track udta atom; its content is built in a dynamic buffer so
 * the atom is omitted entirely when no metadata was written. */
static int mov_write_track_udta_tag(AVIOContext *pb, MOVMuxContext *mov,
                                    AVStream *st)
{
    AVIOContext *dyn;
    uint8_t *data;
    int ret, data_len;

    if (!st)
        return 0;

    if ((ret = avio_open_dyn_buf(&dyn)) < 0)
        return ret;

    if (mov->mode & MODE_MP4)
        mov_write_track_metadata(dyn, st, "name", "title");

    data_len = avio_close_dyn_buf(dyn, &data);
    if (data_len > 0) {
        avio_wb32(pb, data_len + 8);
        ffio_wfourcc(pb, "udta");
        avio_write(pb, data, data_len);
    }
    av_free(data);
    return 0;
}
2016-04-10 21:58:15 +02:00
static int mov_write_trak_tag ( AVFormatContext * s , AVIOContext * pb , MOVMuxContext * mov ,
2012-01-05 14:57:05 +03:00
MOVTrack * track , AVStream * st )
2003-08-26 23:23:13 +03:00
{
2011-03-03 21:11:45 +02:00
int64_t pos = avio_tell ( pb ) ;
movenc: Add an option for delaying writing the moov with empty_moov
This delays writing the moov until the first fragment is written,
or can be flushed by the caller explicitly when wanted. If the first
sample in all streams is available at this point, we can write
a proper editlist at this point, allowing streams to start at
something else than dts=0. For AC3 and DNXHD, a packet is
needed in order to write the moov header properly.
This isn't added to the normal behaviour for empty_moov, since
the behaviour that ftyp+moov is written during avformat_write_header
would be changed. Callers that split the output stream into header+segments
(either by flushing manually, with the custom_frag flag set, or by
just differentiating between data written during avformat_write_header
and the rest) will need to be adjusted to take this option into use.
For handling streams that start at something else than dts=0, an
alternative would be to use different kinds of heuristics for
guessing the start dts (using AVCodecContext delay or has_b_frames
together with the frame rate), but this is not reliable and doesn't
necessarily work well with stream copy, and wouldn't work for getting
the right initialization data for AC3 or DNXHD either.
Signed-off-by: Martin Storsjö <martin@martin.st>
2014-11-03 16:09:15 +02:00
int entry_backup = track - > entry ;
2015-01-03 18:24:55 +02:00
int chunk_backup = track - > chunkCount ;
2015-01-25 15:32:22 +02:00
int ret ;
movenc: Add an option for delaying writing the moov with empty_moov
This delays writing the moov until the first fragment is written,
or can be flushed by the caller explicitly when wanted. If the first
sample in all streams is available at this point, we can write
a proper editlist at this point, allowing streams to start at
something else than dts=0. For AC3 and DNXHD, a packet is
needed in order to write the moov header properly.
This isn't added to the normal behaviour for empty_moov, since
the behaviour that ftyp+moov is written during avformat_write_header
would be changed. Callers that split the output stream into header+segments
(either by flushing manually, with the custom_frag flag set, or by
just differentiating between data written during avformat_write_header
and the rest) will need to be adjusted to take this option into use.
For handling streams that start at something else than dts=0, an
alternative would be to use different kinds of heuristics for
guessing the start dts (using AVCodecContext delay or has_b_frames
together with the frame rate), but this is not reliable and doesn't
necessarily work well with stream copy, and wouldn't work for getting
the right initialization data for AC3 or DNXHD either.
Signed-off-by: Martin Storsjö <martin@martin.st>
2014-11-03 16:09:15 +02:00
/* If we want to have an empty moov, but some samples already have been
* buffered ( delay_moov ) , pretend that no samples have been written yet . */
if ( mov - > flags & FF_MOV_FLAG_EMPTY_MOOV )
2015-01-03 18:24:55 +02:00
track - > chunkCount = track - > entry = 0 ;
movenc: Add an option for delaying writing the moov with empty_moov
This delays writing the moov until the first fragment is written,
or can be flushed by the caller explicitly when wanted. If the first
sample in all streams is available at this point, we can write
a proper editlist at this point, allowing streams to start at
something else than dts=0. For AC3 and DNXHD, a packet is
needed in order to write the moov header properly.
This isn't added to the normal behaviour for empty_moov, since
the behaviour that ftyp+moov is written during avformat_write_header
would be changed. Callers that split the output stream into header+segments
(either by flushing manually, with the custom_frag flag set, or by
just differentiating between data written during avformat_write_header
and the rest) will need to be adjusted to take this option into use.
For handling streams that start at something else than dts=0, an
alternative would be to use different kinds of heuristics for
guessing the start dts (using AVCodecContext delay or has_b_frames
together with the frame rate), but this is not reliable and doesn't
necessarily work well with stream copy, and wouldn't work for getting
the right initialization data for AC3 or DNXHD either.
Signed-off-by: Martin Storsjö <martin@martin.st>
2014-11-03 16:09:15 +02:00
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " trak " ) ;
2014-07-04 17:26:06 +03:00
mov_write_tkhd_tag ( pb , mov , track , st ) ;
2014-11-08 00:59:57 +02:00
av_assert2 ( mov - > use_editlist > = 0 ) ;
2015-01-03 02:38:58 +02:00
if ( track - > start_dts ! = AV_NOPTS_VALUE ) {
2014-11-04 16:28:48 +02:00
if ( mov - > use_editlist )
2014-11-08 12:41:29 +02:00
mov_write_edts_tag ( pb , mov , track ) ; // PSP Movies and several other cases require edts box
2014-11-04 16:28:48 +02:00
else if ( ( track - > entry & & track - > cluster [ 0 ] . dts ) | | track - > mode = = MODE_PSP | | is_clcp_track ( track ) )
av_log ( mov - > fc , AV_LOG_WARNING ,
" Not writing any edit list even though one would have been required \n " ) ;
2012-01-05 14:57:05 +03:00
}
2014-11-08 00:59:57 +02:00
2010-05-05 11:41:10 +03:00
if ( track - > tref_tag )
mov_write_tref_tag ( pb , track ) ;
2015-01-25 15:32:22 +02:00
2016-04-10 21:58:15 +02:00
if ( ( ret = mov_write_mdia_tag ( s , pb , mov , track ) ) < 0 )
2015-01-25 15:32:22 +02:00
return ret ;
2005-12-17 20:14:38 +02:00
if ( track - > mode = = MODE_PSP )
2013-07-07 14:59:47 +03:00
mov_write_uuid_tag_psp ( pb , track ) ; // PSP Movies require this uuid box
2010-05-18 22:47:24 +03:00
if ( track - > tag = = MKTAG ( ' r ' , ' t ' , ' p ' , ' ' ) )
2012-11-11 22:44:28 +03:00
mov_write_udta_sdp ( pb , track ) ;
2014-04-21 16:46:22 +03:00
if ( track - > mode = = MODE_MOV ) {
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( track - > par - > codec_type = = AVMEDIA_TYPE_VIDEO ) {
2014-04-21 16:46:22 +03:00
double sample_aspect_ratio = av_q2d ( st - > sample_aspect_ratio ) ;
2014-04-25 22:08:29 +03:00
if ( st - > sample_aspect_ratio . num & & 1.0 ! = sample_aspect_ratio ) {
2014-04-21 16:46:22 +03:00
mov_write_tapt_tag ( pb , track ) ;
}
}
2014-04-25 22:47:25 +03:00
if ( is_clcp_track ( track ) & & st - > sample_aspect_ratio . num ) {
2011-02-25 17:41:55 +02:00
mov_write_tapt_tag ( pb , track ) ;
2014-04-21 16:46:22 +03:00
}
2013-07-07 14:59:47 +03:00
}
2014-07-31 20:00:05 +03:00
mov_write_track_udta_tag ( pb , mov , st ) ;
movenc: Add an option for delaying writing the moov with empty_moov
This delays writing the moov until the first fragment is written,
or can be flushed by the caller explicitly when wanted. If the first
sample in all streams is available at this point, we can write
a proper editlist at this point, allowing streams to start at
something else than dts=0. For AC3 and DNXHD, a packet is
needed in order to write the moov header properly.
This isn't added to the normal behaviour for empty_moov, since
the behaviour that ftyp+moov is written during avformat_write_header
would be changed. Callers that split the output stream into header+segments
(either by flushing manually, with the custom_frag flag set, or by
just differentiating between data written during avformat_write_header
and the rest) will need to be adjusted to take this option into use.
For handling streams that start at something else than dts=0, an
alternative would be to use different kinds of heuristics for
guessing the start dts (using AVCodecContext delay or has_b_frames
together with the frame rate), but this is not reliable and doesn't
necessarily work well with stream copy, and wouldn't work for getting
the right initialization data for AC3 or DNXHD either.
Signed-off-by: Martin Storsjö <martin@martin.st>
2014-11-03 16:09:15 +02:00
track - > entry = entry_backup ;
2015-01-03 18:24:55 +02:00
track - > chunkCount = chunk_backup ;
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2003-08-26 23:23:13 +03:00
}
2011-02-20 12:04:12 +02:00
/* Write the iods (initial object descriptor) atom, deriving the audio/video
 * profile-level indications from the present tracks unless overridden. */
static int mov_write_iods_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    int i, has_audio = 0, has_video = 0;
    int64_t pos = avio_tell(pb);
    int audio_profile = mov->iods_audio_profile;
    int video_profile = mov->iods_video_profile;

    for (i = 0; i < mov->nb_streams; i++) {
        /* only tracks that will actually carry samples count */
        if (mov->tracks[i].entry > 0 || mov->flags & FF_MOV_FLAG_EMPTY_MOOV) {
            has_audio |= mov->tracks[i].par->codec_type == AVMEDIA_TYPE_AUDIO;
            has_video |= mov->tracks[i].par->codec_type == AVMEDIA_TYPE_VIDEO;
        }
    }
    /* unset profile (< 0): 0xFF = "no streams", 0xFE = "unspecified" */
    if (audio_profile < 0)
        audio_profile = 0xFF - has_audio;
    if (video_profile < 0)
        video_profile = 0xFF - has_video;

    avio_wb32(pb, 0x0); /* size, patched by update_size() */
    ffio_wfourcc(pb, "iods");
    avio_wb32(pb, 0);   /* version & flags */
    put_descr(pb, 0x10, 7);
    avio_wb16(pb, 0x004f); /* ObjectDescriptorID */
    avio_w8(pb, 0xff);     /* ODProfileLevel */
    avio_w8(pb, 0xff);     /* sceneProfileLevel */
    avio_w8(pb, audio_profile);
    avio_w8(pb, video_profile);
    avio_w8(pb, 0xff);     /* graphicsProfileLevel */
    return update_size(pb, pos);
}
2012-01-05 14:57:05 +03:00
/* Write a trex (track extends) atom with all-default fragment values. */
static int mov_write_trex_tag(AVIOContext *pb, MOVTrack *track)
{
    avio_wb32(pb, 0x20);            /* size */
    ffio_wfourcc(pb, "trex");
    avio_wb32(pb, 0);               /* version & flags */
    avio_wb32(pb, track->track_id); /* track ID */
    avio_wb32(pb, 1);               /* default sample description index */
    avio_wb32(pb, 0);               /* default sample duration */
    avio_wb32(pb, 0);               /* default sample size */
    avio_wb32(pb, 0);               /* default sample flags */
    return 0;
}
/* Write the mvex (movie extends) atom: one trex child per track. */
static int mov_write_mvex_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    int64_t pos = avio_tell(pb);
    int track_idx;

    avio_wb32(pb, 0x0); /* size, patched by update_size() */
    ffio_wfourcc(pb, "mvex");
    for (track_idx = 0; track_idx < mov->nb_streams; track_idx++)
        mov_write_trex_tag(pb, &mov->tracks[track_idx]);
    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/*
 * Write the mvhd (movie header) atom.
 *
 * The duration is the maximum track duration rescaled to MOV_TIMESCALE;
 * version 1 (64-bit times) is used only when that duration overflows 32 bits.
 *
 * Fix vs. original: the function used to `return 0x6c` (108) unconditionally,
 * which disagreed with the 120-byte atom written for version 1; the return
 * value now matches the size field actually written.
 */
static int mov_write_mvhd_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    int max_track_id = 1, i;
    int64_t max_track_len = 0;
    int version;

    for (i = 0; i < mov->nb_streams; i++) {
        if (mov->tracks[i].entry > 0 && mov->tracks[i].timescale) {
            int64_t max_track_len_temp = av_rescale_rnd(mov->tracks[i].track_duration,
                                                        MOV_TIMESCALE,
                                                        mov->tracks[i].timescale,
                                                        AV_ROUND_UP);
            if (max_track_len < max_track_len_temp)
                max_track_len = max_track_len_temp;
            if (max_track_id < mov->tracks[i].track_id)
                max_track_id = mov->tracks[i].track_id;
        }
    }
    /* If using delay_moov, make sure the output is the same as if no
     * samples had been written yet. */
    if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV) {
        max_track_len = 0;
        max_track_id  = 1;
    }

    version = max_track_len < UINT32_MAX ? 0 : 1;
    avio_wb32(pb, version == 1 ? 120 : 108); /* size */
    ffio_wfourcc(pb, "mvhd");
    avio_w8(pb, version);
    avio_wb24(pb, 0); /* flags */
    if (version == 1) {
        avio_wb64(pb, mov->time); /* creation time */
        avio_wb64(pb, mov->time); /* modification time */
    } else {
        avio_wb32(pb, mov->time); /* creation time */
        avio_wb32(pb, mov->time); /* modification time */
    }
    avio_wb32(pb, MOV_TIMESCALE);
    /* duration of longest track */
    if (version == 1)
        avio_wb64(pb, max_track_len);
    else
        avio_wb32(pb, max_track_len);

    avio_wb32(pb, 0x00010000); /* reserved (preferred rate) 1.0 = normal */
    avio_wb16(pb, 0x0100);     /* reserved (preferred volume) 1.0 = normal */
    avio_wb16(pb, 0);          /* reserved */
    avio_wb32(pb, 0);          /* reserved */
    avio_wb32(pb, 0);          /* reserved */

    /* Matrix structure */
    write_matrix(pb, 1, 0, 0, 1, 0, 0);

    avio_wb32(pb, 0); /* reserved (preview time) */
    avio_wb32(pb, 0); /* reserved (preview duration) */
    avio_wb32(pb, 0); /* reserved (poster time) */
    avio_wb32(pb, 0); /* reserved (selection time) */
    avio_wb32(pb, 0); /* reserved (selection duration) */
    avio_wb32(pb, 0); /* reserved (current time) */

    avio_wb32(pb, max_track_id + 1); /* Next track id */
    return version == 1 ? 120 : 108;
}
2011-02-20 12:04:12 +02:00
static int mov_write_itunes_hdlr_tag ( AVIOContext * pb , MOVMuxContext * mov ,
2004-08-14 17:05:48 +03:00
AVFormatContext * s )
{
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 33 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " hdlr " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ;
avio_wb32 ( pb , 0 ) ;
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " mdir " ) ;
ffio_wfourcc ( pb , " appl " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ;
avio_wb32 ( pb , 0 ) ;
avio_w8 ( pb , 0 ) ;
2010-04-12 10:24:30 +03:00
return 33 ;
2004-08-14 17:05:48 +03:00
}
/* helper function to write a data tag with the specified string as data */
2011-02-20 12:04:12 +02:00
static int mov_write_string_data_tag ( AVIOContext * pb , const char * data , int lang , int long_style )
2004-08-14 17:05:48 +03:00
{
2013-07-07 14:59:47 +03:00
if ( long_style ) {
2010-04-12 10:24:30 +03:00
int size = 16 + strlen ( data ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , size ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " data " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 1 ) ;
avio_wb32 ( pb , 0 ) ;
avio_write ( pb , data , strlen ( data ) ) ;
2010-04-12 10:24:30 +03:00
return size ;
2013-07-07 14:59:47 +03:00
} else {
2010-03-09 03:53:16 +02:00
if ( ! lang )
lang = ff_mov_iso639_to_lang ( " und " , 1 ) ;
2011-02-21 20:28:17 +02:00
avio_wb16 ( pb , strlen ( data ) ) ; /* string length */
avio_wb16 ( pb , lang ) ;
avio_write ( pb , data , strlen ( data ) ) ;
2006-05-19 01:49:27 +03:00
return strlen ( data ) + 4 ;
2004-08-14 17:05:48 +03:00
}
}
2013-07-07 14:59:47 +03:00
/* Write an atom of type `name` whose payload is the given string; no-op
 * (returns 0) for NULL or empty values. */
static int mov_write_string_tag(AVIOContext *pb, const char *name,
                                const char *value, int lang, int long_style)
{
    int64_t pos;

    if (!value || !value[0])
        return 0;

    pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size, patched by update_size() */
    ffio_wfourcc(pb, name);
    mov_write_string_data_tag(pb, value, lang, long_style);
    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/* Write metadata key `tag` as an atom `name`; if a language-suffixed variant
 * of the key ("tag-XXX") with the same value exists, encode that language. */
static int mov_write_string_metadata(AVFormatContext *s, AVIOContext *pb,
                                     const char *name, const char *tag,
                                     int long_style)
{
    int l, lang = 0, len, len2;
    AVDictionaryEntry *t, *t2 = NULL;
    char prefix[16];

    if (!(t = av_dict_get(s->metadata, tag, NULL, 0)))
        return 0;

    len = strlen(t->key);
    snprintf(prefix, sizeof(prefix), "%s-", tag);
    while ((t2 = av_dict_get(s->metadata, prefix, t2, AV_DICT_IGNORE_SUFFIX))) {
        len2 = strlen(t2->key);
        /* "tag-XXX": same value and a valid 3-letter ISO 639 suffix */
        if (len2 == len + 4 && !strcmp(t->value, t2->value)
            && (l = ff_mov_iso639_to_lang(&t2->key[len2 - 3], 1)) >= 0) {
            lang = l;
            break;
        }
    }
    return mov_write_string_tag(pb, name, t->value, lang, long_style);
}
2012-08-24 22:37:47 +03:00
/* iTunes bpm number */
static int mov_write_tmpo_tag(AVIOContext *pb, AVFormatContext *s)
{
    AVDictionaryEntry *t = av_dict_get(s->metadata, "tmpo", NULL, 0);
    int tempo = t ? atoi(t->value) : 0;

    if (!tempo)
        return 0;

    avio_wb32(pb, 26);       /* atom size */
    ffio_wfourcc(pb, "tmpo");
    avio_wb32(pb, 26 - 8);   /* data atom size */
    ffio_wfourcc(pb, "data");
    avio_wb32(pb, 0x15);     /* type specifier */
    avio_wb32(pb, 0);
    avio_wb16(pb, tempo);    /* data */
    return 26;
}
2013-08-01 17:21:31 +03:00
/* iTunes track or disc number */
2011-02-20 12:04:12 +02:00
static int mov_write_trkn_tag ( AVIOContext * pb , MOVMuxContext * mov ,
2013-08-01 17:21:31 +03:00
AVFormatContext * s , int disc )
2004-08-14 17:05:48 +03:00
{
2013-08-01 17:21:31 +03:00
AVDictionaryEntry * t = av_dict_get ( s - > metadata ,
disc ? " disc " : " track " ,
NULL , 0 ) ;
2009-02-26 15:06:49 +02:00
int size = 0 , track = t ? atoi ( t - > value ) : 0 ;
if ( track ) {
2013-08-01 17:20:58 +03:00
int tracks = 0 ;
char * slash = strchr ( t - > value , ' / ' ) ;
if ( slash )
tracks = atoi ( slash + 1 ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 32 ) ; /* size */
2013-08-01 17:21:31 +03:00
ffio_wfourcc ( pb , disc ? " disk " : " trkn " ) ;
2013-07-07 14:59:47 +03:00
avio_wb32 ( pb , 24 ) ; /* size */
ffio_wfourcc ( pb , " data " ) ;
avio_wb32 ( pb , 0 ) ; // 8 bytes empty
avio_wb32 ( pb , 0 ) ;
avio_wb16 ( pb , 0 ) ; // empty
2013-08-01 17:21:31 +03:00
avio_wb16 ( pb , track ) ; // track / disc number
avio_wb16 ( pb , tracks ) ; // total track / disc number
2013-07-07 14:59:47 +03:00
avio_wb16 ( pb , 0 ) ; // empty
2010-04-12 10:24:30 +03:00
size = 32 ;
2004-08-14 17:05:48 +03:00
}
return size ;
}
2012-08-21 04:21:39 +03:00
/*
 * Write an iTunes-style integer metadata atom of type `name` from metadata
 * key `tag`; `len` is the payload width and must be 1 or 4 bytes.
 * Returns the atom size, 0 if the key is absent, -1 on invalid `len`.
 *
 * Fix vs. original: the value was parsed into a uint8_t, truncating 4-byte
 * atoms (e.g. tves/tvsn episode_sort/season_number) to 0..255; it is now
 * kept as an int so the full 32-bit value is written for len == 4.
 */
static int mov_write_int8_metadata(AVFormatContext *s, AVIOContext *pb,
                                   const char *name, const char *tag,
                                   int len)
{
    AVDictionaryEntry *t = NULL;
    int num;
    int size = 24 + len;

    if (len != 1 && len != 4)
        return -1;

    if (!(t = av_dict_get(s->metadata, tag, NULL, 0)))
        return 0;
    num = atoi(t->value);

    avio_wb32(pb, size);
    ffio_wfourcc(pb, name);
    avio_wb32(pb, size - 8);
    ffio_wfourcc(pb, "data");
    avio_wb32(pb, 0x15);   /* type specifier: big-endian integer */
    avio_wb32(pb, 0);
    if (len == 4) avio_wb32(pb, num);
    else          avio_w8(pb, num);  /* 1-byte atoms still truncate as before */

    return size;
}
2004-08-14 17:05:48 +03:00
/* iTunes meta data list */
2011-02-20 12:04:12 +02:00
static int mov_write_ilst_tag ( AVIOContext * pb , MOVMuxContext * mov ,
2004-08-14 17:05:48 +03:00
AVFormatContext * s )
{
2011-03-03 21:11:45 +02:00
int64_t pos = avio_tell ( pb ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " ilst " ) ;
2009-02-26 15:06:49 +02:00
mov_write_string_metadata ( s , pb , " \251 nam " , " title " , 1 ) ;
2010-05-24 00:03:14 +03:00
mov_write_string_metadata ( s , pb , " \251 ART " , " artist " , 1 ) ;
2010-01-04 04:52:40 +02:00
mov_write_string_metadata ( s , pb , " aART " , " album_artist " , 1 ) ;
2009-11-23 10:47:40 +02:00
mov_write_string_metadata ( s , pb , " \251 wrt " , " composer " , 1 ) ;
2009-02-26 15:06:49 +02:00
mov_write_string_metadata ( s , pb , " \251 alb " , " album " , 1 ) ;
2010-02-01 13:39:10 +02:00
mov_write_string_metadata ( s , pb , " \251 day " , " date " , 1 ) ;
2015-06-15 20:40:46 +02:00
if ( ! mov_write_string_metadata ( s , pb , " \251 too " , " encoding_tool " , 1 ) ) {
if ( ! ( s - > flags & AVFMT_FLAG_BITEXACT ) )
mov_write_string_tag ( pb , " \251 too " , LIBAVFORMAT_IDENT , 0 , 1 ) ;
}
2009-02-26 15:06:49 +02:00
mov_write_string_metadata ( s , pb , " \251 cmt " , " comment " , 1 ) ;
mov_write_string_metadata ( s , pb , " \251 gen " , " genre " , 1 ) ;
mov_write_string_metadata ( s , pb , " \251 cpy " , " copyright " , 1 ) ;
2010-01-04 04:52:40 +02:00
mov_write_string_metadata ( s , pb , " \251 grp " , " grouping " , 1 ) ;
mov_write_string_metadata ( s , pb , " \251 lyr " , " lyrics " , 1 ) ;
2009-11-23 10:47:44 +02:00
mov_write_string_metadata ( s , pb , " desc " , " description " , 1 ) ;
mov_write_string_metadata ( s , pb , " ldes " , " synopsis " , 1 ) ;
mov_write_string_metadata ( s , pb , " tvsh " , " show " , 1 ) ;
mov_write_string_metadata ( s , pb , " tven " , " episode_id " , 1 ) ;
mov_write_string_metadata ( s , pb , " tvnn " , " network " , 1 ) ;
2012-08-21 04:21:39 +03:00
mov_write_int8_metadata ( s , pb , " tves " , " episode_sort " , 4 ) ;
mov_write_int8_metadata ( s , pb , " tvsn " , " season_number " , 4 ) ;
mov_write_int8_metadata ( s , pb , " stik " , " media_type " , 1 ) ;
mov_write_int8_metadata ( s , pb , " hdvd " , " hd_video " , 1 ) ;
mov_write_int8_metadata ( s , pb , " pgap " , " gapless_playback " , 1 ) ;
2014-03-15 04:26:14 +03:00
mov_write_int8_metadata ( s , pb , " cpil " , " compilation " , 1 ) ;
2013-08-01 17:21:31 +03:00
mov_write_trkn_tag ( pb , mov , s , 0 ) ; // track number
mov_write_trkn_tag ( pb , mov , s , 1 ) ; // disc number
2012-08-24 22:37:47 +03:00
mov_write_tmpo_tag ( pb , s ) ;
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2004-08-14 17:05:48 +03:00
}
/* iTunes meta data tag */
2011-02-20 12:04:12 +02:00
static int mov_write_meta_tag ( AVIOContext * pb , MOVMuxContext * mov ,
2004-08-14 17:05:48 +03:00
AVFormatContext * s )
{
int size = 0 ;
2011-03-03 21:11:45 +02:00
int64_t pos = avio_tell ( pb ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ; /* size */
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " meta " ) ;
2011-02-21 20:28:17 +02:00
avio_wb32 ( pb , 0 ) ;
2009-02-26 15:08:22 +02:00
mov_write_itunes_hdlr_tag ( pb , mov , s ) ;
mov_write_ilst_tag ( pb , mov , s ) ;
2012-01-30 17:19:15 +03:00
size = update_size ( pb , pos ) ;
2004-08-14 17:05:48 +03:00
return size ;
}
2005-12-17 20:14:38 +02:00
2014-08-06 17:48:21 +03:00
/* Write the raw value of metadata key `key` as an atom of type `name`;
 * returns the atom size, or 0 when the key is absent or empty. */
static int mov_write_raw_metadata_tag(AVFormatContext *s, AVIOContext *pb,
                                      const char *name, const char *key)
{
    AVDictionaryEntry *entry;
    int len;

    if (!(entry = av_dict_get(s->metadata, key, NULL, 0)))
        return 0;
    len = strlen(entry->value);
    if (len <= 0)
        return 0;

    avio_wb32(pb, len + 8);
    ffio_wfourcc(pb, name);
    avio_write(pb, entry->value, len);
    return len + 8;
}
2011-02-20 12:04:12 +02:00
/* Transcode a UTF-8 string into NUL-terminated UTF-16BE code units;
 * returns -1 on malformed UTF-8 input, 0 on success. */
static int ascii_to_wc(AVIOContext *pb, const uint8_t *b)
{
    int cp;

    while (*b) {
        GET_UTF8(cp, *b++, return -1;)
        avio_wb16(pb, cp);
    }
    avio_wb16(pb, 0x00); /* terminator */
    return 0;
}
/* Pack a 3-letter ISO 639-2 code into a 3x5-bit packed language field
 * (each letter stored as letter - 0x60). */
static uint16_t language_code(const char *str)
{
    uint16_t code = 0;
    int i;

    for (i = 0; i < 3; i++)
        code = (code << 5) | ((str[i] - 0x60) & 0x1F);
    return code;
}
2011-02-20 12:04:12 +02:00
/* Write a 3GPP user-data atom of type `tag` from metadata key `str`;
 * "yrrc" is numeric, everything else is a language-tagged UTF-8 string. */
static int mov_write_3gp_udta_tag(AVIOContext *pb, AVFormatContext *s,
                                  const char *tag, const char *str)
{
    int64_t pos = avio_tell(pb);
    AVDictionaryEntry *entry = av_dict_get(s->metadata, str, NULL, 0);

    if (!entry || !utf8len(entry->value))
        return 0;

    avio_wb32(pb, 0);      /* size, patched by update_size() */
    ffio_wfourcc(pb, tag); /* type */
    avio_wb32(pb, 0);      /* version + flags */
    if (!strcmp(tag, "yrrc")) {
        avio_wb16(pb, atoi(entry->value));
    } else {
        avio_wb16(pb, language_code("eng"));                   /* language */
        avio_write(pb, entry->value, strlen(entry->value) + 1); /* UTF8 string value */
        /* the album atom additionally carries a track-number byte */
        if (!strcmp(tag, "albm") &&
            (entry = av_dict_get(s->metadata, "track", NULL, 0)))
            avio_w8(pb, atoi(entry->value));
    }
    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/* Write a Nero-style chpl chapter list (at most 255 chapters); start times
 * are stored in 100 ns units, titles as length-prefixed strings. */
static int mov_write_chpl_tag(AVIOContext *pb, AVFormatContext *s)
{
    int64_t pos = avio_tell(pb);
    int i, nb_chapters = FFMIN(s->nb_chapters, 255);

    avio_wb32(pb, 0);          // size, patched by update_size()
    ffio_wfourcc(pb, "chpl");
    avio_wb32(pb, 0x01000000); // version + flags
    avio_wb32(pb, 0);          // unknown
    avio_w8(pb, nb_chapters);

    for (i = 0; i < nb_chapters; i++) {
        AVChapter *chapter = s->chapters[i];
        AVDictionaryEntry *title;

        avio_wb64(pb, av_rescale_q(chapter->start, chapter->time_base,
                                   (AVRational){ 1, 10000000 }));

        if ((title = av_dict_get(chapter->metadata, "title", NULL, 0))) {
            int len = FFMIN(strlen(title->value), 255);
            avio_w8(pb, len);
            avio_write(pb, title->value, len);
        } else
            avio_w8(pb, 0);
    }
    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
static int mov_write_udta_tag ( AVIOContext * pb , MOVMuxContext * mov ,
2004-02-14 21:08:09 +02:00
AVFormatContext * s )
{
2011-02-20 12:04:12 +02:00
AVIOContext * pb_buf ;
2014-03-16 18:26:04 +03:00
int ret , size ;
2009-02-26 15:06:49 +02:00
uint8_t * buf ;
2007-10-24 17:20:15 +03:00
2011-03-17 09:13:34 +02:00
ret = avio_open_dyn_buf ( & pb_buf ) ;
2013-07-07 14:59:47 +03:00
if ( ret < 0 )
2009-02-26 15:06:49 +02:00
return ret ;
2004-02-14 21:08:09 +02:00
2012-01-14 16:10:17 +03:00
if ( mov - > mode & MODE_3GP ) {
mov_write_3gp_udta_tag ( pb_buf , s , " perf " , " artist " ) ;
mov_write_3gp_udta_tag ( pb_buf , s , " titl " , " title " ) ;
mov_write_3gp_udta_tag ( pb_buf , s , " auth " , " author " ) ;
mov_write_3gp_udta_tag ( pb_buf , s , " gnre " , " genre " ) ;
mov_write_3gp_udta_tag ( pb_buf , s , " dscp " , " comment " ) ;
mov_write_3gp_udta_tag ( pb_buf , s , " albm " , " album " ) ;
mov_write_3gp_udta_tag ( pb_buf , s , " cprt " , " copyright " ) ;
mov_write_3gp_udta_tag ( pb_buf , s , " yrrc " , " date " ) ;
} else if ( mov - > mode = = MODE_MOV ) { // the title field breaks gtkpod with mp4 and my suspicion is that stuff is not valid in mp4
2013-07-07 14:59:47 +03:00
mov_write_string_metadata ( s , pb_buf , " \251 ART " , " artist " , 0 ) ;
mov_write_string_metadata ( s , pb_buf , " \251 nam " , " title " , 0 ) ;
mov_write_string_metadata ( s , pb_buf , " \251 aut " , " author " , 0 ) ;
mov_write_string_metadata ( s , pb_buf , " \251 alb " , " album " , 0 ) ;
mov_write_string_metadata ( s , pb_buf , " \251 day " , " date " , 0 ) ;
mov_write_string_metadata ( s , pb_buf , " \251 swr " , " encoder " , 0 ) ;
2012-07-09 12:40:12 +03:00
// currently ignored by mov.c
2013-07-07 14:59:47 +03:00
mov_write_string_metadata ( s , pb_buf , " \251 des " , " comment " , 0 ) ;
2012-07-09 12:40:12 +03:00
// add support for libquicktime, this atom is also actually read by mov.c
2013-07-08 05:08:07 +03:00
mov_write_string_metadata ( s , pb_buf , " \251 cmt " , " comment " , 0 ) ;
2013-07-07 14:59:47 +03:00
mov_write_string_metadata ( s , pb_buf , " \251 gen " , " genre " , 0 ) ;
mov_write_string_metadata ( s , pb_buf , " \251 cpy " , " copyright " , 0 ) ;
2014-11-22 00:53:42 +02:00
mov_write_string_metadata ( s , pb_buf , " \251 mak " , " make " , 0 ) ;
mov_write_string_metadata ( s , pb_buf , " \251 mod " , " model " , 0 ) ;
2015-05-30 15:32:40 +02:00
mov_write_string_metadata ( s , pb_buf , " \251 xyz " , " location " , 0 ) ;
2014-08-06 17:48:21 +03:00
mov_write_raw_metadata_tag ( s , pb_buf , " XMP_ " , " xmp " ) ;
2012-01-14 16:10:17 +03:00
} else {
/* iTunes meta data */
mov_write_meta_tag ( pb_buf , mov , s ) ;
}
2009-02-26 15:06:49 +02:00
2014-08-04 23:13:44 +03:00
if ( s - > nb_chapters & & ! ( mov - > flags & FF_MOV_FLAG_DISABLE_CHPL ) )
2012-01-14 16:10:17 +03:00
mov_write_chpl_tag ( pb_buf , s ) ;
2010-04-21 09:36:05 +03:00
2011-03-17 09:16:07 +02:00
if ( ( size = avio_close_dyn_buf ( pb_buf , & buf ) ) > 0 ) {
2013-07-07 14:59:47 +03:00
avio_wb32 ( pb , size + 8 ) ;
2011-02-24 08:36:02 +02:00
ffio_wfourcc ( pb , " udta " ) ;
2011-02-21 20:28:17 +02:00
avio_write ( pb , buf , size ) ;
2007-10-24 17:20:15 +03:00
}
2010-07-20 08:23:28 +03:00
av_free ( buf ) ;
2007-10-24 17:20:15 +03:00
return 0 ;
2004-02-14 21:08:09 +02:00
}
2011-02-20 12:04:12 +02:00
/* Write one PSP 'MTDT' text entry: a 10-byte header (size, type, language,
 * unknown) followed by the string as 16-bit wide characters. Nothing is
 * written when the string is not valid UTF-8 (utf8len() < 0). */
static void mov_write_psp_udta_tag(AVIOContext *pb,
                                   const char *str, const char *lang, int type)
{
    int nchars = utf8len(str) + 1;      /* character count incl. NUL */

    if (nchars <= 0)                    /* invalid UTF-8 input */
        return;

    avio_wb16(pb, nchars * 2 + 10);     /* size: UCS-2 payload + header */
    avio_wb32(pb, type);                /* type */
    avio_wb16(pb, language_code(lang)); /* language */
    avio_wb16(pb, 0x01);                /* ? */
    ascii_to_wc(pb, str);
}
2011-02-20 12:04:12 +02:00
/* Write the Sony PSP 'USMT' uuid box wrapping an 'MTDT' metadata atom.
 * Emitted only when a "title" metadata entry exists. Returns the number
 * of bytes written, or 0 when the atom was skipped. */
static int mov_write_uuidusmt_tag(AVIOContext *pb, AVFormatContext *s)
{
    AVDictionaryEntry *title = av_dict_get(s->metadata, "title", NULL, 0);
    int64_t uuid_pos, mtdt_pos;

    if (!title)
        return 0;

    uuid_pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size placeholder*/
    ffio_wfourcc(pb, "uuid");
    ffio_wfourcc(pb, "USMT");
    avio_wb32(pb, 0x21d24fce); /* 96 bit UUID */
    avio_wb32(pb, 0xbb88695c);
    avio_wb32(pb, 0xfac9c740);

    mtdt_pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size placeholder*/
    ffio_wfourcc(pb, "MTDT");
    avio_wb16(pb, 4); /* NOTE(review): looks like an entry count — unconfirmed */

    /* fixed entry of unknown meaning */
    avio_wb16(pb, 0x0C);                 /* size */
    avio_wb32(pb, 0x0B);                 /* type */
    avio_wb16(pb, language_code("und")); /* language */
    avio_wb16(pb, 0x0);                  /* ? */
    avio_wb16(pb, 0x021C);               /* data */

    /* encoder identification is suppressed for bit-exact output */
    if (!(s->flags & AVFMT_FLAG_BITEXACT))
        mov_write_psp_udta_tag(pb, LIBAVCODEC_IDENT, "eng", 0x04);
    mov_write_psp_udta_tag(pb, title->value, "eng", 0x01);
    mov_write_psp_udta_tag(pb, "2006/04/01 11:11:11", "und", 0x03);

    update_size(pb, mtdt_pos);
    return update_size(pb, uuid_pos);
}
2011-11-29 06:03:22 +03:00
/* Group a track's samples into chunks for the stbl tables: consecutive
 * clusters that are contiguous on disk are merged into one chunk as long
 * as the chunk stays below 1 MiB. Assigns chunkNum per cluster and sets
 * trk->chunkCount. Idempotent: does nothing if chunks were already built.
 *
 * Fix: the original dereferenced trk->cluster[0] before any guard; with an
 * empty track (entry == 0) that reads through a possibly NULL/empty cluster
 * array. Guards now run before the first dereference. */
static void build_chunks(MOVTrack *trk)
{
    int i;
    MOVIentry *chunk;
    uint64_t chunkSize;

    if (trk->chunkCount)       /* already built */
        return;
    if (trk->entry <= 0)       /* no samples: nothing to group */
        return;

    chunk           = &trk->cluster[0];
    chunkSize       = chunk->size;
    chunk->chunkNum = 1;
    trk->chunkCount = 1;
    for (i = 1; i < trk->entry; i++) {
        if (chunk->pos + chunkSize == trk->cluster[i].pos &&
            chunkSize + trk->cluster[i].size < (1 << 20)) { /* cap chunks at 1 MiB */
            /* contiguous on disk and under the size cap: extend current chunk */
            chunkSize += trk->cluster[i].size;
            chunk->samples_in_chunk += trk->cluster[i].entries;
        } else {
            /* start a new chunk at this cluster */
            trk->cluster[i].chunkNum = chunk->chunkNum + 1;
            chunk     = &trk->cluster[i];
            chunkSize = chunk->size;
            trk->chunkCount++;
        }
    }
}
2011-02-20 12:04:12 +02:00
/* Write the 'moov' container atom. First finalizes per-track state
 * (times, track IDs, chunk layout, track references), then emits mvhd,
 * an optional iods, one trak per usable track, mvex when fragmenting,
 * and finally the user-data atoms.
 * Returns the atom size, or a negative AVERROR from mov_write_trak_tag(). */
static int mov_write_moov_tag(AVIOContext *pb, MOVMuxContext *mov,
                              AVFormatContext *s)
{
    int i;
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size placeholder*/
    ffio_wfourcc(pb, "moov");

    for (i = 0; i < mov->nb_streams; i++) {
        /* tracks without samples are only kept when fragmenting */
        if (mov->tracks[i].entry <= 0 && !(mov->flags & FF_MOV_FLAG_FRAGMENT))
            continue;

        mov->tracks[i].time     = mov->time;
        mov->tracks[i].track_id = i + 1;  /* track IDs are 1-based */

        if (mov->tracks[i].entry)
            build_chunks(&mov->tracks[i]);
    }

    /* point every input stream's track at the chapter track via 'chap' */
    if (mov->chapter_track)
        for (i = 0; i < s->nb_streams; i++) {
            mov->tracks[i].tref_tag = MKTAG('c','h','a','p');
            mov->tracks[i].tref_id  = mov->tracks[mov->chapter_track].track_id;
        }
    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        if (track->tag == MKTAG('r','t','p',' ')) {
            /* RTP hint tracks reference the media track they describe */
            track->tref_tag = MKTAG('h','i','n','t');
            track->tref_id = mov->tracks[track->src_track].track_id;
        } else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* audio tracks may carry side data naming a fallback track;
             * validate the index before emitting a 'fall' reference */
            int *fallback, size;
            fallback = (int*)av_stream_get_side_data(track->st,
                                                     AV_PKT_DATA_FALLBACK_TRACK,
                                                     &size);
            if (fallback != NULL && size == sizeof(int)) {
                if (*fallback >= 0 && *fallback < mov->nb_streams) {
                    track->tref_tag = MKTAG('f','a','l','l');
                    track->tref_id = mov->tracks[*fallback].track_id;
                }
            }
        }
    }
    for (i = 0; i < mov->nb_streams; i++) {
        if (mov->tracks[i].tag == MKTAG('t','m','c','d')) {
            /* make the source track reference its timecode track back */
            int src_trk = mov->tracks[i].src_track;
            mov->tracks[src_trk].tref_tag = mov->tracks[i].tag;
            mov->tracks[src_trk].tref_id  = mov->tracks[i].track_id;
            //src_trk may have a different timescale than the tmcd track
            mov->tracks[i].track_duration = av_rescale(mov->tracks[src_trk].track_duration,
                                                       mov->tracks[i].timescale,
                                                       mov->tracks[src_trk].timescale);
        }
    }

    mov_write_mvhd_tag(pb, mov);
    if (mov->mode != MODE_MOV && !mov->iods_skip)
        mov_write_iods_tag(pb, mov);
    for (i = 0; i < mov->nb_streams; i++) {
        if (mov->tracks[i].entry > 0 || mov->flags & FF_MOV_FLAG_FRAGMENT) {
            /* internal tracks (e.g. chapters) have no matching AVStream */
            int ret = mov_write_trak_tag(s, pb, mov, &(mov->tracks[i]), i < s->nb_streams ? s->streams[i] : NULL);
            if (ret < 0)
                return ret;
        }
    }
    if (mov->flags & FF_MOV_FLAG_FRAGMENT)
        mov_write_mvex_tag(pb, mov); /* QuickTime requires trak to precede this */

    if (mov->mode == MODE_PSP)
        mov_write_uuidusmt_tag(pb, s);
    else
        mov_write_udta_tag(pb, mov, s);

    return update_size(pb, pos);
}
2012-01-20 14:02:18 +03:00
/* Emit one smooth-streaming manifest <param> element with an int value. */
static void param_write_int(AVIOContext *pb, const char *name, int value)
{
    avio_printf(pb, "<param name=\"%s\" value=\"%d\" valuetype=\"data\"/>\n", name, value);
}
/* Emit one smooth-streaming manifest <param> element with a string value.
 * NOTE(review): the value is not XML-escaped — callers must pass safe text. */
static void param_write_string(AVIOContext *pb, const char *name, const char *value)
{
    avio_printf(pb, "<param name=\"%s\" value=\"%s\" valuetype=\"data\"/>\n", name, value);
}
/* Emit one smooth-streaming manifest <param> element whose value is the
 * hex dump of a byte buffer, truncated to fit a fixed local buffer. */
static void param_write_hex(AVIOContext *pb, const char *name, const uint8_t *value, int len)
{
    char hexstr[150];

    /* two hex digits per input byte plus the terminating NUL must fit */
    len = FFMIN(sizeof(hexstr) / 2 - 1, len);
    ff_data_to_hex(hexstr, value, len, 0);
    hexstr[2 * len] = 0;

    avio_printf(pb, "<param name=\"%s\" value=\"%s\" valuetype=\"data\"/>\n", name, hexstr);
}
static int mov_write_isml_manifest ( AVIOContext * pb , MOVMuxContext * mov )
{
int64_t pos = avio_tell ( pb ) ;
int i ;
2013-08-02 22:09:54 +03:00
static const uint8_t uuid [ ] = {
2012-01-20 14:02:18 +03:00
0xa5 , 0xd4 , 0x0b , 0x30 , 0xe8 , 0x14 , 0x11 , 0xdd ,
0xba , 0x2f , 0x08 , 0x00 , 0x20 , 0x0c , 0x9a , 0x66
} ;
avio_wb32 ( pb , 0 ) ;
ffio_wfourcc ( pb , " uuid " ) ;
avio_write ( pb , uuid , sizeof ( uuid ) ) ;
avio_wb32 ( pb , 0 ) ;
avio_printf ( pb , " <?xml version= \" 1.0 \" encoding= \" utf-8 \" ?> \n " ) ;
avio_printf ( pb , " <smil xmlns= \" http://www.w3.org/2001/SMIL20/Language \" > \n " ) ;
avio_printf ( pb , " <head> \n " ) ;
2015-07-18 13:17:21 +02:00
if ( ! ( mov - > fc - > flags & AVFMT_FLAG_BITEXACT ) )
2014-03-16 18:34:46 +03:00
avio_printf ( pb , " <meta name= \" creator \" content= \" %s \" /> \n " ,
2012-01-20 14:02:18 +03:00
LIBAVFORMAT_IDENT ) ;
avio_printf ( pb , " </head> \n " ) ;
avio_printf ( pb , " <body> \n " ) ;
avio_printf ( pb , " <switch> \n " ) ;
for ( i = 0 ; i < mov - > nb_streams ; i + + ) {
MOVTrack * track = & mov - > tracks [ i ] ;
const char * type ;
2012-01-30 17:19:15 +03:00
/* track->track_id is initialized in write_moov, and thus isn't known
2012-01-20 14:02:18 +03:00
* here yet */
int track_id = i + 1 ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( track - > par - > codec_type = = AVMEDIA_TYPE_VIDEO ) {
2012-01-20 14:02:18 +03:00
type = " video " ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
} else if ( track - > par - > codec_type = = AVMEDIA_TYPE_AUDIO ) {
2012-01-20 14:02:18 +03:00
type = " audio " ;
} else {
continue ;
}
2015-09-15 18:01:32 +02:00
avio_printf ( pb , " <%s systemBitrate= \" % " PRId64 " \" > \n " , type ,
2016-04-10 21:58:15 +02:00
( int64_t ) track - > par - > bit_rate ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
param_write_int ( pb , " systemBitrate " , track - > par - > bit_rate ) ;
2012-01-20 14:02:18 +03:00
param_write_int ( pb , " trackID " , track_id ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( track - > par - > codec_type = = AVMEDIA_TYPE_VIDEO ) {
if ( track - > par - > codec_id = = AV_CODEC_ID_H264 ) {
2012-03-22 14:25:58 +03:00
uint8_t * ptr ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
int size = track - > par - > extradata_size ;
if ( ! ff_avc_write_annexb_extradata ( track - > par - > extradata , & ptr ,
2012-03-22 14:25:58 +03:00
& size ) ) {
param_write_hex ( pb , " CodecPrivateData " ,
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
ptr ? ptr : track - > par - > extradata ,
2012-03-22 14:25:58 +03:00
size ) ;
av_free ( ptr ) ;
}
2012-01-20 14:02:18 +03:00
param_write_string ( pb , " FourCC " , " H264 " ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
} else if ( track - > par - > codec_id = = AV_CODEC_ID_VC1 ) {
2012-01-20 14:02:18 +03:00
param_write_string ( pb , " FourCC " , " WVC1 " ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
param_write_hex ( pb , " CodecPrivateData " , track - > par - > extradata ,
track - > par - > extradata_size ) ;
2012-01-20 14:02:18 +03:00
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
param_write_int ( pb , " MaxWidth " , track - > par - > width ) ;
param_write_int ( pb , " MaxHeight " , track - > par - > height ) ;
param_write_int ( pb , " DisplayWidth " , track - > par - > width ) ;
param_write_int ( pb , " DisplayHeight " , track - > par - > height ) ;
2012-01-20 14:02:18 +03:00
} else {
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( track - > par - > codec_id = = AV_CODEC_ID_AAC ) {
2016-04-10 21:58:15 +02:00
switch ( track - > par - > profile )
2015-12-01 13:22:38 +02:00
{
case FF_PROFILE_AAC_HE_V2 :
param_write_string ( pb , " FourCC " , " AACP " ) ;
break ;
case FF_PROFILE_AAC_HE :
param_write_string ( pb , " FourCC " , " AACH " ) ;
break ;
default :
param_write_string ( pb , " FourCC " , " AACL " ) ;
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
} else if ( track - > par - > codec_id = = AV_CODEC_ID_WMAPRO ) {
2012-01-20 14:02:18 +03:00
param_write_string ( pb , " FourCC " , " WMAP " ) ;
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
param_write_hex ( pb , " CodecPrivateData " , track - > par - > extradata ,
track - > par - > extradata_size ) ;
2012-01-20 14:02:18 +03:00
param_write_int ( pb , " AudioTag " , ff_codec_get_tag ( ff_codec_wav_tags ,
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
track - > par - > codec_id ) ) ;
param_write_int ( pb , " Channels " , track - > par - > channels ) ;
param_write_int ( pb , " SamplingRate " , track - > par - > sample_rate ) ;
2012-01-20 14:02:18 +03:00
param_write_int ( pb , " BitsPerSample " , 16 ) ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
param_write_int ( pb , " PacketSize " , track - > par - > block_align ?
track - > par - > block_align : 4 ) ;
2012-01-20 14:02:18 +03:00
}
avio_printf ( pb , " </%s> \n " , type ) ;
}
avio_printf ( pb , " </switch> \n " ) ;
avio_printf ( pb , " </body> \n " ) ;
avio_printf ( pb , " </smil> \n " ) ;
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2012-01-20 14:02:18 +03:00
}
2011-12-06 18:15:35 +03:00
/* Write the 'mfhd' (movie fragment header) box: a fixed 16-byte box
 * carrying the 1-based sequence number of this fragment. */
static int mov_write_mfhd_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    avio_wb32(pb, 16);             /* box size is constant */
    ffio_wfourcc(pb, "mfhd");
    avio_wb32(pb, 0);              /* version (0) + flags (0) */
    avio_wb32(pb, mov->fragments); /* sequence_number */
    return 0;
}
2015-03-06 11:22:35 +02:00
/* Map a cluster entry's sync-sample status to fragment sample flags:
 * sync samples depend on nothing; all others are marked as depending
 * on other samples and as non-sync. */
static uint32_t get_sample_flags(MOVTrack *track, MOVIentry *entry)
{
    if (entry->flags & MOV_SYNC_SAMPLE)
        return MOV_FRAG_SAMPLE_FLAG_DEPENDS_NO;
    return MOV_FRAG_SAMPLE_FLAG_DEPENDS_YES | MOV_FRAG_SAMPLE_FLAG_IS_NON_SYNC;
}
2013-09-09 15:04:57 +03:00
/* Write the 'tfhd' (track fragment header) box.
 * Selects which optional fields are present via the box flags, then emits
 * them in order: base-data-offset, default duration/size/flags. Also caches
 * the chosen defaults on the track so the trun writer can compare against
 * them. */
static int mov_write_tfhd_tag(AVIOContext *pb, MOVMuxContext *mov,
                              MOVTrack *track, int64_t moof_offset)
{
    int64_t pos = avio_tell(pb);
    uint32_t flags = MOV_TFHD_DEFAULT_SIZE | MOV_TFHD_DEFAULT_DURATION |
                     MOV_TFHD_BASE_DATA_OFFSET;

    /* An empty fragment signals duration-is-empty instead of defaults. */
    if (!track->entry)
        flags |= MOV_TFHD_DURATION_IS_EMPTY;
    else
        flags |= MOV_TFHD_DEFAULT_FLAGS;

    if (mov->flags & FF_MOV_FLAG_OMIT_TFHD_OFFSET)
        flags &= ~MOV_TFHD_BASE_DATA_OFFSET;
    if (mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF) {
        flags &= ~MOV_TFHD_BASE_DATA_OFFSET;
        flags |= MOV_TFHD_DEFAULT_BASE_IS_MOOF;
    }

    /* Don't set a default sample size, the silverlight player refuses
     * to play files with that set. Don't set a default sample duration,
     * WMP freaks out if it is set. Don't set a base data offset, PIFF
     * file format says it MUST NOT be set. */
    if (track->mode == MODE_ISM)
        flags &= ~(MOV_TFHD_DEFAULT_SIZE | MOV_TFHD_DEFAULT_DURATION |
                   MOV_TFHD_BASE_DATA_OFFSET);

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "tfhd");
    avio_w8(pb, 0);   /* version */
    avio_wb24(pb, flags);

    avio_wb32(pb, track->track_id); /* track-id */
    if (flags & MOV_TFHD_BASE_DATA_OFFSET)
        avio_wb64(pb, moof_offset);
    if (flags & MOV_TFHD_DEFAULT_DURATION) {
        track->default_duration = get_cluster_duration(track, 0);
        avio_wb32(pb, track->default_duration);
    }
    if (flags & MOV_TFHD_DEFAULT_SIZE) {
        track->default_size = track->entry ? track->cluster[0].size : 1;
        avio_wb32(pb, track->default_size);
    } else
        track->default_size = -1;

    if (flags & MOV_TFHD_DEFAULT_FLAGS) {
        /* Set the default flags based on the second sample, if available.
         * If the first sample is different, that can be signaled via a
         * separate field (first-sample-flags in trun). */
        if (track->entry > 1)
            track->default_sample_flags =
                get_sample_flags(track, &track->cluster[1]);
        else
            track->default_sample_flags =
                track->par->codec_type == AVMEDIA_TYPE_VIDEO ?
                (MOV_FRAG_SAMPLE_FLAG_DEPENDS_YES |
                 MOV_FRAG_SAMPLE_FLAG_IS_NON_SYNC) :
                MOV_FRAG_SAMPLE_FLAG_DEPENDS_NO;
        avio_wb32(pb, track->default_sample_flags);
    }

    return update_size(pb, pos);
}
2013-09-09 15:04:57 +03:00
static int mov_write_trun_tag ( AVIOContext * pb , MOVMuxContext * mov ,
2013-09-10 15:24:24 +03:00
MOVTrack * track , int moof_size ,
int first , int end )
2012-01-05 14:57:05 +03:00
{
int64_t pos = avio_tell ( pb ) ;
2012-02-16 22:58:09 +03:00
uint32_t flags = MOV_TRUN_DATA_OFFSET ;
2011-12-06 18:15:35 +03:00
int i ;
2012-01-05 14:57:05 +03:00
2013-09-10 15:24:24 +03:00
for ( i = first ; i < end ; i + + ) {
2012-02-27 01:17:13 +03:00
if ( get_cluster_duration ( track , i ) ! = track - > default_duration )
2012-02-16 22:58:09 +03:00
flags | = MOV_TRUN_SAMPLE_DURATION ;
2012-01-05 14:57:05 +03:00
if ( track - > cluster [ i ] . size ! = track - > default_size )
2012-02-16 22:58:09 +03:00
flags | = MOV_TRUN_SAMPLE_SIZE ;
2013-09-10 15:24:24 +03:00
if ( i > first & & get_sample_flags ( track , & track - > cluster [ i ] ) ! = track - > default_sample_flags )
2012-02-16 22:58:09 +03:00
flags | = MOV_TRUN_SAMPLE_FLAGS ;
2012-01-05 14:57:05 +03:00
}
2015-03-06 11:26:40 +02:00
if ( ! ( flags & MOV_TRUN_SAMPLE_FLAGS ) & & track - > entry > 0 & &
get_sample_flags ( track , & track - > cluster [ 0 ] ) ! = track - > default_sample_flags )
2012-02-16 22:58:09 +03:00
flags | = MOV_TRUN_FIRST_SAMPLE_FLAGS ;
2012-01-05 14:57:05 +03:00
if ( track - > flags & MOV_TRACK_CTTS )
2012-02-16 22:58:09 +03:00
flags | = MOV_TRUN_SAMPLE_CTS ;
2012-01-05 14:57:05 +03:00
avio_wb32 ( pb , 0 ) ; /* size placeholder */
ffio_wfourcc ( pb , " trun " ) ;
avio_w8 ( pb , 0 ) ; /* version */
avio_wb24 ( pb , flags ) ;
2013-09-10 15:24:24 +03:00
avio_wb32 ( pb , end - first ) ; /* sample count */
2013-09-09 15:04:57 +03:00
if ( mov - > flags & FF_MOV_FLAG_OMIT_TFHD_OFFSET & &
2014-10-29 23:26:40 +02:00
! ( mov - > flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF ) & &
2014-10-29 09:20:51 +02:00
! mov - > first_trun )
2013-09-09 15:04:57 +03:00
avio_wb32 ( pb , 0 ) ; /* Later tracks follow immediately after the previous one */
else
avio_wb32 ( pb , moof_size + 8 + track - > data_offset +
2013-09-10 15:24:24 +03:00
track - > cluster [ first ] . pos ) ; /* data offset */
2012-02-16 22:58:09 +03:00
if ( flags & MOV_TRUN_FIRST_SAMPLE_FLAGS )
2013-09-10 15:24:24 +03:00
avio_wb32 ( pb , get_sample_flags ( track , & track - > cluster [ first ] ) ) ;
2012-01-05 14:57:05 +03:00
2013-09-10 15:24:24 +03:00
for ( i = first ; i < end ; i + + ) {
2012-02-16 22:58:09 +03:00
if ( flags & MOV_TRUN_SAMPLE_DURATION )
2012-02-27 01:17:13 +03:00
avio_wb32 ( pb , get_cluster_duration ( track , i ) ) ;
2012-02-16 22:58:09 +03:00
if ( flags & MOV_TRUN_SAMPLE_SIZE )
2012-01-05 14:57:05 +03:00
avio_wb32 ( pb , track - > cluster [ i ] . size ) ;
2012-02-16 22:58:09 +03:00
if ( flags & MOV_TRUN_SAMPLE_FLAGS )
2012-01-05 14:57:05 +03:00
avio_wb32 ( pb , get_sample_flags ( track , & track - > cluster [ i ] ) ) ;
2012-02-16 22:58:09 +03:00
if ( flags & MOV_TRUN_SAMPLE_CTS )
2012-01-05 14:57:05 +03:00
avio_wb32 ( pb , track - > cluster [ i ] . cts ) ;
}
2014-10-29 09:20:51 +02:00
mov - > first_trun = 0 ;
2012-01-30 17:19:15 +03:00
return update_size ( pb , pos ) ;
2012-01-05 14:57:05 +03:00
}
2012-01-09 18:58:26 +03:00
/* Write the smooth-streaming 'tfxd' UUID box (TfxdBox): the absolute
 * fragment start time and fragment duration, both in track timescale. */
static int mov_write_tfxd_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    static const uint8_t uuid[] = {
        0x6d, 0x1d, 0x9b, 0x05, 0x42, 0xd5, 0x44, 0xe6,
        0x80, 0xe2, 0x14, 0x1d, 0xaf, 0xf7, 0x57, 0xb2
    };

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "uuid");
    avio_write(pb, uuid, sizeof(uuid));
    avio_w8(pb, 1);   /* version */
    avio_wb24(pb, 0); /* flags */
    /* absolute start time of this fragment */
    avio_wb64(pb, track->start_dts + track->frag_start +
                  track->cluster[0].cts);
    /* fragment duration */
    avio_wb64(pb, track->end_pts -
                  (track->cluster[0].dts + track->cluster[0].cts));

    return update_size(pb, pos);
}
/* Rewrite (in place, via seek) the smooth-streaming 'tfrf' UUID box for a
 * previously written fragment, filling in the time/duration of the
 * fragments that followed it. Any lookahead slots not yet known are
 * covered by a trailing 'free' box of matching size. */
static int mov_write_tfrf_tag(AVIOContext *pb, MOVMuxContext *mov,
                              MOVTrack *track, int entry)
{
    int n = track->nb_frag_info - 1 - entry, i;
    int size = 8 + 16 + 4 + 1 + 16 * n;
    static const uint8_t uuid[] = {
        0xd4, 0x80, 0x7e, 0xf2, 0xca, 0x39, 0x46, 0x95,
        0x8e, 0x54, 0x26, 0xcb, 0x9e, 0x46, 0xa7, 0x9f
    };

    if (entry < 0)
        return 0;

    avio_seek(pb, track->frag_info[entry].tfrf_offset, SEEK_SET);
    avio_wb32(pb, size);
    ffio_wfourcc(pb, "uuid");
    avio_write(pb, uuid, sizeof(uuid));
    avio_w8(pb, 1);   /* version */
    avio_wb24(pb, 0); /* flags */
    avio_w8(pb, n);   /* fragment count */
    for (i = 0; i < n; i++) {
        int index = entry + 1 + i;
        avio_wb64(pb, track->frag_info[index].time);
        avio_wb64(pb, track->frag_info[index].duration);
    }
    if (n < mov->ism_lookahead) {
        /* Pad the unused lookahead slots with a free box. */
        int free_size = 16 * (mov->ism_lookahead - n);
        avio_wb32(pb, free_size);
        ffio_wfourcc(pb, "free");
        ffio_fill(pb, 0, free_size - 8);
    }

    return 0;
}
/* Refresh the tfrf boxes of the last ism_lookahead fragments, then restore
 * the output position (mov_write_tfrf_tag seeks backwards). */
static int mov_write_tfrf_tags(AVIOContext *pb, MOVMuxContext *mov,
                               MOVTrack *track)
{
    int64_t saved_pos = avio_tell(pb);
    int i;

    for (i = 0; i < mov->ism_lookahead; i++) {
        /* Update the tfrf tag for the last ism_lookahead fragments,
         * nb_frag_info - 1 is the next fragment to be written. */
        mov_write_tfrf_tag(pb, mov, track, track->nb_frag_info - 2 - i);
    }
    avio_seek(pb, saved_pos, SEEK_SET);
    return 0;
}
2014-10-21 11:42:27 +03:00
/* Record per-track metadata (offset, size, time, duration) for the fragment
 * about to be written, growing each track's frag_info array as needed, and
 * refresh the live tfrf boxes. Returns 0 or AVERROR(ENOMEM). */
static int mov_add_tfra_entries(AVIOContext *pb, MOVMuxContext *mov, int tracks,
                                int size)
{
    int i;

    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        MOVFragmentInfo *info;

        /* Skip tracks not selected, or tracks with nothing in this fragment. */
        if ((tracks >= 0 && i != tracks) || !track->entry)
            continue;

        track->nb_frag_info++;
        if (track->nb_frag_info >= track->frag_info_capacity) {
            unsigned new_capacity =
                track->nb_frag_info + MOV_FRAG_INFO_ALLOC_INCREMENT;
            if (av_reallocp_array(&track->frag_info,
                                  new_capacity,
                                  sizeof(*track->frag_info)))
                return AVERROR(ENOMEM);
            track->frag_info_capacity = new_capacity;
        }
        info = &track->frag_info[track->nb_frag_info - 1];
        info->offset = avio_tell(pb);
        info->size   = size;
        // Try to recreate the original pts for the first packet
        // from the fields we have stored
        info->time = track->start_dts + track->frag_start +
                     track->cluster[0].cts;
        info->duration = track->end_pts -
                         (track->cluster[0].dts + track->cluster[0].cts);
        // If the pts is less than zero, we will have trimmed
        // away parts of the media track using an edit list,
        // and the corresponding start presentation time is zero.
        if (info->time < 0) {
            info->duration += info->time;
            info->time      = 0;
        }
        info->tfrf_offset = 0;
        mov_write_tfrf_tags(pb, mov, track);
    }
    return 0;
}
2014-10-06 23:20:50 +03:00
/* Write the 'tfdt' (track fragment decode time) box, version 1 (64-bit
 * baseMediaDecodeTime = decode time of the fragment's first sample). */
static int mov_write_tfdt_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "tfdt");
    avio_w8(pb, 1);   /* version */
    avio_wb24(pb, 0); /* flags */
    avio_wb64(pb, track->frag_start);
    return update_size(pb, pos);
}
2012-01-09 18:58:26 +03:00
/* Write a traf (track fragment) atom for one track: tfhd, tfdt (except for
 * ISM/smooth streaming), one trun per contiguous run of samples, and the
 * ISM-specific tfxd/lookahead placeholder. */
static int mov_write_traf_tag(AVIOContext *pb, MOVMuxContext *mov,
                              MOVTrack *track, int64_t moof_offset,
                              int moof_size)
{
    int64_t pos = avio_tell(pb);
    int i, start = 0;

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "traf");

    mov_write_tfhd_tag(pb, mov, track, moof_offset);
    if (mov->mode != MODE_ISM)
        mov_write_tfdt_tag(pb, track);
    /* Emit one trun per run of samples that are contiguous in the mdat;
     * a gap in file positions (caused by interleaving) forces a new trun. */
    for (i = 1; i < track->entry; i++) {
        if (track->cluster[i].pos != track->cluster[i - 1].pos + track->cluster[i - 1].size) {
            mov_write_trun_tag(pb, mov, track, moof_size, start, i);
            start = i;
        }
    }
    mov_write_trun_tag(pb, mov, track, moof_size, start, track->entry);
    if (mov->mode == MODE_ISM) {
        mov_write_tfxd_tag(pb, track);

        if (mov->ism_lookahead) {
            int i, size = 16 + 4 + 1 + 16 * mov->ism_lookahead;

            /* Remember where the reserved space begins so the tfrf box can
             * be written over this "free" atom once the timing of the
             * following fragments is known. */
            if (track->nb_frag_info > 0) {
                MOVFragmentInfo *info = &track->frag_info[track->nb_frag_info - 1];
                if (!info->tfrf_offset)
                    info->tfrf_offset = avio_tell(pb);
            }
            avio_wb32(pb, 8 + size);
            ffio_wfourcc(pb, "free");
            for (i = 0; i < size; i++)
                avio_w8(pb, 0);
        }
    }

    return update_size(pb, pos);
}
2013-09-09 12:05:01 +03:00
/* Write the body of a moof atom: mfhd plus one traf per selected track.
 * tracks < 0 selects all tracks; tracks without buffered samples are
 * skipped.  moof_size is 0 on the sizing pass (into a null buffer) and the
 * measured size on the final pass. */
static int mov_write_moof_tag_internal(AVIOContext *pb, MOVMuxContext *mov,
                                       int tracks, int moof_size)
{
    int64_t pos = avio_tell(pb);
    int i;

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "moof");
    mov->first_trun = 1;

    mov_write_mfhd_tag(pb, mov);
    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        if (tracks >= 0 && i != tracks)
            continue;
        if (!track->entry)
            continue;
        mov_write_traf_tag(pb, mov, track, pos, moof_size);
    }

    return update_size(pb, pos);
}
2014-10-05 23:51:47 +03:00
/* Write a sidx (segment index) atom for one track.
 *
 * If the track has buffered samples (track->entry != 0), the sidx describes
 * the single fragment that follows (ref_size bytes).  Otherwise a global
 * index is built from the accumulated frag_info entries.
 *
 * total_sidx_size is the combined size of all sidx atoms being written for
 * this moof, so that first_offset can skip the sidx atoms that follow this
 * one and point at the referenced moof. */
static int mov_write_sidx_tag(AVIOContext *pb,
                              MOVTrack *track, int ref_size, int total_sidx_size)
{
    int64_t pos = avio_tell(pb), offset_pos, end_pos;
    int64_t presentation_time, duration, offset;
    int starts_with_SAP, i, entries;

    if (track->entry) {
        entries = 1;
        presentation_time = track->start_dts + track->frag_start +
                            track->cluster[0].cts;
        duration = track->end_pts -
                   (track->cluster[0].dts + track->cluster[0].cts);
        starts_with_SAP = track->cluster[0].flags & MOV_SYNC_SAMPLE;

        // pts<0 should be cut away using edts
        if (presentation_time < 0) {
            duration += presentation_time;
            presentation_time = 0;
        }
    } else {
        entries = track->nb_frag_info;
        if (entries <= 0)
            return 0;
        presentation_time = track->frag_info[0].time;
    }

    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "sidx");
    avio_w8(pb, 1);   /* version */
    avio_wb24(pb, 0); /* flags */
    avio_wb32(pb, track->track_id);  /* reference_ID */
    avio_wb32(pb, track->timescale); /* timescale */
    avio_wb64(pb, presentation_time); /* earliest_presentation_time */
    offset_pos = avio_tell(pb);
    avio_wb64(pb, 0); /* first_offset (offset to referenced moof), patched below */
    avio_wb16(pb, 0); /* reserved */

    avio_wb16(pb, entries); /* reference_count */
    for (i = 0; i < entries; i++) {
        if (!track->entry) {
            /* Validate that every adjacent pair of fragments is contiguous.
             * Fixed off-by-one: was "i > 1", which skipped checking the
             * first pair (frag_info[0]/frag_info[1]); frag_info[i - 1] is
             * valid for any i >= 1. */
            if (i > 0 && track->frag_info[i].offset !=
                         track->frag_info[i - 1].offset + track->frag_info[i - 1].size) {
                av_log(NULL, AV_LOG_ERROR, "Non-consecutive fragments, writing incorrect sidx\n");
            }
            duration        = track->frag_info[i].duration;
            ref_size        = track->frag_info[i].size;
            starts_with_SAP = 1;
        }
        avio_wb32(pb, (0 << 31) | (ref_size & 0x7fffffff)); /* reference_type (0 = media) | referenced_size */
        avio_wb32(pb, duration); /* subsegment_duration */
        avio_wb32(pb, (starts_with_SAP << 31) | (0 << 28) | 0); /* starts_with_SAP | SAP_type | SAP_delta_time */
    }

    /* Patch first_offset: distance from the end of all sidx atoms in this
     * group to the referenced material. */
    end_pos = avio_tell(pb);
    offset  = pos + total_sidx_size - end_pos;
    avio_seek(pb, offset_pos, SEEK_SET);
    avio_wb64(pb, offset);
    avio_seek(pb, end_pos, SEEK_SET);
    return update_size(pb, pos);
}
/* Write one sidx atom per selected track.  A dry-run pass into a null
 * buffer measures the combined size of all sidx atoms first, since each
 * sidx's first_offset field must skip the sidx atoms written after it. */
static int mov_write_sidx_tags(AVIOContext *pb, MOVMuxContext *mov,
                               int tracks, int ref_size)
{
    AVIOContext *out;
    int total_size = 0;
    int pass, i, ret;

    for (pass = 0; pass < 2; pass++) {
        // Pass 0 computes the total size of all sidx atoms; pass 1 writes
        // them for real.  This would be much simpler if we'd only write one
        // sidx atom, for the first track in the moof.
        if (pass == 0) {
            ret = ffio_open_null_buf(&out);
            if (ret < 0)
                return ret;
        } else {
            out = pb;
        }
        for (i = 0; i < mov->nb_streams; i++) {
            MOVTrack *track = &mov->tracks[i];
            if (tracks >= 0 && i != tracks)
                continue;
            // When writing a sidx for the full file, entry is 0, but
            // we want to include all tracks. ref_size is 0 in this case,
            // since we read it from frag_info instead.
            if (!track->entry && ref_size > 0)
                continue;
            total_size -= mov_write_sidx_tag(out, track, ref_size, total_size);
        }
        if (pass == 0)
            total_size = ffio_close_null_buf(out);
    }
    return 0;
}
/* Write the moof atom for the selected track(s).  The moof is first
 * serialized into a null buffer to learn its size — needed for the trun
 * data offsets and for sidx/tfra entries — then written for real. */
static int mov_write_moof_tag(AVIOContext *pb, MOVMuxContext *mov, int tracks,
                              int64_t mdat_size)
{
    AVIOContext *avio_buf;
    int ret, moof_size;

    if ((ret = ffio_open_null_buf(&avio_buf)) < 0)
        return ret;
    mov_write_moof_tag_internal(avio_buf, mov, tracks, 0);
    moof_size = ffio_close_null_buf(avio_buf);

    /* moof_size + 8 accounts for the mdat atom header that follows. */
    if (mov->flags & FF_MOV_FLAG_DASH && !(mov->flags & FF_MOV_FLAG_GLOBAL_SIDX))
        mov_write_sidx_tags(pb, mov, tracks, moof_size + 8 + mdat_size);

    if ((ret = mov_add_tfra_entries(pb, mov, tracks, moof_size + 8 + mdat_size)) < 0)
        return ret;

    return mov_write_moof_tag_internal(pb, mov, tracks, moof_size);
}
2012-01-05 14:57:05 +03:00
/* Write a tfra (track fragment random access) atom: one entry per fragment
 * mapping a presentation time to the byte offset of its moof, so players
 * can seek without parsing the whole file. */
static int mov_write_tfra_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    int i;

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "tfra");
    avio_w8(pb, 1);   /* version 1: 64-bit time/offset fields */
    avio_wb24(pb, 0); /* flags */

    avio_wb32(pb, track->track_id);
    avio_wb32(pb, 0); /* length of traf/trun/sample num */
    avio_wb32(pb, track->nb_frag_info);
    for (i = 0; i < track->nb_frag_info; i++) {
        avio_wb64(pb, track->frag_info[i].time);
        avio_wb64(pb, track->frag_info[i].offset + track->data_offset);
        avio_w8(pb, 1); /* traf number */
        avio_w8(pb, 1); /* trun number */
        avio_w8(pb, 1); /* sample number */
    }

    return update_size(pb, pos);
}
/* Write the mfra (movie fragment random access) atom: one tfra per track
 * that has fragment info, followed by the mfro whose size field lets a
 * reader locate the mfra by reading backwards from end of file. */
static int mov_write_mfra_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    int64_t pos = avio_tell(pb);
    int i;

    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "mfra");
    /* An empty mfra atom is enough to indicate to the publishing point that
     * the stream has ended. */
    if (mov->flags & FF_MOV_FLAG_ISML)
        return update_size(pb, pos);

    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        if (track->nb_frag_info)
            mov_write_tfra_tag(pb, track);
    }

    avio_wb32(pb, 16); /* mfro size */
    ffio_wfourcc(pb, "mfro");
    avio_wb32(pb, 0); /* version + flags */
    avio_wb32(pb, avio_tell(pb) + 4 - pos); /* total mfra size, incl. this field */

    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/* Begin the mdat atom.  An 8-byte "wide"/"free" atom is written in front of
 * it so the header can later be rewritten in place as a 64-bit (extended
 * size) mdat if the payload exceeds what a 32-bit size field can hold. */
static int mov_write_mdat_tag(AVIOContext *pb, MOVMuxContext *mov)
{
    avio_wb32(pb, 8); // placeholder for extended size field (64 bit)
    ffio_wfourcc(pb, mov->mode == MODE_MOV ? "wide" : "free");

    mov->mdat_pos = avio_tell(pb); /* remembered so the size can be patched later */
    avio_wb32(pb, 0); /* size placeholder */
    ffio_wfourcc(pb, "mdat");
    return 0;
}
/* TODO: This needs to be more general */
/* Write the ftyp atom: major brand, minor version and compatible brands,
 * chosen from the muxer mode, the user-supplied major_brand option, the
 * flags, and whether the streams include H.264/video. */
static int mov_write_ftyp_tag(AVIOContext *pb, AVFormatContext *s)
{
    MOVMuxContext *mov = s->priv_data;
    int64_t pos = avio_tell(pb);
    int has_h264 = 0, has_video = 0;
    int minor = 0x200;
    int i;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            has_video = 1;
        if (st->codecpar->codec_id == AV_CODEC_ID_H264)
            has_h264 = 1;
    }

    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "ftyp");

    /* Major brand: explicit user override first, then per-mode defaults. */
    if (mov->major_brand && strlen(mov->major_brand) >= 4)
        ffio_wfourcc(pb, mov->major_brand);
    else if (mov->mode == MODE_3GP) {
        ffio_wfourcc(pb, has_h264 ? "3gp6" : "3gp4");
        minor = has_h264 ? 0x100 : 0x200;
    } else if (mov->mode & MODE_3G2) {
        ffio_wfourcc(pb, has_h264 ? "3g2b" : "3g2a");
        minor = has_h264 ? 0x20000 : 0x10000;
    } else if (mov->mode == MODE_PSP)
        ffio_wfourcc(pb, "MSNV");
    else if (mov->mode == MODE_MP4 && mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF)
        ffio_wfourcc(pb, "iso5"); // Required when using default-base-is-moof
    else if (mov->mode == MODE_MP4)
        ffio_wfourcc(pb, "isom");
    else if (mov->mode == MODE_IPOD)
        ffio_wfourcc(pb, has_video ? "M4V " : "M4A ");
    else if (mov->mode == MODE_ISM)
        ffio_wfourcc(pb, "isml");
    else if (mov->mode == MODE_F4V)
        ffio_wfourcc(pb, "f4v ");
    else
        ffio_wfourcc(pb, "qt  ");

    avio_wb32(pb, minor); /* minor version */

    /* Compatible brands. */
    if (mov->mode == MODE_MOV)
        ffio_wfourcc(pb, "qt  ");
    else if (mov->mode == MODE_ISM) {
        ffio_wfourcc(pb, "piff");
    } else if (!(mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF)) {
        ffio_wfourcc(pb, "isom");
        ffio_wfourcc(pb, "iso2");
        if (has_h264)
            ffio_wfourcc(pb, "avc1");
    }

    // We add tfdt atoms when fragmenting, signal this with the iso6 compatible
    // brand. This is compatible with users that don't understand tfdt.
    if (mov->flags & FF_MOV_FLAG_FRAGMENT && mov->mode != MODE_ISM)
        ffio_wfourcc(pb, "iso6");

    if (mov->mode == MODE_3GP)
        ffio_wfourcc(pb, has_h264 ? "3gp6" : "3gp4");
    else if (mov->mode & MODE_3G2)
        ffio_wfourcc(pb, has_h264 ? "3g2b" : "3g2a");
    else if (mov->mode == MODE_PSP)
        ffio_wfourcc(pb, "MSNV");
    else if (mov->mode == MODE_MP4)
        ffio_wfourcc(pb, "mp41");

    if (mov->flags & FF_MOV_FLAG_DASH && mov->flags & FF_MOV_FLAG_GLOBAL_SIDX)
        ffio_wfourcc(pb, "dash");

    return update_size(pb, pos);
}
2011-02-20 12:04:12 +02:00
/* Write the Sony PSP "PROF" uuid atom describing the expected audio (mp4a)
 * and video (avc1/mp4v) profiles.  Assumes stream 0 is video and stream 1
 * is audio, as enforced for PSP mode by mov_write_identification(). */
static void mov_write_uuidprof_tag(AVIOContext *pb, AVFormatContext *s)
{
    AVStream          *video_st  = s->streams[0];
    AVCodecParameters *video_par = s->streams[0]->codecpar;
    AVCodecParameters *audio_par = s->streams[1]->codecpar;
    int audio_rate = audio_par->sample_rate;
    // TODO: should be avg_frame_rate
    int frame_rate = ((video_st->time_base.den) * (0x10000)) / (video_st->time_base.num);
    int audio_kbitrate = audio_par->bit_rate / 1000;
    /* Video bitrate is capped so that audio + video fit in 800 kbit/s. */
    int video_kbitrate = FFMIN(video_par->bit_rate / 1000, 800 - audio_kbitrate);

    avio_wb32(pb, 0x94); /* size */
    ffio_wfourcc(pb, "uuid");
    ffio_wfourcc(pb, "PROF");

    avio_wb32(pb, 0x21d24fce); /* 96 bit UUID */
    avio_wb32(pb, 0xbb88695c);
    avio_wb32(pb, 0xfac9c740);

    avio_wb32(pb, 0x0); /* ? */
    avio_wb32(pb, 0x3); /* 3 sections ? */

    avio_wb32(pb, 0x14); /* size */
    ffio_wfourcc(pb, "FPRF");
    avio_wb32(pb, 0x0); /* ? */
    avio_wb32(pb, 0x0); /* ? */
    avio_wb32(pb, 0x0); /* ? */

    avio_wb32(pb, 0x2c); /* size */
    ffio_wfourcc(pb, "APRF"); /* audio */
    avio_wb32(pb, 0x0);
    avio_wb32(pb, 0x2); /* TrackID */
    ffio_wfourcc(pb, "mp4a");
    avio_wb32(pb, 0x20f);
    avio_wb32(pb, 0x0);
    avio_wb32(pb, audio_kbitrate);
    avio_wb32(pb, audio_kbitrate);
    avio_wb32(pb, audio_rate);
    avio_wb32(pb, audio_par->channels);

    avio_wb32(pb, 0x34); /* size */
    ffio_wfourcc(pb, "VPRF"); /* video */
    avio_wb32(pb, 0x0);
    avio_wb32(pb, 0x1); /* TrackID */
    if (video_par->codec_id == AV_CODEC_ID_H264) {
        ffio_wfourcc(pb, "avc1");
        avio_wb16(pb, 0x014D); /* profile/level hints for AVC */
        avio_wb16(pb, 0x0015);
    } else {
        ffio_wfourcc(pb, "mp4v");
        avio_wb16(pb, 0x0000);
        avio_wb16(pb, 0x0103);
    }
    avio_wb32(pb, 0x0);
    avio_wb32(pb, video_kbitrate);
    avio_wb32(pb, video_kbitrate);
    avio_wb32(pb, frame_rate);
    avio_wb32(pb, frame_rate);
    avio_wb16(pb, video_par->width);
    avio_wb16(pb, video_par->height);
    avio_wb32(pb, 0x010001); /* ? */
}
2014-12-18 23:52:42 +02:00
/* Write the file-identification atoms: ftyp, and for PSP mode the PROF uuid
 * atom, after verifying the exactly-one-video/one-audio stream layout that
 * PSP files require. */
static int mov_write_identification(AVIOContext *pb, AVFormatContext *s)
{
    MOVMuxContext *mov = s->priv_data;
    int i;

    mov_write_ftyp_tag(pb, s);
    if (mov->mode == MODE_PSP) {
        int nb_video = 0, nb_audio = 0, nb_other = 0;
        for (i = 0; i < s->nb_streams; i++) {
            switch (s->streams[i]->codecpar->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                nb_video++;
                break;
            case AVMEDIA_TYPE_AUDIO:
                nb_audio++;
                break;
            default:
                nb_other++;
                break;
            }
        }
        if (nb_video != 1 || nb_audio != 1 || nb_other) {
            av_log(s, AV_LOG_ERROR, "PSP mode need one video and one audio stream\n");
            return AVERROR(EINVAL);
        }
        mov_write_uuidprof_tag(pb, s);
    }
    return 0;
}
2009-05-15 09:11:53 +03:00
/* Scan an MPEG-2 packet for GOP and picture headers to classify the frame:
 * a non-reordered I picture (temporal_reference 0, or inside a closed GOP)
 * is a full sync sample, otherwise only a partial one. */
static int mov_parse_mpeg2_frame(AVPacket *pkt, uint32_t *flags)
{
    uint32_t code = 0xffffffff; /* rolling 32-bit start-code window */
    int closed_gop = 0;
    int i;

    for (i = 0; i < pkt->size - 4; i++) {
        code = (code << 8) + pkt->data[i];
        if (code == 0x1b8) {        // GOP header
            closed_gop = (pkt->data[i + 4] >> 6) & 0x01;
        } else if (code == 0x100) { // picture header
            int temp_ref = (pkt->data[i + 1] << 2) | (pkt->data[i + 2] >> 6);
            if (!temp_ref || closed_gop) // I picture is not reordered
                *flags = MOV_SYNC_SAMPLE;
            else
                *flags = MOV_PARTIAL_SYNC_SAMPLE;
            break;
        }
    }
    return 0;
}
2015-01-20 16:54:03 +02:00
/* Decide which VC-1 samples count as sync samples.
 *
 * Once a keyframe is seen carrying a sequence header and/or entry point
 * header, later keyframes lacking those headers are demoted: previously
 * marked sync samples are cleared and the requirement is remembered in
 * vc1_info.packet_seq/packet_entry.  The headers of the first packet are
 * tracked separately, since at that point the stream's normal keyframe
 * header pattern is not yet known. */
static void mov_parse_vc1_frame(AVPacket *pkt, MOVTrack *trk)
{
    const uint8_t *start, *next, *end = pkt->data + pkt->size;
    int seq = 0, entry = 0;
    int key = pkt->flags & AV_PKT_FLAG_KEY;

    /* Scan the start codes present in this packet. */
    start = find_next_marker(pkt->data, end);
    for (next = start; next < end; start = next) {
        next = find_next_marker(start + 4, end);
        switch (AV_RB32(start)) {
        case VC1_CODE_SEQHDR:
            seq = 1;
            break;
        case VC1_CODE_ENTRYPOINT:
            entry = 1;
            break;
        case VC1_CODE_SLICE:
            trk->vc1_info.slices = 1;
            break;
        }
    }
    if (!trk->entry && trk->vc1_info.first_packet_seen)
        trk->vc1_info.first_frag_written = 1;
    if (!trk->entry && !trk->vc1_info.first_frag_written) {
        /* First packet in first fragment */
        trk->vc1_info.first_packet_seq = seq;
        trk->vc1_info.first_packet_entry = entry;
        trk->vc1_info.first_packet_seen = 1;
    } else if ((seq && !trk->vc1_info.packet_seq) ||
               (entry && !trk->vc1_info.packet_entry)) {
        /* This packet carries a header type earlier keyframes lacked;
         * already-marked sync samples are no longer valid seek points. */
        int i;
        for (i = 0; i < trk->entry; i++)
            trk->cluster[i].flags &= ~MOV_SYNC_SAMPLE;
        trk->has_keyframes = 0;
        if (seq)
            trk->vc1_info.packet_seq = 1;
        if (entry)
            trk->vc1_info.packet_entry = 1;
        if (!trk->vc1_info.first_frag_written) {
            /* First fragment */
            if ((!seq || trk->vc1_info.first_packet_seq) &&
                (!entry || trk->vc1_info.first_packet_entry)) {
                /* First packet had the same headers as this one, readd the
                 * sync sample flag. */
                trk->cluster[0].flags |= MOV_SYNC_SAMPLE;
                trk->has_keyframes = 1;
            }
        }
    }
    /* A keyframe only qualifies if it carries every header type this
     * stream's keyframes have been seen to require. */
    if (trk->vc1_info.packet_seq && trk->vc1_info.packet_entry)
        key = seq && entry;
    else if (trk->vc1_info.packet_seq)
        key = seq;
    else if (trk->vc1_info.packet_entry)
        key = entry;
    if (key) {
        trk->cluster[trk->entry].flags |= MOV_SYNC_SAMPLE;
        trk->has_keyframes++;
    }
}
2013-09-10 15:35:41 +03:00
/* Append a track's buffered media data to the muxer-wide mdat buffer and
 * rebase the not-yet-flushed cluster positions to their new offsets within
 * it.  Used when interleaving multiple tracks into a single fragment mdat. */
static int mov_flush_fragment_interleaving(AVFormatContext *s, MOVTrack *track)
{
    MOVMuxContext *mov = s->priv_data;
    int ret, buf_size;
    uint8_t *buf;
    int i, offset;

    if (!track->mdat_buf)
        return 0;
    if (!mov->mdat_buf) {
        if ((ret = avio_open_dyn_buf(&mov->mdat_buf)) < 0)
            return ret;
    }
    buf_size = avio_close_dyn_buf(track->mdat_buf, &buf);
    track->mdat_buf = NULL;

    offset = avio_tell(mov->mdat_buf); /* taken before appending */
    avio_write(mov->mdat_buf, buf, buf_size);
    av_free(buf);

    /* Shift the positions of the samples that were just moved. */
    for (i = track->entries_flushed; i < track->entry; i++)
        track->cluster[i].pos += offset;
    track->entries_flushed = track->entry;
    return 0;
}
2015-10-20 21:30:03 +02:00
/**
 * Flush all buffered sample data of the current fragment to the output.
 *
 * Handles three phases:
 *   1. If the initial moov has not been written yet, write it (preceded by
 *      ftyp identification when delay_moov is set), followed by the buffered
 *      initial mdat (unless delay_moov, where only the moov is emitted and a
 *      second call writes the first moof+mdat).
 *   2. If frag_interleave is enabled, gather all per-track buffers into the
 *      shared interleaved mdat buffer first.
 *   3. Write one moof+mdat pair per fragment (or one pair per track when
 *      FF_MOV_FLAG_SEPARATE_MOOF is set), then reset per-track entry state.
 *
 * @param s     the muxer context
 * @param force if nonzero, write the initial moov even if some tracks have
 *              no data yet
 * @return 0 on success (including the no-fragmenting no-op case), a negative
 *         AVERROR code on failure.
 */
static int mov_flush_fragment(AVFormatContext *s, int force)
{
    MOVMuxContext *mov = s->priv_data;
    int i, first_track = -1;
    int64_t mdat_size = 0;
    int ret;

    /* Nothing to do unless fragmented output was requested. */
    if (!(mov->flags & FF_MOV_FLAG_FRAGMENT))
        return 0;

    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        if (track->entry <= 1)
            continue;
        // Sample durations are calculated as the diff of dts values,
        // but for the last sample in a fragment, we don't know the dts
        // of the first sample in the next fragment, so we have to rely
        // on what was set as duration in the AVPacket. Not all callers
        // set this though, so we might want to replace it with an
        // estimate if it currently is zero.
        if (get_cluster_duration(track, track->entry - 1) != 0)
            continue;
        // Use the duration (i.e. dts diff) of the second last sample for
        // the last one. This is a wild guess (and fatal if it turns out
        // to be too long), but probably the best we can do - having a zero
        // duration is bad as well.
        track->track_duration += get_cluster_duration(track, track->entry - 2);
        track->end_pts        += get_cluster_duration(track, track->entry - 2);
        if (!mov->missing_duration_warned) {
            av_log(s, AV_LOG_WARNING,
                   "Estimating the duration of the last packet in a "
                   "fragment, consider setting the duration field in "
                   "AVPacket instead.\n");
            mov->missing_duration_warned = 1;
        }
    }

    if (!mov->moov_written) {
        /* Phase 1: the initial moov (and possibly the buffered initial
         * mdat) still needs to be written. */
        int64_t pos = avio_tell(s->pb);
        uint8_t *buf;
        int buf_size, moov_size;

        for (i = 0; i < mov->nb_streams; i++)
            if (!mov->tracks[i].entry)
                break;
        /* Don't write the initial moov unless all tracks have data */
        if (i < mov->nb_streams && !force)
            return 0;

        /* Pre-compute the moov size so that the per-track data offsets
         * (which point past moov plus the 8-byte mdat header) can be set
         * before the moov is actually serialized. */
        moov_size = get_moov_size(s);
        for (i = 0; i < mov->nb_streams; i++)
            mov->tracks[i].data_offset = pos + moov_size + 8;

        if (mov->flags & FF_MOV_FLAG_DELAY_MOOV)
            mov_write_identification(s->pb, s);
        if ((ret = mov_write_moov_tag(s->pb, mov, s)) < 0)
            return ret;

        if (mov->flags & FF_MOV_FLAG_DELAY_MOOV) {
            /* With delay_moov only the header is emitted now; buffered
             * samples are written as a regular moof+mdat on the next call. */
            if (mov->flags & FF_MOV_FLAG_GLOBAL_SIDX)
                mov->reserved_header_pos = avio_tell(s->pb);
            avio_flush(s->pb);
            mov->moov_written = 1;
            return 0;
        }

        /* Emit the buffered initial mdat right after the moov. */
        buf_size = avio_close_dyn_buf(mov->mdat_buf, &buf);
        mov->mdat_buf = NULL;
        avio_wb32(s->pb, buf_size + 8);
        ffio_wfourcc(s->pb, "mdat");
        avio_write(s->pb, buf, buf_size);
        av_free(buf);

        if (mov->flags & FF_MOV_FLAG_GLOBAL_SIDX)
            mov->reserved_header_pos = avio_tell(s->pb);

        mov->moov_written = 1;
        mov->mdat_size = 0;
        for (i = 0; i < mov->nb_streams; i++) {
            /* Advance frag_start by the duration of the flushed samples
             * so the next fragment's timestamps line up. */
            if (mov->tracks[i].entry)
                mov->tracks[i].frag_start += mov->tracks[i].start_dts +
                                             mov->tracks[i].track_duration -
                                             mov->tracks[i].cluster[0].dts;
            mov->tracks[i].entry = 0;
        }
        avio_flush(s->pb);
        return 0;
    }

    if (mov->frag_interleave) {
        /* Phase 2: fold every track's pending data into the shared
         * interleaved buffer before sizing the fragment. */
        for (i = 0; i < mov->nb_streams; i++) {
            MOVTrack *track = &mov->tracks[i];
            int ret;
            if ((ret = mov_flush_fragment_interleaving(s, track)) < 0)
                return ret;
        }
        if (!mov->mdat_buf)
            return 0;
        mdat_size = avio_tell(mov->mdat_buf);
    }

    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        /* With separate moofs (or interleaving) offsets are relative to
         * each track's own mdat; otherwise they are offsets into the one
         * shared mdat, accumulated across tracks. */
        if (mov->flags & FF_MOV_FLAG_SEPARATE_MOOF || mov->frag_interleave)
            track->data_offset = 0;
        else
            track->data_offset = mdat_size;
        if (!track->entry)
            continue;
        if (track->mdat_buf)
            mdat_size += avio_tell(track->mdat_buf);
        if (first_track < 0)
            first_track = i;
    }
    if (!mdat_size)
        return 0;

    for (i = 0; i < mov->nb_streams; i++) {
        MOVTrack *track = &mov->tracks[i];
        int buf_size, write_moof = 1, moof_tracks = -1;
        uint8_t *buf;
        int64_t duration = 0;

        if (track->entry)
            duration = track->start_dts + track->track_duration -
                       track->cluster[0].dts;
        if (mov->flags & FF_MOV_FLAG_SEPARATE_MOOF) {
            /* One moof+mdat pair per track. */
            if (!track->mdat_buf)
                continue;
            mdat_size = avio_tell(track->mdat_buf);
            moof_tracks = i;
        } else {
            /* A single moof covering all tracks, written once (when
             * processing the first track that has data). */
            write_moof = i == first_track;
        }

        if (write_moof) {
            avio_flush(s->pb);

            mov_write_moof_tag(s->pb, mov, moof_tracks, mdat_size);
            mov->fragments++;

            avio_wb32(s->pb, mdat_size + 8);
            ffio_wfourcc(s->pb, "mdat");
        }

        if (track->entry)
            track->frag_start += duration;
        track->entry = 0;
        track->entries_flushed = 0;
        if (!mov->frag_interleave) {
            if (!track->mdat_buf)
                continue;
            buf_size = avio_close_dyn_buf(track->mdat_buf, &buf);
            track->mdat_buf = NULL;
        } else {
            if (!mov->mdat_buf)
                continue;
            buf_size = avio_close_dyn_buf(mov->mdat_buf, &buf);
            mov->mdat_buf = NULL;
        }

        avio_write(s->pb, buf, buf_size);
        av_free(buf);
    }

    mov->mdat_size = 0;

    avio_flush(s->pb);
    return 0;
}
2015-10-20 21:30:03 +02:00
/**
 * Flush the current fragment, compensating for the delay_moov behaviour:
 * when delay_moov is active, the very first mov_flush_fragment() call only
 * emits the moov header, so a second call is issued to also write the
 * pending moof+mdat pair.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int mov_auto_flush_fragment(AVFormatContext *s, int force)
{
    MOVMuxContext *mov = s->priv_data;
    int moov_done_before = mov->moov_written;
    int err;

    err = mov_flush_fragment(s, force);
    if (err < 0)
        return err;
    // If using delay_moov, the first flush only wrote the moov,
    // not the actual moof+mdat pair, thus flush once again.
    if (!moov_done_before && (mov->flags & FF_MOV_FLAG_DELAY_MOOV))
        err = mov_flush_fragment(s, force);
    return err;
}
2012-04-11 13:37:14 +03:00
int ff_mov_write_packet ( AVFormatContext * s , AVPacket * pkt )
2003-08-26 23:23:13 +03:00
{
2009-02-28 18:02:29 +02:00
MOVMuxContext * mov = s - > priv_data ;
2011-02-20 12:04:12 +02:00
AVIOContext * pb = s - > pb ;
2006-05-13 21:01:16 +03:00
MOVTrack * trk = & mov - > tracks [ pkt - > stream_index ] ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
AVCodecParameters * par = trk - > par ;
2012-01-30 17:19:15 +03:00
unsigned int samples_in_chunk = 0 ;
2014-11-02 23:50:47 +02:00
int size = pkt - > size , ret = 0 ;
2011-12-09 22:19:57 +03:00
uint8_t * reformatted_data = NULL ;
2012-01-05 14:57:05 +03:00
2014-02-21 04:30:19 +03:00
if ( trk - > entry ) {
int64_t duration = pkt - > dts - trk - > cluster [ trk - > entry - 1 ] . dts ;
if ( duration < 0 | | duration > INT_MAX ) {
av_log ( s , AV_LOG_ERROR , " Application provided duration: % " PRId64 " / timestamp: % " PRId64 " is out of range for mov/mp4 format \n " ,
duration , pkt - > dts
) ;
pkt - > dts = trk - > cluster [ trk - > entry - 1 ] . dts + 1 ;
pkt - > pts = AV_NOPTS_VALUE ;
}
2016-01-09 04:36:19 +02:00
}
if ( pkt - > duration < 0 | | pkt - > duration > INT_MAX ) {
av_log ( s , AV_LOG_ERROR , " Application provided duration: % " PRId64 " is invalid \n " , pkt - > duration ) ;
return AVERROR ( EINVAL ) ;
2014-02-21 04:30:19 +03:00
}
movenc: Buffer the mdat for the initial moov fragment, too
This allows writing QuickTime-compatible fragmented mp4 (with
a non-empty moov atom) to a non-seekable output.
This buffers the mdat for the initial fragment just as it does
for all normal fragments, too. Previously, the resulting
atom structure was mdat,moov, moof,mdat ..., while it now
is moov,mdat, moof,mdat.
Signed-off-by: Martin Storsjö <martin@martin.st>
2012-02-02 13:50:26 +03:00
if ( mov - > flags & FF_MOV_FLAG_FRAGMENT ) {
int ret ;
2015-01-20 16:33:12 +02:00
if ( mov - > moov_written | | mov - > flags & FF_MOV_FLAG_EMPTY_MOOV ) {
2013-09-10 15:35:41 +03:00
if ( mov - > frag_interleave & & mov - > fragments > 0 ) {
if ( trk - > entry - trk - > entries_flushed > = mov - > frag_interleave ) {
if ( ( ret = mov_flush_fragment_interleaving ( s , trk ) ) < 0 )
return ret ;
}
}
movenc: Buffer the mdat for the initial moov fragment, too
This allows writing QuickTime-compatible fragmented mp4 (with
a non-empty moov atom) to a non-seekable output.
This buffers the mdat for the initial fragment just as it does
for all normal fragments, too. Previously, the resulting
atom structure was mdat,moov, moof,mdat ..., while it now
is moov,mdat, moof,mdat.
Signed-off-by: Martin Storsjö <martin@martin.st>
2012-02-02 13:50:26 +03:00
if ( ! trk - > mdat_buf ) {
if ( ( ret = avio_open_dyn_buf ( & trk - > mdat_buf ) ) < 0 )
return ret ;
}
pb = trk - > mdat_buf ;
} else {
if ( ! mov - > mdat_buf ) {
if ( ( ret = avio_open_dyn_buf ( & mov - > mdat_buf ) ) < 0 )
return ret ;
}
pb = mov - > mdat_buf ;
2012-01-05 14:57:05 +03:00
}
2011-12-06 18:15:35 +03:00
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( par - > codec_id = = AV_CODEC_ID_AMR_NB ) {
2006-07-04 17:03:59 +03:00
/* We must find out how many AMR blocks there are in one packet */
2015-06-11 00:24:54 +02:00
static const uint16_t packed_size [ 16 ] =
2011-10-19 12:20:48 +03:00
{ 13 , 14 , 16 , 18 , 20 , 21 , 27 , 32 , 6 , 0 , 0 , 0 , 0 , 0 , 0 , 1 } ;
2006-07-04 17:03:59 +03:00
int len = 0 ;
2012-01-30 17:19:15 +03:00
while ( len < size & & samples_in_chunk < 100 ) {
2006-07-04 17:03:59 +03:00
len + = packed_size [ ( pkt - > data [ len ] > > 3 ) & 0x0F ] ;
2012-01-30 17:19:15 +03:00
samples_in_chunk + + ;
2003-08-26 23:23:13 +03:00
}
2012-01-30 17:19:15 +03:00
if ( samples_in_chunk > 1 ) {
2007-09-12 02:46:46 +03:00
av_log ( s , AV_LOG_ERROR , " fatal error, input is not a single packet, implement a AVParser for it \n " ) ;
2006-07-06 15:27:43 +03:00
return - 1 ;
}
2016-04-10 21:58:15 +02:00
} else if ( par - > codec_id = = AV_CODEC_ID_ADPCM_MS | |
par - > codec_id = = AV_CODEC_ID_ADPCM_IMA_WAV ) {
samples_in_chunk = trk - > par - > frame_size ;
2012-01-30 17:19:15 +03:00
} else if ( trk - > sample_size )
samples_in_chunk = size / trk - > sample_size ;
2006-07-04 17:03:59 +03:00
else
2012-01-30 17:19:15 +03:00
samples_in_chunk = 1 ;
2003-11-03 23:51:07 +02:00
2006-04-14 13:51:32 +03:00
/* copy extradata if it exists */
2016-04-10 21:58:15 +02:00
if ( trk - > vos_len = = 0 & & par - > extradata_size > 0 & &
2015-02-24 12:00:07 +02:00
! TAG_IS_AVCI ( trk - > tag ) & &
2016-04-10 21:58:15 +02:00
( par - > codec_id ! = AV_CODEC_ID_DNXHD ) ) {
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
trk - > vos_len = par - > extradata_size ;
2012-01-30 17:19:15 +03:00
trk - > vos_data = av_malloc ( trk - > vos_len ) ;
2015-02-01 20:19:45 +02:00
if ( ! trk - > vos_data ) {
ret = AVERROR ( ENOMEM ) ;
goto err ;
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
memcpy ( trk - > vos_data , par - > extradata , trk - > vos_len ) ;
2003-11-03 23:51:07 +02:00
}
2016-04-10 21:58:15 +02:00
if ( par - > codec_id = = AV_CODEC_ID_AAC & & pkt - > size > 2 & &
2013-03-30 10:41:46 +03:00
( AV_RB16 ( pkt - > data ) & 0xfff0 ) = = 0xfff0 ) {
if ( ! s - > streams [ pkt - > stream_index ] - > nb_frames ) {
2013-06-26 18:59:38 +03:00
av_log ( s , AV_LOG_ERROR , " Malformed AAC bitstream detected: "
2014-08-19 15:28:35 +03:00
" use the audio bitstream filter 'aac_adtstoasc' to fix it "
2013-06-26 18:59:38 +03:00
" ('-bsf:a aac_adtstoasc' option with ffmpeg) \n " ) ;
2013-03-30 10:41:46 +03:00
return - 1 ;
}
av_log ( s , AV_LOG_WARNING , " aac bitstream error \n " ) ;
}
2016-04-10 21:58:15 +02:00
if ( par - > codec_id = = AV_CODEC_ID_H264 & & trk - > vos_len > 0 & & * ( uint8_t * ) trk - > vos_data ! = 1 & & ! TAG_IS_AVCI ( trk - > tag ) ) {
2009-01-16 03:12:32 +02:00
/* from x264 or from bytestream h264 */
/* nal reformating needed */
2011-12-09 22:19:57 +03:00
if ( trk - > hint_track > = 0 & & trk - > hint_track < mov - > nb_streams ) {
ff_avc_parse_nal_units_buf ( pkt - > data , & reformatted_data ,
& size ) ;
avio_write ( pb , reformatted_data , size ) ;
} else {
2015-12-07 12:01:09 +02:00
if ( mov - > encryption_scheme = = MOV_ENC_CENC_AES_CTR ) {
size = ff_mov_cenc_avc_parse_nal_units ( & trk - > cenc , pb , pkt - > data , size ) ;
if ( size < 0 ) {
ret = size ;
goto err ;
}
} else {
size = ff_avc_parse_nal_units ( pb , pkt - > data , pkt - > size ) ;
}
2011-12-09 22:19:57 +03:00
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
} else if ( par - > codec_id = = AV_CODEC_ID_HEVC & & trk - > vos_len > 6 & &
2014-03-03 17:53:42 +03:00
( AV_RB24 ( trk - > vos_data ) = = 1 | | AV_RB32 ( trk - > vos_data ) = = 1 ) ) {
/* extradata is Annex B, assume the bitstream is too and convert it */
if ( trk - > hint_track > = 0 & & trk - > hint_track < mov - > nb_streams ) {
ff_hevc_annexb2mp4_buf ( pkt - > data , & reformatted_data , & size , 0 , NULL ) ;
avio_write ( pb , reformatted_data , size ) ;
} else {
size = ff_hevc_annexb2mp4 ( pb , pkt - > data , pkt - > size , 0 , NULL ) ;
}
2015-02-08 16:48:26 +02:00
# if CONFIG_AC3_PARSER
2016-04-10 21:58:15 +02:00
} else if ( par - > codec_id = = AV_CODEC_ID_EAC3 ) {
2014-10-07 15:57:19 +03:00
size = handle_eac3 ( mov , pkt , trk ) ;
if ( size < 0 )
return size ;
2014-10-13 10:35:11 +03:00
else if ( ! size )
goto end ;
2014-10-07 15:57:19 +03:00
avio_write ( pb , pkt - > data , size ) ;
2015-02-08 16:48:26 +02:00
# endif
2009-01-16 03:12:32 +02:00
} else {
2015-12-07 12:01:09 +02:00
if ( mov - > encryption_scheme = = MOV_ENC_CENC_AES_CTR ) {
2016-04-10 21:58:15 +02:00
if ( par - > codec_id = = AV_CODEC_ID_H264 & & par - > extradata_size > 4 ) {
int nal_size_length = ( par - > extradata [ 4 ] & 0x3 ) + 1 ;
2015-12-07 12:01:09 +02:00
ret = ff_mov_cenc_avc_write_nal_units ( s , & trk - > cenc , nal_size_length , pb , pkt - > data , size ) ;
} else {
ret = ff_mov_cenc_write_packet ( & trk - > cenc , pb , pkt - > data , size ) ;
}
if ( ret ) {
goto err ;
}
} else {
avio_write ( pb , pkt - > data , size ) ;
}
2009-01-16 03:12:32 +02:00
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( ( par - > codec_id = = AV_CODEC_ID_DNXHD | |
par - > codec_id = = AV_CODEC_ID_AC3 ) & & ! trk - > vos_len ) {
2008-08-31 23:20:12 +03:00
/* copy frame to create needed atoms */
2013-07-07 14:59:47 +03:00
trk - > vos_len = size ;
2012-01-30 17:19:15 +03:00
trk - > vos_data = av_malloc ( size ) ;
2014-11-02 23:50:47 +02:00
if ( ! trk - > vos_data ) {
ret = AVERROR ( ENOMEM ) ;
goto err ;
}
2012-01-30 17:19:15 +03:00
memcpy ( trk - > vos_data , pkt - > data , size ) ;
2006-03-11 20:18:17 +02:00
}
2013-06-02 22:57:31 +03:00
if ( trk - > entry > = trk - > cluster_capacity ) {
2013-07-07 14:59:47 +03:00
unsigned new_capacity = 2 * ( trk - > entry + MOV_INDEX_CLUSTER_SIZE ) ;
2013-06-02 22:57:31 +03:00
if ( av_reallocp_array ( & trk - > cluster , new_capacity ,
2014-11-02 23:50:47 +02:00
sizeof ( * trk - > cluster ) ) ) {
ret = AVERROR ( ENOMEM ) ;
goto err ;
}
2013-06-02 22:57:31 +03:00
trk - > cluster_capacity = new_capacity ;
2003-11-03 23:51:07 +02:00
}
2013-07-07 14:59:47 +03:00
trk - > cluster [ trk - > entry ] . pos = avio_tell ( pb ) - size ;
2012-01-30 17:19:15 +03:00
trk - > cluster [ trk - > entry ] . samples_in_chunk = samples_in_chunk ;
2013-07-08 05:08:07 +03:00
trk - > cluster [ trk - > entry ] . chunkNum = 0 ;
2013-07-07 14:59:47 +03:00
trk - > cluster [ trk - > entry ] . size = size ;
trk - > cluster [ trk - > entry ] . entries = samples_in_chunk ;
trk - > cluster [ trk - > entry ] . dts = pkt - > dts ;
2012-01-05 14:57:05 +03:00
if ( ! trk - > entry & & trk - > start_dts ! = AV_NOPTS_VALUE ) {
2014-11-20 09:51:05 +02:00
if ( ! trk - > frag_discont ) {
/* First packet of a new fragment. We already wrote the duration
* of the last packet of the previous fragment based on track_duration ,
* which might not exactly match our dts . Therefore adjust the dts
* of this packet to be what the previous packets duration implies . */
trk - > cluster [ trk - > entry ] . dts = trk - > start_dts + trk - > track_duration ;
2014-11-23 23:23:43 +02:00
/* We also may have written the pts and the corresponding duration
2015-03-15 23:40:06 +02:00
* in sidx / tfrf / tfxd tags ; make sure the sidx pts and duration match up with
2014-11-23 23:23:43 +02:00
* the next fragment . This means the cts of the first sample must
2016-04-19 13:07:25 +02:00
* be the same in all fragments , unless end_pts was updated by
* the packet causing the fragment to be written . */
2015-08-09 22:11:55 +02:00
if ( ( mov - > flags & FF_MOV_FLAG_DASH & & ! ( mov - > flags & FF_MOV_FLAG_GLOBAL_SIDX ) ) | |
2015-03-15 23:40:06 +02:00
mov - > mode = = MODE_ISM )
2015-03-15 23:57:06 +02:00
pkt - > pts = pkt - > dts + trk - > end_pts - trk - > cluster [ trk - > entry ] . dts ;
2014-11-20 09:51:05 +02:00
} else {
/* New fragment, but discontinuous from previous fragments.
* Pretend the duration sum of the earlier fragments is
* pkt - > dts - trk - > start_dts . */
trk - > frag_start = pkt - > dts - trk - > start_dts ;
2015-03-15 23:57:06 +02:00
trk - > end_pts = AV_NOPTS_VALUE ;
2014-11-20 09:51:05 +02:00
trk - > frag_discont = 0 ;
}
2012-01-05 14:57:05 +03:00
}
2014-11-08 00:59:57 +02:00
2014-11-04 16:28:48 +02:00
if ( ! trk - > entry & & trk - > start_dts = = AV_NOPTS_VALUE & & ! mov - > use_editlist & &
s - > avoid_negative_ts = = AVFMT_AVOID_NEG_TS_MAKE_ZERO ) {
/* Not using edit lists and shifting the first track to start from zero.
* If the other streams start from a later timestamp , we won ' t be able
* to signal the difference in starting time without an edit list .
* Thus move the timestamp for this first sample to 0 , increasing
* its duration instead . */
2012-09-29 07:33:32 +03:00
trk - > cluster [ trk - > entry ] . dts = trk - > start_dts = 0 ;
}
2014-10-12 00:33:24 +03:00
if ( trk - > start_dts = = AV_NOPTS_VALUE ) {
2012-01-07 02:32:53 +03:00
trk - > start_dts = pkt - > dts ;
2014-11-20 09:51:05 +02:00
if ( trk - > frag_discont ) {
2015-01-05 15:13:04 +02:00
if ( mov - > use_editlist ) {
/* Pretend the whole stream started at pts=0, with earlier fragments
* already written . If the stream started at pts = 0 , the duration sum
* of earlier fragments would have been pkt - > pts . */
trk - > frag_start = pkt - > pts ;
trk - > start_dts = pkt - > dts - pkt - > pts ;
} else {
/* Pretend the whole stream started at dts=0, with earlier fragments
* already written , with a duration summing up to pkt - > dts . */
trk - > frag_start = pkt - > dts ;
trk - > start_dts = 0 ;
}
2014-11-20 09:51:05 +02:00
trk - > frag_discont = 0 ;
2015-01-20 16:33:12 +02:00
} else if ( pkt - > dts & & mov - > moov_written )
2014-10-12 00:33:24 +03:00
av_log ( s , AV_LOG_WARNING ,
movenc: Add an option for delaying writing the moov with empty_moov
This delays writing the moov until the first fragment is written,
or can be flushed by the caller explicitly when wanted. If the first
sample in all streams is available at this point, we can write
a proper editlist at this point, allowing streams to start at
something else than dts=0. For AC3 and DNXHD, a packet is
needed in order to write the moov header properly.
This isn't added to the normal behaviour for empty_moov, since
the behaviour that ftyp+moov is written during avformat_write_header
would be changed. Callers that split the output stream into header+segments
(either by flushing manually, with the custom_frag flag set, or by
just differentiating between data written during avformat_write_header
and the rest) will need to be adjusted to take this option into use.
For handling streams that start at something else than dts=0, an
alternative would be to use different kinds of heuristics for
guessing the start dts (using AVCodecContext delay or has_b_frames
together with the frame rate), but this is not reliable and doesn't
necessarily work well with stream copy, and wouldn't work for getting
the right initialization data for AC3 or DNXHD either.
Signed-off-by: Martin Storsjö <martin@martin.st>
2014-11-03 16:09:15 +02:00
" Track %d starts with a nonzero dts % " PRId64 " , while the moov "
" already has been written. Set the delay_moov flag to handle "
" this case. \n " ,
pkt - > stream_index , pkt - > dts ) ;
2014-10-12 00:33:24 +03:00
}
2012-01-30 17:19:15 +03:00
trk - > track_duration = pkt - > dts - trk - > start_dts + pkt - > duration ;
2012-06-25 01:05:33 +03:00
trk - > last_sample_is_subtitle_end = 0 ;
2006-11-01 19:27:39 +02:00
2008-05-29 03:58:41 +03:00
if ( pkt - > pts = = AV_NOPTS_VALUE ) {
av_log ( s , AV_LOG_WARNING , " pts has no value \n " ) ;
pkt - > pts = pkt - > dts ;
}
2008-05-29 03:54:33 +03:00
if ( pkt - > dts ! = pkt - > pts )
2009-05-15 09:11:53 +03:00
trk - > flags | = MOV_TRACK_CTTS ;
2013-07-07 14:59:47 +03:00
trk - > cluster [ trk - > entry ] . cts = pkt - > pts - pkt - > dts ;
2009-05-15 09:11:53 +03:00
trk - > cluster [ trk - > entry ] . flags = 0 ;
2014-11-23 23:23:43 +02:00
if ( trk - > start_cts = = AV_NOPTS_VALUE )
trk - > start_cts = pkt - > pts - pkt - > dts ;
2015-03-15 23:57:06 +02:00
if ( trk - > end_pts = = AV_NOPTS_VALUE )
trk - > end_pts = trk - > cluster [ trk - > entry ] . dts +
trk - > cluster [ trk - > entry ] . cts + pkt - > duration ;
else
trk - > end_pts = FFMAX ( trk - > end_pts , trk - > cluster [ trk - > entry ] . dts +
trk - > cluster [ trk - > entry ] . cts +
pkt - > duration ) ;
2014-11-23 23:23:43 +02:00
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( par - > codec_id = = AV_CODEC_ID_VC1 ) {
2015-01-20 16:54:03 +02:00
mov_parse_vc1_frame ( pkt , trk ) ;
2012-01-21 03:16:34 +03:00
} else if ( pkt - > flags & AV_PKT_FLAG_KEY ) {
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( mov - > mode = = MODE_MOV & & par - > codec_id = = AV_CODEC_ID_MPEG2VIDEO & &
2011-02-13 02:18:45 +02:00
trk - > entry > 0 ) { // force sync sample for the first key frame
2009-05-15 09:11:53 +03:00
mov_parse_mpeg2_frame ( pkt , & trk - > cluster [ trk - > entry ] . flags ) ;
if ( trk - > cluster [ trk - > entry ] . flags & MOV_PARTIAL_SYNC_SAMPLE )
trk - > flags | = MOV_TRACK_STPS ;
} else {
trk - > cluster [ trk - > entry ] . flags = MOV_SYNC_SAMPLE ;
}
if ( trk - > cluster [ trk - > entry ] . flags & MOV_SYNC_SAMPLE )
2012-01-30 17:19:15 +03:00
trk - > has_keyframes + + ;
2009-05-15 00:05:52 +03:00
}
2003-11-03 23:51:07 +02:00
trk - > entry + + ;
2012-01-30 17:19:15 +03:00
trk - > sample_count + = samples_in_chunk ;
2013-07-07 14:59:47 +03:00
mov - > mdat_size + = size ;
2003-11-03 23:51:07 +02:00
2010-05-18 22:47:24 +03:00
if ( trk - > hint_track > = 0 & & trk - > hint_track < mov - > nb_streams )
2011-12-09 22:19:57 +03:00
ff_mov_add_hinted_packet ( s , pkt , trk - > hint_track , trk - > entry ,
reformatted_data , size ) ;
2014-11-02 23:50:47 +02:00
2014-10-13 10:35:11 +03:00
end :
2014-11-02 23:50:47 +02:00
err :
2014-11-08 12:08:57 +02:00
2011-12-09 22:19:57 +03:00
av_free ( reformatted_data ) ;
2014-11-02 23:50:47 +02:00
return ret ;
2003-08-26 23:23:13 +03:00
}
2012-06-25 01:05:33 +03:00
/*
 * Write one packet to the current fragment/mdat, flushing the fragment
 * first when a configured fragmentation boundary (duration, size or
 * keyframe) has been reached.
 *
 * Returns 0 on success (including silently discarded empty packets) or a
 * negative AVERROR from ff_mov_write_packet().
 */
static int mov_write_single_packet(AVFormatContext *s, AVPacket *pkt)
{
    MOVMuxContext *mov = s->priv_data;
    MOVTrack *trk = &mov->tracks[pkt->stream_index];
    AVCodecParameters *par = trk->par;
    int64_t frag_duration = 0;
    int size = pkt->size;

    /* A one-shot discontinuity request on the muxer is fanned out to every
     * track here, so each track handles it at its own next packet. */
    if (mov->flags & FF_MOV_FLAG_FRAG_DISCONT) {
        int i;
        for (i = 0; i < s->nb_streams; i++)
            mov->tracks[i].frag_discont = 1;
        mov->flags &= ~FF_MOV_FLAG_FRAG_DISCONT;
    }

    if (!pkt->size) {
        /* Even though the packet carries no data, a discontinuous track with
         * no start timestamp yet can still pick up its start_dts/start_cts
         * from it before it is dropped. */
        if (trk->start_dts == AV_NOPTS_VALUE && trk->frag_discont) {
            trk->start_dts = pkt->dts;
            if (pkt->pts != AV_NOPTS_VALUE)
                trk->start_cts = pkt->pts - pkt->dts;
            else
                trk->start_cts = 0;
        }

        return 0;             /* Discard 0 sized packets */
    }

    /* Duration of the fragment accumulated so far, measured up to (but not
     * including) this packet, converted to AV_TIME_BASE units. */
    if (trk->entry && pkt->stream_index < s->nb_streams)
        frag_duration = av_rescale_q(pkt->dts - trk->cluster[0].dts,
                                     s->streams[pkt->stream_index]->time_base,
                                     AV_TIME_BASE_Q);
    /* Flush the pending fragment before writing this packet when any
     * fragmentation trigger fires: max duration reached, max size reached,
     * or (in keyframe mode) a video keyframe arrives on a non-empty track. */
    if ((mov->max_fragment_duration &&
         frag_duration >= mov->max_fragment_duration) ||
         (mov->max_fragment_size && mov->mdat_size + size >= mov->max_fragment_size) ||
         (mov->flags & FF_MOV_FLAG_FRAG_KEYFRAME &&
          par->codec_type == AVMEDIA_TYPE_VIDEO &&
          trk->entry && pkt->flags & AV_PKT_FLAG_KEY)) {
        if (frag_duration >= mov->min_fragment_duration) {
            // Set the duration of this track to line up with the next
            // sample in this track. This avoids relying on AVPacket
            // duration, but only helps for this particular track, not
            // for the other ones that are flushed at the same time.
            trk->track_duration = pkt->dts - trk->start_dts;
            if (pkt->pts != AV_NOPTS_VALUE)
                trk->end_pts = pkt->pts;
            else
                trk->end_pts = pkt->dts;
            mov_auto_flush_fragment(s, 0);
        }
    }

    return ff_mov_write_packet(s, pkt);
}
/*
 * Emit an empty text sample on a subtitle track at the given dts, closing
 * the display interval of the previous subtitle sample.
 */
static int mov_write_subtitle_end_packet(AVFormatContext *s,
                                         int stream_index,
                                         int64_t dts) {
    /* Two zero bytes: a 16-bit big-endian text length of 0, i.e. an empty
     * text sample. The payload lives on the stack, so the packet owns no
     * buffer and av_packet_unref() below only resets the fields. */
    uint8_t empty_text[2] = { 0 };
    AVPacket terminator;
    int err;

    av_init_packet(&terminator);
    terminator.data         = empty_text;
    terminator.size         = sizeof(empty_text);
    terminator.dts          = dts;
    terminator.pts          = dts;
    terminator.duration     = 0;
    terminator.stream_index = stream_index;

    err = mov_write_single_packet(s, &terminator);
    av_packet_unref(&terminator);
    return err;
}
/*
 * Muxer write_packet entry point.
 *
 * A NULL pkt is the flush signal: the pending fragment is written out and 1
 * is returned to indicate the muxer handled the flush. Otherwise the packet
 * is routed through subtitle end-sample handling and MOV-specific raw video
 * fixups before being written.
 */
static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    if (!pkt) {
        mov_flush_fragment(s, 1);
        return 1;
    } else {
        int i;
        MOVMuxContext *mov = s->priv_data;
        MOVTrack *trk = &mov->tracks[pkt->stream_index];

        if (!pkt->size)
            return mov_write_single_packet(s, pkt); /* Passthrough. */

        /*
         * Subtitles require special handling.
         *
         * 1) For full compliance, every track must have a sample at
         * dts == 0, which is rarely true for subtitles. So, as soon
         * as we see any packet with dts > 0, write an empty subtitle
         * at dts == 0 for any subtitle track with no samples in it.
         *
         * 2) For each subtitle track, check if the current packet's
         * dts is past the duration of the last subtitle sample. If
         * so, we now need to write an end sample for that subtitle.
         *
         * This must be done conditionally to allow for subtitles that
         * immediately replace each other, in which case an end sample
         * is not needed, and is, in fact, actively harmful.
         *
         * 3) See mov_write_trailer for how the final end sample is
         * handled.
         */
        for (i = 0; i < mov->nb_streams; i++) {
            /* Shadows the outer trk on purpose: this loop inspects every
             * subtitle track, not just the one the packet belongs to. */
            MOVTrack *trk = &mov->tracks[i];
            int ret;

            if (trk->par->codec_id == AV_CODEC_ID_MOV_TEXT &&
                trk->track_duration < pkt->dts &&
                (trk->entry == 0 || !trk->last_sample_is_subtitle_end)) {
                ret = mov_write_subtitle_end_packet(s, i, trk->track_duration);
                if (ret < 0) return ret;
                trk->last_sample_is_subtitle_end = 1;
            }
        }

        if (trk->mode == MODE_MOV && trk->par->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* Keep the caller's packet around: ff_reshuffle_raw_rgb() may
             * replace pkt with a newly allocated one, and the palette code
             * still needs the original. */
            AVPacket *opkt = pkt;
            int reshuffle_ret, ret;
            if (trk->is_unaligned_qt_rgb) {
                /* QT raw RGB rows are padded to 16-bit boundaries; 15 bpp is
                 * stored as 16. Re-pack the frame if its stride differs. */
                int64_t bpc = trk->par->bits_per_coded_sample != 15 ? trk->par->bits_per_coded_sample : 16;
                int expected_stride = ((trk->par->width * bpc + 15) >> 4) * 2;
                reshuffle_ret = ff_reshuffle_raw_rgb(s, &pkt, trk->par, expected_stride);
                if (reshuffle_ret < 0)
                    return reshuffle_ret;
            } else
                reshuffle_ret = 0;
            if (trk->par->format == AV_PIX_FMT_PAL8 && !trk->pal_done) {
                /* Capture the stream palette once, from the original packet. */
                ret = ff_get_packet_palette(s, opkt, reshuffle_ret, trk->palette);
                if (ret < 0)
                    goto fail;
                if (ret)
                    trk->pal_done++;
            } else if (trk->par->codec_id == AV_CODEC_ID_RAWVIDEO &&
                       (trk->par->format == AV_PIX_FMT_GRAY8 ||
                        trk->par->format == AV_PIX_FMT_MONOBLACK)) {
                /* QT expects inverted grayscale/monochrome; flip every byte. */
                for (i = 0; i < pkt->size; i++)
                    pkt->data[i] = ~pkt->data[i];
            }
            if (reshuffle_ret) {
                /* pkt was reallocated by the reshuffle above; write it and
                 * free it here. The fail label also lands here so the
                 * reallocated packet is released on the palette error path. */
                ret = mov_write_single_packet(s, pkt);
fail:
                if (reshuffle_ret)
                    av_packet_free(&pkt);
                return ret;
            }
        }

        return mov_write_single_packet(s, pkt);
    }
}
2010-05-05 11:41:10 +03:00
// QuickTime chapters involve an additional text track with the chapter names
// as samples, and a tref pointing from the other tracks to the chapter one.
2013-08-20 02:05:33 +03:00
/*
 * Create the QuickTime chapter text track at index tracknum: set up the
 * track parameters, synthesize a 'text' sample-description header as
 * extradata, and write one text sample per chapter carrying its title.
 *
 * Returns 0 on success or AVERROR(ENOMEM).
 */
static int mov_create_chapter_track(AVFormatContext *s, int tracknum)
{
    AVIOContext *pb;

    MOVMuxContext *mov = s->priv_data;
    MOVTrack *track = &mov->tracks[tracknum];
    AVPacket pkt = { .stream_index = tracknum, .flags = AV_PKT_FLAG_KEY };
    int i, len;

    track->mode = mov->mode;
    track->tag = MKTAG('t','e','x','t');
    track->timescale = MOV_TIMESCALE;
    track->par = avcodec_parameters_alloc();
    if (!track->par)
        return AVERROR(ENOMEM);
    track->par->codec_type = AVMEDIA_TYPE_SUBTITLE;
#if 0
    // These properties are required to make QT recognize the chapter track
    uint8_t chapter_properties[43] = { 0, 0, 0, 0, 0, 0, 0, 1, };
    if (ff_alloc_extradata(track->par, sizeof(chapter_properties)))
        return AVERROR(ENOMEM);
    memcpy(track->par->extradata, chapter_properties, sizeof(chapter_properties));
#else
    /* Build the TextSampleEntry header into a dynamic buffer and hand its
     * backing storage to the track as extradata (ownership transfers). */
    if (avio_open_dyn_buf(&pb) >= 0) {
        int size;
        uint8_t *buf;

        /* Stub header (usually for Quicktime chapter track) */
        // TextSampleEntry
        avio_wb32(pb, 0x01); // displayFlags
        avio_w8(pb, 0x00);   // horizontal justification
        avio_w8(pb, 0x00);   // vertical justification
        avio_w8(pb, 0x00);   // bgColourRed
        avio_w8(pb, 0x00);   // bgColourGreen
        avio_w8(pb, 0x00);   // bgColourBlue
        avio_w8(pb, 0x00);   // bgColourAlpha
        // BoxRecord
        avio_wb16(pb, 0x00); // defTextBoxTop
        avio_wb16(pb, 0x00); // defTextBoxLeft
        avio_wb16(pb, 0x00); // defTextBoxBottom
        avio_wb16(pb, 0x00); // defTextBoxRight
        // StyleRecord
        avio_wb16(pb, 0x00); // startChar
        avio_wb16(pb, 0x00); // endChar
        avio_wb16(pb, 0x01); // fontID
        avio_w8(pb, 0x00);   // fontStyleFlags
        avio_w8(pb, 0x00);   // fontSize
        avio_w8(pb, 0x00);   // fgColourRed
        avio_w8(pb, 0x00);   // fgColourGreen
        avio_w8(pb, 0x00);   // fgColourBlue
        avio_w8(pb, 0x00);   // fgColourAlpha
        // FontTableBox
        avio_wb32(pb, 0x0D); // box size
        ffio_wfourcc(pb, "ftab"); // box atom name
        avio_wb16(pb, 0x01); // entry count
        // FontRecord
        avio_wb16(pb, 0x01); // font ID
        avio_w8(pb, 0x00);   // font name length

        if ((size = avio_close_dyn_buf(pb, &buf)) > 0) {
            track->par->extradata = buf;
            track->par->extradata_size = size;
        } else {
            av_freep(&buf);
        }
    }
#endif

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *c = s->chapters[i];
        AVDictionaryEntry *t;

        /* Chapter start/end rescaled to the track timescale; the sample
         * duration spans the whole chapter. */
        int64_t end = av_rescale_q(c->end, c->time_base, (AVRational){1,MOV_TIMESCALE});
        pkt.pts = pkt.dts = av_rescale_q(c->start, c->time_base, (AVRational){1,MOV_TIMESCALE});
        pkt.duration = end - pkt.dts;

        if ((t = av_dict_get(c->metadata, "title", NULL, 0))) {
            /* 'encd' box appended after the text, declaring UTF-8 encoding
             * (size 0x0C, tag 'encd', value 0x100). */
            const char encd[12] = {
                0x00, 0x00, 0x00, 0x0C,
                'e',  'n',  'c',  'd',
                0x00, 0x00, 0x01, 0x00 };
            len      = strlen(t->value);
            /* Sample layout: 16-bit length + text + encd box. */
            pkt.size = len + 2 + 12;
            pkt.data = av_malloc(pkt.size);
            if (!pkt.data)
                return AVERROR(ENOMEM);
            AV_WB16(pkt.data, len);
            memcpy(pkt.data + 2, t->value, len);
            memcpy(pkt.data + len + 2, encd, sizeof(encd));
            /* NOTE(review): the return value of ff_mov_write_packet() is
             * ignored here, so a failed chapter sample is silently dropped
             * — consider propagating it. */
            ff_mov_write_packet(s, &pkt);
            av_freep(&pkt.data);
        }
    }

    return 0;
}
2012-03-05 10:51:08 +03:00
/* Create a QuickTime 'tmcd' (timecode) track at mov->tracks[index], derived
 * from the video stream at src_index, and emit its single 4-byte sample
 * containing the starting frame number parsed from tcstr.
 *
 * Returns 0 on success, a negative AVERROR code on failure. */
static int mov_create_timecode_track(AVFormatContext *s, int index, int src_index, const char *tcstr)
{
    int ret;
    MOVMuxContext *mov = s->priv_data;
    MOVTrack *track = &mov->tracks[index];
    AVStream *src_st = s->streams[src_index];
    AVTimecode tc;
    /* a tmcd track carries exactly one keyframe sample of 4 bytes */
    AVPacket pkt = { .stream_index = index, .flags = AV_PKT_FLAG_KEY, .size = 4 };
    AVRational rate = find_fps(s, src_st);

    /* compute the frame number */
    ret = av_timecode_init_from_string(&tc, rate, tcstr, s);
    if (ret < 0)
        return ret;

    /* tmcd track based on video stream */
    track->mode = mov->mode;
    track->tag = MKTAG('t','m','c','d');
    track->src_track = src_index;
    /* mirror the source video track's timescale so timecode samples line up */
    track->timescale = mov->tracks[src_index].timescale;
    if (tc.flags & AV_TIMECODE_FLAG_DROPFRAME)
        track->timecode_flags |= MOV_TIMECODE_FLAG_DROPFRAME;

    /* set st to src_st for metadata access*/
    track->st = src_st;

    /* encode context: tmcd data stream */
    track->par = avcodec_parameters_alloc();
    if (!track->par)
        return AVERROR(ENOMEM);
    track->par->codec_type = AVMEDIA_TYPE_DATA;
    track->par->codec_tag  = track->tag;
    /* NOTE(review): this stores 1/fps (frame duration) in avg_frame_rate,
     * which normally holds a frame *rate* — confirm this inversion is
     * intentional for the tmcd path. */
    track->st->avg_frame_rate = av_inv_q(rate);

    /* the tmcd track just contains one packet with the frame number */
    pkt.data = av_malloc(pkt.size);
    if (!pkt.data)
        return AVERROR(ENOMEM);
    AV_WB32(pkt.data, tc.start);
    ret = ff_mov_write_packet(s, &pkt);
    av_free(pkt.data);
    return ret;
}
2013-08-22 17:25:18 +03:00
/*
 * st->disposition controls the "enabled" flag in the tkhd tag.
 * QuickTime will not play a track if it is not enabled. So make sure
 * that one track of each type (audio, video, subtitle) is enabled.
 *
 * Subtitles are special. For audio and video, setting "enabled" also
 * makes the track "default" (i.e. it is rendered when played). For
 * subtitles, an "enabled" subtitle is not rendered by default, but
 * if no subtitle is enabled, the subtitle menu in QuickTime will be
 * empty!
 */
static void enable_tracks(AVFormatContext *s)
{
    MOVMuxContext *mov = s->priv_data;
    int type, i;
    int num_enabled[AVMEDIA_TYPE_NB] = { 0 };   /* enabled-track count per media type */
    int first_stream[AVMEDIA_TYPE_NB];          /* index of first stream per media type */

    for (type = 0; type < AVMEDIA_TYPE_NB; type++)
        first_stream[type] = -1;

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        int t = st->codecpar->codec_type;

        /* ignore unknown or out-of-range media types */
        if (t <= AVMEDIA_TYPE_UNKNOWN || t >= AVMEDIA_TYPE_NB)
            continue;

        if (first_stream[t] < 0)
            first_stream[t] = i;

        if (st->disposition & AV_DISPOSITION_DEFAULT) {
            mov->tracks[i].flags |= MOV_TRACK_ENABLED;
            num_enabled[t]++;
        }
    }

    for (type = 0; type < AVMEDIA_TYPE_NB; type++) {
        /* only audio, video and subtitle tracks carry the enabled flag */
        if (type != AVMEDIA_TYPE_VIDEO &&
            type != AVMEDIA_TYPE_AUDIO &&
            type != AVMEDIA_TYPE_SUBTITLE)
            continue;
        if (num_enabled[type] > 1)
            mov->per_stream_grouping = 1;
        /* nothing explicitly enabled for this type: enable the first track */
        if (!num_enabled[type] && first_stream[type] >= 0)
            mov->tracks[first_stream[type]].flags |= MOV_TRACK_ENABLED;
    }
}
2013-08-26 10:59:46 +03:00
/* Release all per-track allocations held by the muxer context.
 * Called on both the normal teardown path and error paths. */
static void mov_free(AVFormatContext *s)
{
    MOVMuxContext *mov = s->priv_data;
    int i;

    if (mov->chapter_track) {
        /* the chapter track's parameters were allocated by the muxer itself */
        if (mov->tracks[mov->chapter_track].par)
            av_freep(&mov->tracks[mov->chapter_track].par->extradata);
        av_freep(&mov->tracks[mov->chapter_track].par);
    }

    for (i = 0; i < mov->nb_streams; i++) {
        if (mov->tracks[i].tag == MKTAG('r','t','p',' '))
            ff_mov_close_hinting(&mov->tracks[i]);
        else if (mov->tracks[i].tag == MKTAG('t','m','c','d') && mov->nb_meta_tmcd)
            /* muxer-created tmcd tracks own their codec parameters */
            av_freep(&mov->tracks[i].par);
        av_freep(&mov->tracks[i].cluster);
        av_freep(&mov->tracks[i].frag_info);
        /* Fix: free vos_data unconditionally. av_freep() on a NULL target
         * is a no-op, and the old "if (vos_len)" guard leaked the buffer
         * whenever vos_data had been allocated but vos_len was left 0. */
        av_freep(&mov->tracks[i].vos_data);
        mov->tracks[i].vos_len = 0;
        ff_mov_cenc_free(&mov->tracks[i].cenc);
    }

    av_freep(&mov->tracks);
}
2014-03-18 17:50:25 +03:00
/* Convert a packed 0xRRGGBB value to packed 0x00YYCrCb using BT.601-style
 * integer coefficients scaled by 1000 (limited-range luma: 16..235). */
static uint32_t rgb_to_yuv(uint32_t rgb)
{
    const int red   = (rgb >> 16) & 0xFF;
    const int green = (rgb >>  8) & 0xFF;
    const int blue  =  rgb        & 0xFF;
    int luma = ( 16000 + 257 * red + 504 * green +  98 * blue) / 1000;
    int cb   = (128000 - 148 * red - 291 * green + 439 * blue) / 1000;
    int cr   = (128000 + 439 * red - 368 * green -  71 * blue) / 1000;

    /* clamp each component to the 8-bit range
     * (inline equivalent of av_clip_uint8) */
    luma = luma < 0 ? 0 : luma > 255 ? 255 : luma;
    cb   = cb   < 0 ? 0 : cb   > 255 ? 255 : cb;
    cr   = cr   < 0 ? 0 : cr   > 255 ? 255 : cr;

    return ((uint32_t)luma << 16) | ((uint32_t)cr << 8) | (uint32_t)cb;
}
/* Parse the text-format DVD subtitle extradata ("palette: ..." and
 * "size: WxH" lines), convert the palette to YCbCr, and store it as the
 * track's decoder-specific data (vos_data). Also propagates the parsed
 * frame size to the stream's codec parameters.
 *
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static int mov_create_dvd_sub_decoder_specific_info(MOVTrack *track,
                                                    AVStream *st)
{
    int i, width = 720, height = 480;
    int have_palette = 0, have_size = 0;
    /* Fix: zero-initialize. sscanf may match fewer than 16 entries, and the
     * have_palette path below writes out all 16 slots — previously the
     * unmatched tail was uninitialized. */
    uint32_t palette[16] = { 0 };
    char *cur = st->codecpar->extradata;

    while (cur && *cur) {
        if (strncmp("palette:", cur, 8) == 0) {
            /* Fix: use a local count and the outer loop index instead of
             * re-declaring 'i', which shadowed the outer variable. */
            int count = sscanf(cur + 8,
                "%06"PRIx32", %06"PRIx32", %06"PRIx32", %06"PRIx32", "
                "%06"PRIx32", %06"PRIx32", %06"PRIx32", %06"PRIx32", "
                "%06"PRIx32", %06"PRIx32", %06"PRIx32", %06"PRIx32", "
                "%06"PRIx32", %06"PRIx32", %06"PRIx32", %06"PRIx32"",
                &palette[0],  &palette[1],  &palette[2],  &palette[3],
                &palette[4],  &palette[5],  &palette[6],  &palette[7],
                &palette[8],  &palette[9],  &palette[10], &palette[11],
                &palette[12], &palette[13], &palette[14], &palette[15]);
            for (i = 0; i < count; i++) {
                palette[i] = rgb_to_yuv(palette[i]);
            }
            have_palette = 1;
        } else if (!strncmp("size:", cur, 5)) {
            sscanf(cur + 5, "%dx%d", &width, &height);
            have_size = 1;
        }
        if (have_palette && have_size)
            break;
        /* advance past the current line and any line terminators */
        cur += strcspn(cur, "\n\r");
        cur += strspn(cur, "\n\r");
    }

    if (have_palette) {
        track->vos_data = av_malloc(16 * 4);
        if (!track->vos_data)
            return AVERROR(ENOMEM);
        for (i = 0; i < 16; i++) {
            AV_WB32(track->vos_data + i * 4, palette[i]);
        }
        track->vos_len = 16 * 4;
    }

    st->codecpar->width  = width;
    st->codecpar->height = track->height = height;

    return 0;
}
2010-05-05 11:41:06 +03:00
static int mov_write_header ( AVFormatContext * s )
{
2011-02-20 12:04:12 +02:00
AVIOContext * pb = s - > pb ;
2010-05-05 11:41:06 +03:00
MOVMuxContext * mov = s - > priv_data ;
2012-03-05 10:51:08 +03:00
AVDictionaryEntry * t , * global_tcr = av_dict_get ( s - > metadata , " timecode " , NULL , 0 ) ;
2013-12-11 14:18:00 +03:00
int i , ret , hint_track = 0 , tmcd_track = 0 ;
2010-05-05 11:41:06 +03:00
2014-07-31 20:00:05 +03:00
mov - > fc = s ;
2013-09-22 01:29:33 +03:00
/* Default mode == MP4 */
mov - > mode = MODE_MP4 ;
2014-08-14 23:31:25 +03:00
if ( s - > oformat ) {
2013-09-22 01:29:33 +03:00
if ( ! strcmp ( " 3gp " , s - > oformat - > name ) ) mov - > mode = MODE_3GP ;
else if ( ! strcmp ( " 3g2 " , s - > oformat - > name ) ) mov - > mode = MODE_3GP | MODE_3G2 ;
else if ( ! strcmp ( " mov " , s - > oformat - > name ) ) mov - > mode = MODE_MOV ;
else if ( ! strcmp ( " psp " , s - > oformat - > name ) ) mov - > mode = MODE_PSP ;
else if ( ! strcmp ( " ipod " , s - > oformat - > name ) ) mov - > mode = MODE_IPOD ;
else if ( ! strcmp ( " ismv " , s - > oformat - > name ) ) mov - > mode = MODE_ISM ;
2013-09-23 13:17:10 +03:00
else if ( ! strcmp ( " f4v " , s - > oformat - > name ) ) mov - > mode = MODE_F4V ;
2013-09-22 01:29:33 +03:00
}
movenc: Add an option for delaying writing the moov with empty_moov
This delays writing the moov until the first fragment is written,
or can be flushed by the caller explicitly when wanted. If the first
sample in all streams is available at this point, we can write
a proper editlist at this point, allowing streams to start at
something else than dts=0. For AC3 and DNXHD, a packet is
needed in order to write the moov header properly.
This isn't added to the normal behaviour for empty_moov, since
the behaviour that ftyp+moov is written during avformat_write_header
would be changed. Callers that split the output stream into header+segments
(either by flushing manually, with the custom_frag flag set, or by
just differentiating between data written during avformat_write_header
and the rest) will need to be adjusted to take this option into use.
For handling streams that start at something else than dts=0, an
alternative would be to use different kinds of heuristics for
guessing the start dts (using AVCodecContext delay or has_b_frames
together with the frame rate), but this is not reliable and doesn't
necessarily work well with stream copy, and wouldn't work for getting
the right initialization data for AC3 or DNXHD either.
Signed-off-by: Martin Storsjö <martin@martin.st>
2014-11-03 16:09:15 +02:00
if ( mov - > flags & FF_MOV_FLAG_DELAY_MOOV )
mov - > flags | = FF_MOV_FLAG_EMPTY_MOOV ;
movenc: Buffer the mdat for the initial moov fragment, too
This allows writing QuickTime-compatible fragmented mp4 (with
a non-empty moov atom) to a non-seekable output.
This buffers the mdat for the initial fragment just as it does
for all normal fragments, too. Previously, the resulting
atom structure was mdat,moov, moof,mdat ..., while it now
is moov,mdat, moof,mdat.
Signed-off-by: Martin Storsjö <martin@martin.st>
2012-02-02 13:50:26 +03:00
/* Set the FRAGMENT flag if any of the fragmentation methods are
* enabled . */
if ( mov - > max_fragment_duration | | mov - > max_fragment_size | |
mov - > flags & ( FF_MOV_FLAG_EMPTY_MOOV |
FF_MOV_FLAG_FRAG_KEYFRAME |
FF_MOV_FLAG_FRAG_CUSTOM ) )
mov - > flags | = FF_MOV_FLAG_FRAGMENT ;
2013-09-10 15:56:19 +03:00
/* Set other implicit flags immediately */
if ( mov - > mode = = MODE_ISM )
mov - > flags | = FF_MOV_FLAG_EMPTY_MOOV | FF_MOV_FLAG_SEPARATE_MOOF |
FF_MOV_FLAG_FRAGMENT ;
2014-10-05 23:51:47 +03:00
if ( mov - > flags & FF_MOV_FLAG_DASH )
mov - > flags | = FF_MOV_FLAG_FRAGMENT | FF_MOV_FLAG_EMPTY_MOOV |
FF_MOV_FLAG_DEFAULT_BASE_MOOF ;
2013-09-10 15:56:19 +03:00
2012-09-20 11:40:10 +03:00
if ( mov - > flags & FF_MOV_FLAG_FASTSTART ) {
2014-11-18 00:02:02 +02:00
mov - > reserved_moov_size = - 1 ;
2012-09-20 11:40:10 +03:00
}
2014-11-04 16:28:48 +02:00
if ( mov - > use_editlist < 0 ) {
mov - > use_editlist = 1 ;
2015-01-03 01:44:38 +02:00
if ( mov - > flags & FF_MOV_FLAG_FRAGMENT & &
! ( mov - > flags & FF_MOV_FLAG_DELAY_MOOV ) ) {
2014-11-04 16:28:48 +02:00
// If we can avoid needing an edit list by shifting the
// tracks, prefer that over (trying to) write edit lists
// in fragmented output.
if ( s - > avoid_negative_ts = = AVFMT_AVOID_NEG_TS_AUTO | |
s - > avoid_negative_ts = = AVFMT_AVOID_NEG_TS_MAKE_ZERO )
mov - > use_editlist = 0 ;
}
2012-09-29 07:33:32 +03:00
}
movenc: Add an option for delaying writing the moov with empty_moov
This delays writing the moov until the first fragment is written,
or can be flushed by the caller explicitly when wanted. If the first
sample in all streams is available at this point, we can write
a proper editlist at this point, allowing streams to start at
something else than dts=0. For AC3 and DNXHD, a packet is
needed in order to write the moov header properly.
This isn't added to the normal behaviour for empty_moov, since
the behaviour that ftyp+moov is written during avformat_write_header
would be changed. Callers that split the output stream into header+segments
(either by flushing manually, with the custom_frag flag set, or by
just differentiating between data written during avformat_write_header
and the rest) will need to be adjusted to take this option into use.
For handling streams that start at something else than dts=0, an
alternative would be to use different kinds of heuristics for
guessing the start dts (using AVCodecContext delay or has_b_frames
together with the frame rate), but this is not reliable and doesn't
necessarily work well with stream copy, and wouldn't work for getting
the right initialization data for AC3 or DNXHD either.
Signed-off-by: Martin Storsjö <martin@martin.st>
2014-11-03 16:09:15 +02:00
if ( mov - > flags & FF_MOV_FLAG_EMPTY_MOOV & &
! ( mov - > flags & FF_MOV_FLAG_DELAY_MOOV ) & & mov - > use_editlist )
av_log ( s , AV_LOG_WARNING , " No meaningful edit list will be written when using empty_moov without delay_moov \n " ) ;
2014-11-04 16:28:48 +02:00
if ( ! mov - > use_editlist & & s - > avoid_negative_ts = = AVFMT_AVOID_NEG_TS_AUTO )
s - > avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_ZERO ;
2012-09-29 07:33:32 +03:00
2013-09-10 15:35:41 +03:00
/* Clear the omit_tfhd_offset flag if default_base_moof is set;
* if the latter is set that ' s enough and omit_tfhd_offset doesn ' t
* add anything extra on top of that . */
if ( mov - > flags & FF_MOV_FLAG_OMIT_TFHD_OFFSET & &
mov - > flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF )
mov - > flags & = ~ FF_MOV_FLAG_OMIT_TFHD_OFFSET ;
if ( mov - > frag_interleave & &
mov - > flags & ( FF_MOV_FLAG_OMIT_TFHD_OFFSET | FF_MOV_FLAG_SEPARATE_MOOF ) ) {
av_log ( s , AV_LOG_ERROR ,
" Sample interleaving in fragments is mutually exclusive with "
" omit_tfhd_offset and separate_moof \n " ) ;
return AVERROR ( EINVAL ) ;
}
movenc: Buffer the mdat for the initial moov fragment, too
This allows writing QuickTime-compatible fragmented mp4 (with
a non-empty moov atom) to a non-seekable output.
This buffers the mdat for the initial fragment just as it does
for all normal fragments, too. Previously, the resulting
atom structure was mdat,moov, moof,mdat ..., while it now
is moov,mdat, moof,mdat.
Signed-off-by: Martin Storsjö <martin@martin.st>
2012-02-02 13:50:26 +03:00
/* Non-seekable output is ok if using fragmentation. If ism_lookahead
2012-01-19 12:17:13 +03:00
* is enabled , we don ' t support non - seekable output at all . */
if ( ! s - > pb - > seekable & &
2013-09-09 14:35:09 +03:00
( ! ( mov - > flags & FF_MOV_FLAG_FRAGMENT ) | | mov - > ism_lookahead ) ) {
2010-05-05 11:41:06 +03:00
av_log ( s , AV_LOG_ERROR , " muxer does not support non seekable output \n " ) ;
2013-09-22 14:44:17 +03:00
return AVERROR ( EINVAL ) ;
2010-05-05 11:41:06 +03:00
}
movenc: Add an option for delaying writing the moov with empty_moov
This delays writing the moov until the first fragment is written,
or can be flushed by the caller explicitly when wanted. If the first
sample in all streams is available at this point, we can write
a proper editlist at this point, allowing streams to start at
something else than dts=0. For AC3 and DNXHD, a packet is
needed in order to write the moov header properly.
This isn't added to the normal behaviour for empty_moov, since
the behaviour that ftyp+moov is written during avformat_write_header
would be changed. Callers that split the output stream into header+segments
(either by flushing manually, with the custom_frag flag set, or by
just differentiating between data written during avformat_write_header
and the rest) will need to be adjusted to take this option into use.
For handling streams that start at something else than dts=0, an
alternative would be to use different kinds of heuristics for
guessing the start dts (using AVCodecContext delay or has_b_frames
together with the frame rate), but this is not reliable and doesn't
necessarily work well with stream copy, and wouldn't work for getting
the right initialization data for AC3 or DNXHD either.
Signed-off-by: Martin Storsjö <martin@martin.st>
2014-11-03 16:09:15 +02:00
if ( ! ( mov - > flags & FF_MOV_FLAG_DELAY_MOOV ) ) {
if ( ( ret = mov_write_identification ( pb , s ) ) < 0 )
return ret ;
}
2010-05-05 11:41:06 +03:00
2010-05-05 11:41:10 +03:00
mov - > nb_streams = s - > nb_streams ;
2013-08-20 02:05:29 +03:00
if ( mov - > mode & ( MODE_MP4 | MODE_MOV | MODE_IPOD ) & & s - > nb_chapters )
2010-05-05 11:41:10 +03:00
mov - > chapter_track = mov - > nb_streams + + ;
2011-05-20 12:27:02 +03:00
if ( mov - > flags & FF_MOV_FLAG_RTP_HINT ) {
2010-05-18 22:47:24 +03:00
/* Add hint tracks for each audio and video stream */
hint_track = mov - > nb_streams ;
for ( i = 0 ; i < s - > nb_streams ; i + + ) {
AVStream * st = s - > streams [ i ] ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields are a demuxer allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( st - > codecpar - > codec_type = = AVMEDIA_TYPE_VIDEO | |
st - > codecpar - > codec_type = = AVMEDIA_TYPE_AUDIO ) {
2010-05-18 22:47:24 +03:00
mov - > nb_streams + + ;
}
}
}
2016-01-21 08:08:09 +02:00
if ( mov - > mode = = MODE_MOV | | mov - > mode = = MODE_MP4 ) {
2012-03-05 10:51:08 +03:00
tmcd_track = mov - > nb_streams ;
2012-08-03 10:38:45 +03:00
/* +1 tmcd track for each video stream with a timecode */
2012-03-05 10:51:08 +03:00
for ( i = 0 ; i < s - > nb_streams ; i + + ) {
AVStream * st = s - > streams [ i ] ;
2016-04-10 21:58:15 +02:00
if ( st - > codecpar - > codec_type = = AVMEDIA_TYPE_VIDEO & &
2012-03-05 10:51:08 +03:00
( global_tcr | | av_dict_get ( st - > metadata , " timecode " , NULL , 0 ) ) )
2012-08-03 10:38:45 +03:00
mov - > nb_meta_tmcd + + ;
}
/* check if there is already a tmcd track to remux */
if ( mov - > nb_meta_tmcd ) {
for ( i = 0 ; i < s - > nb_streams ; i + + ) {
AVStream * st = s - > streams [ i ] ;
2016-04-10 21:58:15 +02:00
if ( st - > codecpar - > codec_tag = = MKTAG ( ' t ' , ' m ' , ' c ' , ' d ' ) ) {
2012-08-03 10:38:45 +03:00
av_log ( s , AV_LOG_WARNING , " You requested a copy of the original timecode track "
" so timecode metadata are now ignored \n " ) ;
mov - > nb_meta_tmcd = 0 ;
}
}
2012-03-05 10:51:08 +03:00
}
2012-08-03 10:38:45 +03:00
mov - > nb_streams + = mov - > nb_meta_tmcd ;
2012-03-05 10:51:08 +03:00
}
2013-08-20 02:05:30 +03:00
// Reserve an extra stream for chapters for the case where chapters
// are written in the trailer
2014-06-08 16:19:46 +03:00
mov - > tracks = av_mallocz_array ( ( mov - > nb_streams + 1 ) , sizeof ( * mov - > tracks ) ) ;
2010-05-05 11:41:06 +03:00
if ( ! mov - > tracks )
return AVERROR ( ENOMEM ) ;
2015-12-07 12:01:09 +02:00
if ( mov - > encryption_scheme_str ! = NULL & & strcmp ( mov - > encryption_scheme_str , " none " ) ! = 0 ) {
if ( strcmp ( mov - > encryption_scheme_str , " cenc-aes-ctr " ) = = 0 ) {
mov - > encryption_scheme = MOV_ENC_CENC_AES_CTR ;
if ( mov - > encryption_key_len ! = AES_CTR_KEY_SIZE ) {
av_log ( s , AV_LOG_ERROR , " Invalid encryption key len %d expected %d \n " ,
mov - > encryption_key_len , AES_CTR_KEY_SIZE ) ;
ret = AVERROR ( EINVAL ) ;
goto error ;
}
if ( mov - > encryption_kid_len ! = CENC_KID_SIZE ) {
av_log ( s , AV_LOG_ERROR , " Invalid encryption kid len %d expected %d \n " ,
mov - > encryption_kid_len , CENC_KID_SIZE ) ;
ret = AVERROR ( EINVAL ) ;
goto error ;
}
} else {
av_log ( s , AV_LOG_ERROR , " unsupported encryption scheme %s \n " ,
mov - > encryption_scheme_str ) ;
ret = AVERROR ( EINVAL ) ;
goto error ;
}
}
2013-07-07 14:59:47 +03:00
for ( i = 0 ; i < s - > nb_streams ; i + + ) {
2010-05-05 11:41:06 +03:00
AVStream * st = s - > streams [ i ] ;
MOVTrack * track = & mov - > tracks [ i ] ;
2011-05-22 13:46:29 +03:00
AVDictionaryEntry * lang = av_dict_get ( st - > metadata , " language " , NULL , 0 ) ;
2010-05-05 11:41:06 +03:00
2014-04-29 18:40:57 +03:00
track - > st = st ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
track - > par = st - > codecpar ;
2010-05-05 11:41:06 +03:00
track - > language = ff_mov_iso639_to_lang ( lang ? lang - > value : " und " , mov - > mode ! = MODE_MOV ) ;
if ( track - > language < 0 )
track - > language = 0 ;
track - > mode = mov - > mode ;
2013-07-07 14:59:47 +03:00
track - > tag = mov_find_codec_tag ( s , track ) ;
2010-05-05 11:41:06 +03:00
if ( ! track - > tag ) {
2013-12-11 14:13:12 +03:00
av_log ( s , AV_LOG_ERROR , " Could not find tag for codec %s in stream #%d, "
" codec not currently supported in container \n " ,
2016-04-10 21:58:15 +02:00
avcodec_get_name ( st - > codecpar - > codec_id ) , i ) ;
2013-12-11 14:18:00 +03:00
ret = AVERROR ( EINVAL ) ;
2010-05-05 11:41:06 +03:00
goto error ;
}
2010-05-18 22:47:24 +03:00
/* If hinting of this track is enabled by a later hint track,
* this is updated . */
track - > hint_track = - 1 ;
2013-07-07 14:59:47 +03:00
track - > start_dts = AV_NOPTS_VALUE ;
2014-11-23 23:23:43 +02:00
track - > start_cts = AV_NOPTS_VALUE ;
2015-03-15 23:57:06 +02:00
track - > end_pts = AV_NOPTS_VALUE ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( st - > codecpar - > codec_type = = AVMEDIA_TYPE_VIDEO ) {
2010-05-05 11:41:06 +03:00
if ( track - > tag = = MKTAG ( ' m ' , ' x ' , ' 3 ' , ' p ' ) | | track - > tag = = MKTAG ( ' m ' , ' x ' , ' 3 ' , ' n ' ) | |
track - > tag = = MKTAG ( ' m ' , ' x ' , ' 4 ' , ' p ' ) | | track - > tag = = MKTAG ( ' m ' , ' x ' , ' 4 ' , ' n ' ) | |
track - > tag = = MKTAG ( ' m ' , ' x ' , ' 5 ' , ' p ' ) | | track - > tag = = MKTAG ( ' m ' , ' x ' , ' 5 ' , ' n ' ) ) {
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( st - > codecpar - > width ! = 720 | | ( st - > codecpar - > height ! = 608 & & st - > codecpar - > height ! = 512 ) ) {
2010-05-05 11:41:06 +03:00
av_log ( s , AV_LOG_ERROR , " D-10/IMX must use 720x608 or 720x512 video resolution \n " ) ;
2013-12-11 14:18:00 +03:00
ret = AVERROR ( EINVAL ) ;
2010-05-05 11:41:06 +03:00
goto error ;
}
2013-07-07 14:59:47 +03:00
track - > height = track - > tag > > 24 = = ' n ' ? 486 : 576 ;
2010-05-05 11:41:06 +03:00
}
2013-05-17 00:35:05 +03:00
if ( mov - > video_track_timescale ) {
track - > timescale = mov - > video_track_timescale ;
} else {
2014-06-18 19:18:25 +03:00
track - > timescale = st - > time_base . den ;
2013-05-17 00:35:05 +03:00
while ( track - > timescale < 10000 )
track - > timescale * = 2 ;
}
2016-04-10 21:58:15 +02:00
if ( st - > codecpar - > width > 65535 | | st - > codecpar - > height > 65535 ) {
av_log ( s , AV_LOG_ERROR , " Resolution %dx%d too large for mov/mp4 \n " , st - > codecpar - > width , st - > codecpar - > height ) ;
2014-07-08 17:17:44 +03:00
ret = AVERROR ( EINVAL ) ;
goto error ;
}
2010-05-05 11:41:06 +03:00
if ( track - > mode = = MODE_MOV & & track - > timescale > 100000 )
av_log ( s , AV_LOG_WARNING ,
" WARNING codec timebase is very high. If duration is too long, \n "
" file may not be playable by quicktime. Specify a shorter timebase \n "
" or choose different container. \n " ) ;
2016-02-26 06:07:28 +02:00
if ( track - > mode = = MODE_MOV & &
2016-04-10 21:58:15 +02:00
track - > par - > codec_id = = AV_CODEC_ID_RAWVIDEO & &
2016-02-26 06:07:28 +02:00
track - > tag = = MKTAG ( ' r ' , ' a ' , ' w ' , ' ' ) ) {
2016-04-10 21:58:15 +02:00
enum AVPixelFormat pix_fmt = track - > par - > format ;
if ( pix_fmt = = AV_PIX_FMT_NONE & & track - > par - > bits_per_coded_sample = = 1 )
2016-02-26 06:07:28 +02:00
pix_fmt = AV_PIX_FMT_MONOWHITE ;
track - > is_unaligned_qt_rgb =
pix_fmt = = AV_PIX_FMT_RGB24 | |
pix_fmt = = AV_PIX_FMT_BGR24 | |
pix_fmt = = AV_PIX_FMT_PAL8 | |
pix_fmt = = AV_PIX_FMT_GRAY8 | |
pix_fmt = = AV_PIX_FMT_MONOWHITE | |
pix_fmt = = AV_PIX_FMT_MONOBLACK ;
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
} else if ( st - > codecpar - > codec_type = = AVMEDIA_TYPE_AUDIO ) {
track - > timescale = st - > codecpar - > sample_rate ;
2016-04-10 21:58:15 +02:00
if ( ! st - > codecpar - > frame_size & & ! av_get_bits_per_sample ( st - > codecpar - > codec_id ) ) {
2012-05-08 00:32:45 +03:00
av_log ( s , AV_LOG_WARNING , " track %d: codec frame size is not set \n " , i ) ;
track - > audio_vbr = 1 ;
2016-04-10 21:58:15 +02:00
} else if ( st - > codecpar - > codec_id = = AV_CODEC_ID_ADPCM_MS | |
st - > codecpar - > codec_id = = AV_CODEC_ID_ADPCM_IMA_WAV | |
st - > codecpar - > codec_id = = AV_CODEC_ID_ILBC ) {
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( ! st - > codecpar - > block_align ) {
2012-01-23 01:23:23 +03:00
av_log ( s , AV_LOG_ERROR , " track %d: codec block align is not set for adpcm \n " , i ) ;
2013-12-11 14:18:00 +03:00
ret = AVERROR ( EINVAL ) ;
2011-01-20 23:14:12 +02:00
goto error ;
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
track - > sample_size = st - > codecpar - > block_align ;
2016-04-10 21:58:15 +02:00
} else if ( st - > codecpar - > frame_size > 1 ) { /* assume compressed audio */
2010-05-05 11:41:06 +03:00
track - > audio_vbr = 1 ;
2012-01-23 01:23:23 +03:00
} else {
2016-04-10 21:58:15 +02:00
track - > sample_size = ( av_get_bits_per_sample ( st - > codecpar - > codec_id ) > > 3 ) * st - > codecpar - > channels ;
2010-05-05 11:41:06 +03:00
}
2016-04-10 21:58:15 +02:00
if ( st - > codecpar - > codec_id = = AV_CODEC_ID_ILBC | |
st - > codecpar - > codec_id = = AV_CODEC_ID_ADPCM_IMA_QT ) {
2013-08-20 14:45:20 +03:00
track - > audio_vbr = 1 ;
}
2012-04-20 23:21:28 +03:00
if ( track - > mode ! = MODE_MOV & &
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
track - > par - > codec_id = = AV_CODEC_ID_MP3 & & track - > timescale < 16000 ) {
2015-05-27 01:58:24 +02:00
if ( s - > strict_std_compliance > = FF_COMPLIANCE_NORMAL ) {
2015-05-27 01:42:23 +02:00
av_log ( s , AV_LOG_ERROR , " track %d: muxing mp3 at %dhz is not standard, to mux anyway set strict to -1 \n " ,
2016-04-10 21:58:15 +02:00
i , track - > par - > sample_rate ) ;
2015-05-27 01:42:23 +02:00
ret = AVERROR ( EINVAL ) ;
goto error ;
} else {
av_log ( s , AV_LOG_WARNING , " track %d: muxing mp3 at %dhz is not standard in MP4 \n " ,
2016-04-10 21:58:15 +02:00
i , track - > par - > sample_rate ) ;
2015-05-27 01:42:23 +02:00
}
2010-05-05 11:41:06 +03:00
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
} else if ( st - > codecpar - > codec_type = = AVMEDIA_TYPE_SUBTITLE ) {
2014-05-18 13:12:59 +03:00
track - > timescale = st - > time_base . den ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
} else if ( st - > codecpar - > codec_type = = AVMEDIA_TYPE_DATA ) {
2014-05-18 13:12:59 +03:00
track - > timescale = st - > time_base . den ;
2013-08-20 13:16:31 +03:00
} else {
2012-03-24 23:36:51 +03:00
track - > timescale = MOV_TIMESCALE ;
2010-05-05 11:41:06 +03:00
}
if ( ! track - > height )
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
track - > height = st - > codecpar - > height ;
2012-01-09 18:58:26 +03:00
/* The ism specific timescale isn't mandatory, but is assumed by
* some tools , such as mp4split . */
if ( mov - > mode = = MODE_ISM )
track - > timescale = 10000000 ;
2010-05-05 11:41:06 +03:00
2011-11-29 21:28:15 +03:00
avpriv_set_pts_info ( st , 64 , 1 , track - > timescale ) ;
2012-01-05 14:57:05 +03:00
/* copy extradata if it exists */
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( st - > codecpar - > extradata_size ) {
if ( st - > codecpar - > codec_id = = AV_CODEC_ID_DVD_SUBTITLE )
2014-03-18 17:50:25 +03:00
mov_create_dvd_sub_decoder_specific_info ( track , st ) ;
2016-04-10 21:58:15 +02:00
else if ( ! TAG_IS_AVCI ( track - > tag ) & & st - > codecpar - > codec_id ! = AV_CODEC_ID_DNXHD ) {
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
track - > vos_len = st - > codecpar - > extradata_size ;
2014-03-18 17:50:25 +03:00
track - > vos_data = av_malloc ( track - > vos_len ) ;
2015-02-18 17:18:26 +02:00
if ( ! track - > vos_data ) {
ret = AVERROR ( ENOMEM ) ;
2015-02-01 20:19:45 +02:00
goto error ;
2015-02-18 17:18:26 +02:00
}
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer is allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
memcpy ( track - > vos_data , st - > codecpar - > extradata , track - > vos_len ) ;
2014-03-18 17:50:25 +03:00
}
2012-01-05 14:57:05 +03:00
}
2015-12-07 12:01:09 +02:00
if ( mov - > encryption_scheme = = MOV_ENC_CENC_AES_CTR ) {
ret = ff_mov_cenc_init ( & track - > cenc , mov - > encryption_key ,
2016-04-10 21:58:15 +02:00
track - > par - > codec_id = = AV_CODEC_ID_H264 , s - > flags & AVFMT_FLAG_BITEXACT ) ;
2015-12-07 12:01:09 +02:00
if ( ret ) {
goto error ;
}
}
2010-05-05 11:41:06 +03:00
}
2014-07-14 17:51:28 +03:00
for ( i = 0 ; i < s - > nb_streams ; i + + ) {
int j ;
AVStream * st = s - > streams [ i ] ;
MOVTrack * track = & mov - > tracks [ i ] ;
2016-04-10 21:58:15 +02:00
if ( st - > codecpar - > codec_type ! = AVMEDIA_TYPE_AUDIO | |
track - > par - > channel_layout ! = AV_CH_LAYOUT_MONO )
2014-07-14 17:51:28 +03:00
continue ;
for ( j = 0 ; j < s - > nb_streams ; j + + ) {
AVStream * stj = s - > streams [ j ] ;
MOVTrack * trackj = & mov - > tracks [ j ] ;
if ( j = = i )
continue ;
2016-04-10 21:58:15 +02:00
if ( stj - > codecpar - > codec_type ! = AVMEDIA_TYPE_AUDIO | |
trackj - > par - > channel_layout ! = AV_CH_LAYOUT_MONO | |
2014-07-14 17:51:28 +03:00
trackj - > language ! = track - > language | |
trackj - > tag ! = track - > tag
)
continue ;
track - > multichannel_as_mono + + ;
}
}
2013-08-22 17:25:18 +03:00
enable_tracks ( s ) ;
2010-05-05 11:41:06 +03:00
2013-07-08 05:08:07 +03:00
if ( mov - > reserved_moov_size ) {
2015-08-10 11:23:21 +02:00
mov - > reserved_header_pos = avio_tell ( pb ) ;
2012-09-20 11:40:10 +03:00
if ( mov - > reserved_moov_size > 0 )
avio_skip ( pb , mov - > reserved_moov_size ) ;
2011-10-26 23:09:26 +03:00
}
2014-02-19 16:20:22 +03:00
if ( mov - > flags & FF_MOV_FLAG_FRAGMENT ) {
/* If no fragmentation options have been set, set a default. */
if ( ! ( mov - > flags & ( FF_MOV_FLAG_FRAG_KEYFRAME |
FF_MOV_FLAG_FRAG_CUSTOM ) ) & &
! mov - > max_fragment_duration & & ! mov - > max_fragment_size )
mov - > flags | = FF_MOV_FLAG_FRAG_KEYFRAME ;
} else {
2013-08-20 02:05:31 +03:00
if ( mov - > flags & FF_MOV_FLAG_FASTSTART )
2015-08-09 21:39:28 +02:00
mov - > reserved_header_pos = avio_tell ( pb ) ;
2012-01-05 14:57:05 +03:00
mov_write_mdat_tag ( pb , mov ) ;
2013-08-20 02:05:31 +03:00
}
2011-07-07 12:25:03 +03:00
2016-02-06 19:00:45 +02:00
ff_parse_creation_time_metadata ( s , & mov - > time , 1 ) ;
2012-01-09 19:35:01 +03:00
if ( mov - > time )
mov - > time + = 0x7C25B080 ; // 1970 based -> 1904 based
2010-05-05 11:41:10 +03:00
if ( mov - > chapter_track )
2013-12-11 14:18:00 +03:00
if ( ( ret = mov_create_chapter_track ( s , mov - > chapter_track ) ) < 0 )
2013-08-26 10:45:27 +03:00
goto error ;
2010-05-05 11:41:06 +03:00
2011-05-20 12:27:02 +03:00
if ( mov - > flags & FF_MOV_FLAG_RTP_HINT ) {
2010-05-18 22:47:24 +03:00
/* Initialize the hint tracks for each audio and video stream */
for ( i = 0 ; i < s - > nb_streams ; i + + ) {
AVStream * st = s - > streams [ i ] ;
lavf: replace AVStream.codec with AVStream.codecpar
Currently, AVStream contains an embedded AVCodecContext instance, which
is used by demuxers to export stream parameters to the caller and by
muxers to receive stream parameters from the caller. It is also used
internally as the codec context that is passed to parsers.
In addition, it is also widely used by the callers as the decoding (when
demuxer) or encoding (when muxing) context, though this has been
officially discouraged since Libav 11.
There are multiple important problems with this approach:
- the fields in AVCodecContext are in general one of
* stream parameters
* codec options
* codec state
However, it's not clear which ones are which. It is consequently
unclear which fields a demuxer is allowed to set or a muxer allowed to
read. This leads to erratic behaviour depending on whether decoding or
encoding is being performed or not (and whether it uses the AVStream
embedded codec context).
- various synchronization issues arising from the fact that the same
context is used by several different APIs (muxers/demuxers,
parsers, bitstream filters and encoders/decoders) simultaneously, with
there being no clear rules for who can modify what and the different
processes being typically delayed with respect to each other.
- avformat_find_stream_info() making it necessary to support opening
and closing a single codec context multiple times, thus
complicating the semantics of freeing various allocated objects in the
codec context.
Those problems are resolved by replacing the AVStream embedded codec
context with a newly added AVCodecParameters instance, which stores only
the stream parameters exported by the demuxers or read by the muxers.
2014-06-18 21:42:52 +03:00
if ( st - > codecpar - > codec_type = = AVMEDIA_TYPE_VIDEO | |
st - > codecpar - > codec_type = = AVMEDIA_TYPE_AUDIO ) {
2013-12-11 14:18:00 +03:00
if ( ( ret = ff_mov_init_hinting ( s , hint_track , i ) ) < 0 )
2013-07-04 18:35:41 +03:00
goto error ;
2010-05-18 22:47:24 +03:00
hint_track + + ;
}
}
}
2012-08-03 10:38:45 +03:00
if ( mov - > nb_meta_tmcd ) {
2012-03-05 10:51:08 +03:00
/* Initialize the tmcd tracks */
for ( i = 0 ; i < s - > nb_streams ; i + + ) {
AVStream * st = s - > streams [ i ] ;
t = global_tcr ;
2016-04-10 21:58:15 +02:00
if ( st - > codecpar - > codec_type = = AVMEDIA_TYPE_VIDEO ) {
2012-03-05 10:51:08 +03:00
if ( ! t )
t = av_dict_get ( st - > metadata , " timecode " , NULL , 0 ) ;
if ( ! t )
continue ;
2013-12-11 14:18:00 +03:00
if ( ( ret = mov_create_timecode_track ( s , tmcd_track , i , t - > value ) ) < 0 )
2012-03-05 10:51:08 +03:00
goto error ;
tmcd_track + + ;
}
}
}
2011-03-14 21:39:06 +02:00
avio_flush ( pb ) ;
2010-05-05 11:41:06 +03:00
2012-01-20 14:02:18 +03:00
if ( mov - > flags & FF_MOV_FLAG_ISML )
mov_write_isml_manifest ( pb , mov ) ;
movenc: Add an option for delaying writing the moov with empty_moov
This delays writing the moov until the first fragment is written,
or can be flushed by the caller explicitly when wanted. If the first
sample in all streams is available at this point, we can write
a proper editlist at this point, allowing streams to start at
something else than dts=0. For AC3 and DNXHD, a packet is
needed in order to write the moov header properly.
This isn't added to the normal behaviour for empty_moov, since
the behaviour that ftyp+moov is written during avformat_write_header
would be changed. Callers that split the output stream into header+segments
(either by flushing manually, with the custom_frag flag set, or by
just differentiating between data written during avformat_write_header
and the rest) will need to be adjusted to take this option into use.
For handling streams that start at something else than dts=0, an
alternative would be to use different kinds of heuristics for
guessing the start dts (using AVCodecContext delay or has_b_frames
together with the frame rate), but this is not reliable and doesn't
necessarily work well with stream copy, and wouldn't work for getting
the right initialization data for AC3 or DNXHD either.
Signed-off-by: Martin Storsjö <martin@martin.st>
2014-11-03 16:09:15 +02:00
if ( mov - > flags & FF_MOV_FLAG_EMPTY_MOOV & &
! ( mov - > flags & FF_MOV_FLAG_DELAY_MOOV ) ) {
2015-01-25 15:32:22 +02:00
if ( ( ret = mov_write_moov_tag ( pb , mov , s ) ) < 0 )
return ret ;
2015-11-07 01:18:05 +02:00
avio_flush ( pb ) ;
2015-01-20 16:33:12 +02:00
mov - > moov_written = 1 ;
2015-08-09 22:11:55 +02:00
if ( mov - > flags & FF_MOV_FLAG_GLOBAL_SIDX )
2015-08-09 21:39:28 +02:00
mov - > reserved_header_pos = avio_tell ( pb ) ;
2012-01-05 14:57:05 +03:00
}
2010-05-05 11:41:06 +03:00
return 0 ;
error :
2013-08-26 10:59:46 +03:00
mov_free ( s ) ;
2013-12-11 14:18:00 +03:00
return ret ;
2010-05-05 11:41:06 +03:00
}
2013-08-20 02:05:31 +03:00
static int get_moov_size ( AVFormatContext * s )
{
int ret ;
AVIOContext * moov_buf ;
MOVMuxContext * mov = s - > priv_data ;
2013-09-18 22:02:17 +03:00
if ( ( ret = ffio_open_null_buf ( & moov_buf ) ) < 0 )
2013-08-20 02:05:31 +03:00
return ret ;
2015-01-25 15:32:22 +02:00
if ( ( ret = mov_write_moov_tag ( moov_buf , mov , s ) ) < 0 )
return ret ;
2013-09-18 22:02:17 +03:00
return ffio_close_null_buf ( moov_buf ) ;
2013-08-20 02:05:31 +03:00
}
2014-10-21 11:42:27 +03:00
static int get_sidx_size ( AVFormatContext * s )
{
int ret ;
AVIOContext * buf ;
MOVMuxContext * mov = s - > priv_data ;
if ( ( ret = ffio_open_null_buf ( & buf ) ) < 0 )
return ret ;
mov_write_sidx_tags ( buf , mov , - 1 , 0 ) ;
return ffio_close_null_buf ( buf ) ;
}
2013-08-20 02:05:31 +03:00
/*
 * This function gets the moov size if moved to the top of the file: the chunk
 * offset table can switch between stco (32-bit entries) and co64 (64-bit
 * entries) when the moov is moved to the beginning, so the size of the moov
 * would change. It also updates the chunk offset tables.
 */
static int compute_moov_size ( AVFormatContext * s )
{
int i , moov_size , moov_size2 ;
MOVMuxContext * mov = s - > priv_data ;
moov_size = get_moov_size ( s ) ;
if ( moov_size < 0 )
return moov_size ;
for ( i = 0 ; i < mov - > nb_streams ; i + + )
mov - > tracks [ i ] . data_offset + = moov_size ;
moov_size2 = get_moov_size ( s ) ;
if ( moov_size2 < 0 )
return moov_size2 ;
2013-08-20 02:05:31 +03:00
/* if the size changed, we just switched from stco to co64 and need to
2012-09-20 11:40:10 +03:00
* update the offsets */
if ( moov_size2 ! = moov_size )
for ( i = 0 ; i < mov - > nb_streams ; i + + )
mov - > tracks [ i ] . data_offset + = moov_size2 - moov_size ;
return moov_size2 ;
}
2014-10-21 11:42:27 +03:00
static int compute_sidx_size ( AVFormatContext * s )
{
int i , sidx_size ;
MOVMuxContext * mov = s - > priv_data ;
sidx_size = get_sidx_size ( s ) ;
if ( sidx_size < 0 )
return sidx_size ;
for ( i = 0 ; i < mov - > nb_streams ; i + + )
mov - > tracks [ i ] . data_offset + = sidx_size ;
return sidx_size ;
}
2012-09-20 11:40:10 +03:00
static int shift_data ( AVFormatContext * s )
{
int ret = 0 , moov_size ;
MOVMuxContext * mov = s - > priv_data ;
int64_t pos , pos_end = avio_tell ( s - > pb ) ;
uint8_t * buf , * read_buf [ 2 ] ;
int read_buf_id = 0 ;
int read_size [ 2 ] ;
AVIOContext * read_pb ;
2014-10-21 11:42:27 +03:00
if ( mov - > flags & FF_MOV_FLAG_FRAGMENT )
moov_size = compute_sidx_size ( s ) ;
else
moov_size = compute_moov_size ( s ) ;
2012-09-20 11:40:10 +03:00
if ( moov_size < 0 )
return moov_size ;
buf = av_malloc ( moov_size * 2 ) ;
if ( ! buf )
return AVERROR ( ENOMEM ) ;
read_buf [ 0 ] = buf ;
read_buf [ 1 ] = buf + moov_size ;
/* Shift the data: the AVIO context of the output can only be used for
* writing , so we re - open the same output , but for reading . It also avoids
* a read / seek / write / seek back and forth . */
avio_flush ( s - > pb ) ;
2016-01-16 18:53:43 +02:00
ret = s - > io_open ( s , & read_pb , s - > filename , AVIO_FLAG_READ , NULL ) ;
2012-09-20 11:40:10 +03:00
if ( ret < 0 ) {
av_log ( s , AV_LOG_ERROR , " Unable to re-open %s output file for "
" the second pass (faststart) \n " , s - > filename ) ;
goto end ;
}
/* mark the end of the shift to up to the last data we wrote, and get ready
* for writing */
pos_end = avio_tell ( s - > pb ) ;
2015-08-09 21:39:28 +02:00
avio_seek ( s - > pb , mov - > reserved_header_pos + moov_size , SEEK_SET ) ;
2012-09-20 11:40:10 +03:00
/* start reading at where the new moov will be placed */
2015-08-09 21:39:28 +02:00
avio_seek ( read_pb , mov - > reserved_header_pos , SEEK_SET ) ;
2012-09-20 11:40:10 +03:00
pos = avio_tell ( read_pb ) ;
# define READ_BLOCK do { \
read_size [ read_buf_id ] = avio_read ( read_pb , read_buf [ read_buf_id ] , moov_size ) ; \
read_buf_id ^ = 1 ; \
} while ( 0 )
/* shift data by chunk of at most moov_size */
READ_BLOCK ;
do {
int n ;
READ_BLOCK ;
n = read_size [ read_buf_id ] ;
if ( n < = 0 )
break ;
avio_write ( s - > pb , read_buf [ read_buf_id ] , n ) ;
pos + = n ;
} while ( pos < pos_end ) ;
2016-01-16 18:53:43 +02:00
ff_format_io_close ( s , & read_pb ) ;
2012-09-20 11:40:10 +03:00
end :
av_free ( buf ) ;
return ret ;
}
2003-08-26 23:23:13 +03:00
static int mov_write_trailer ( AVFormatContext * s )
{
2009-02-28 18:02:29 +02:00
MOVMuxContext * mov = s - > priv_data ;
2011-02-20 12:04:12 +02:00
AVIOContext * pb = s - > pb ;
2003-08-26 23:23:13 +03:00
int res = 0 ;
2006-06-24 21:09:20 +03:00
int i ;
2013-08-20 02:05:30 +03:00
int64_t moov_pos ;
2003-08-26 23:23:13 +03:00
2012-06-25 01:05:33 +03:00
/*
* Before actually writing the trailer , make sure that there are no
* dangling subtitles , that need a terminating sample .
*/
for ( i = 0 ; i < mov - > nb_streams ; i + + ) {
MOVTrack * trk = & mov - > tracks [ i ] ;
2016-04-10 21:58:15 +02:00
if ( trk - > par - > codec_id = = AV_CODEC_ID_MOV_TEXT & &
2012-06-25 01:05:33 +03:00
! trk - > last_sample_is_subtitle_end ) {
mov_write_subtitle_end_packet ( s , i , trk - > track_duration ) ;
trk - > last_sample_is_subtitle_end = 1 ;
}
}
2013-08-20 02:05:30 +03:00
// If there were no chapters when the header was written, but there
// are chapters now, write them in the trailer. This only works
// when we are not doing fragments.
if ( ! mov - > chapter_track & & ! ( mov - > flags & FF_MOV_FLAG_FRAGMENT ) ) {
if ( mov - > mode & ( MODE_MP4 | MODE_MOV | MODE_IPOD ) & & s - > nb_chapters ) {
mov - > chapter_track = mov - > nb_streams + + ;
2013-08-26 10:45:27 +03:00
if ( ( res = mov_create_chapter_track ( s , mov - > chapter_track ) ) < 0 )
goto error ;
2013-08-20 02:05:30 +03:00
}
}
2003-08-26 23:23:13 +03:00
2012-01-05 14:57:05 +03:00
if ( ! ( mov - > flags & FF_MOV_FLAG_FRAGMENT ) ) {
2013-08-20 02:05:31 +03:00
moov_pos = avio_tell ( pb ) ;
2012-01-25 13:40:05 +03:00
/* Write size of mdat tag */
if ( mov - > mdat_size + 8 < = UINT32_MAX ) {
avio_seek ( pb , mov - > mdat_pos , SEEK_SET ) ;
avio_wb32 ( pb , mov - > mdat_size + 8 ) ;
} else {
/* overwrite 'wide' placeholder atom */
avio_seek ( pb , mov - > mdat_pos - 8 , SEEK_SET ) ;
/* special value: real atom size will be 64 bit value after
* tag field */
avio_wb32 ( pb , 1 ) ;
ffio_wfourcc ( pb , " mdat " ) ;
avio_wb64 ( pb , mov - > mdat_size + 16 ) ;
}
2015-08-10 11:23:21 +02:00
avio_seek ( pb , mov - > reserved_moov_size > 0 ? mov - > reserved_header_pos : moov_pos , SEEK_SET ) ;
2012-09-20 11:40:10 +03:00
2013-08-20 02:05:31 +03:00
if ( mov - > flags & FF_MOV_FLAG_FASTSTART ) {
av_log ( s , AV_LOG_INFO , " Starting second pass: moving the moov atom to the beginning of the file \n " ) ;
2012-09-20 11:40:10 +03:00
res = shift_data ( s ) ;
2016-05-29 16:51:57 +02:00
if ( res < 0 )
goto error ;
2012-09-20 11:40:10 +03:00
if ( res = = 0 ) {
2015-08-09 21:39:28 +02:00
avio_seek ( pb , mov - > reserved_header_pos , SEEK_SET ) ;
2015-01-25 15:32:22 +02:00
if ( ( res = mov_write_moov_tag ( pb , mov , s ) ) < 0 )
goto error ;
2012-09-20 11:40:10 +03:00
}
} else if ( mov - > reserved_moov_size > 0 ) {
int64_t size ;
2015-01-25 15:32:22 +02:00
if ( ( res = mov_write_moov_tag ( pb , mov , s ) ) < 0 )
goto error ;
2015-08-10 11:23:21 +02:00
size = mov - > reserved_moov_size - ( avio_tell ( pb ) - mov - > reserved_header_pos ) ;
2013-07-08 05:08:07 +03:00
if ( size < 8 ) {
2012-01-26 03:52:29 +03:00
av_log ( s , AV_LOG_ERROR , " reserved_moov_size is too small, needed % " PRId64 " additional \n " , 8 - size ) ;
2015-01-25 15:28:43 +02:00
res = AVERROR ( EINVAL ) ;
goto error ;
2012-01-26 03:52:29 +03:00
}
avio_wb32 ( pb , size ) ;
ffio_wfourcc ( pb , " free " ) ;
2014-07-18 21:30:14 +03:00
ffio_fill ( pb , 0 , size - 8 ) ;
2012-01-26 03:52:29 +03:00
avio_seek ( pb , moov_pos , SEEK_SET ) ;
2012-09-20 11:40:10 +03:00
} else {
2015-01-25 15:32:22 +02:00
if ( ( res = mov_write_moov_tag ( pb , mov , s ) ) < 0 )
goto error ;
2011-10-26 23:09:26 +03:00
}
2015-01-25 15:32:22 +02:00
res = 0 ;
2012-01-05 14:57:05 +03:00
} else {
2015-10-20 21:30:03 +02:00
mov_auto_flush_fragment ( s , 1 ) ;
2014-10-21 11:42:27 +03:00
for ( i = 0 ; i < mov - > nb_streams ; i + + )
mov - > tracks [ i ] . data_offset = 0 ;
2015-08-09 22:11:55 +02:00
if ( mov - > flags & FF_MOV_FLAG_GLOBAL_SIDX ) {
2014-10-21 11:42:27 +03:00
av_log ( s , AV_LOG_INFO , " Starting second pass: inserting sidx atoms \n " ) ;
res = shift_data ( s ) ;
2016-05-29 16:51:57 +02:00
if ( res < 0 )
goto error ;
2014-10-21 11:42:27 +03:00
if ( res = = 0 ) {
int64_t end = avio_tell ( pb ) ;
2015-08-09 21:39:28 +02:00
avio_seek ( pb , mov - > reserved_header_pos , SEEK_SET ) ;
2014-10-21 11:42:27 +03:00
mov_write_sidx_tags ( pb , mov , - 1 , 0 ) ;
avio_seek ( pb , end , SEEK_SET ) ;
mov_write_mfra_tag ( pb , mov ) ;
}
} else {
mov_write_mfra_tag ( pb , mov ) ;
}
2011-10-26 23:09:26 +03:00
}
2003-08-26 23:23:13 +03:00
2013-08-26 10:45:27 +03:00
error :
2013-08-26 10:59:46 +03:00
mov_free ( s ) ;
2009-03-22 05:52:55 +02:00
2003-08-26 23:23:13 +03:00
return res ;
}
2009-01-14 01:44:16 +02:00
# if CONFIG_MOV_MUXER
2011-10-03 20:14:03 +03:00
MOV_CLASS ( mov )
2011-01-26 00:03:28 +02:00
AVOutputFormat ff_mov_muxer = {
2011-07-16 23:18:12 +03:00
. name = " mov " ,
2012-07-24 04:23:48 +03:00
. long_name = NULL_IF_CONFIG_SMALL ( " QuickTime / MOV " ) ,
2011-07-16 23:18:12 +03:00
. extensions = " mov " ,
. priv_data_size = sizeof ( MOVMuxContext ) ,
2012-08-05 12:11:04 +03:00
. audio_codec = AV_CODEC_ID_AAC ,
2012-07-22 02:44:26 +03:00
. video_codec = CONFIG_LIBX264_ENCODER ?
2012-08-05 12:11:04 +03:00
AV_CODEC_ID_H264 : AV_CODEC_ID_MPEG4 ,
2011-07-16 23:18:12 +03:00
. write_header = mov_write_header ,
2012-04-11 13:37:14 +03:00
. write_packet = mov_write_packet ,
2011-07-16 23:18:12 +03:00
. write_trailer = mov_write_trailer ,
2013-04-26 14:21:27 +03:00
. flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE ,
2012-04-06 17:50:48 +03:00
. codec_tag = ( const AVCodecTag * const [ ] ) {
ff_codec_movvideo_tags , ff_codec_movaudio_tags , 0
} ,
. priv_class = & mov_muxer_class ,
2003-08-26 23:23:13 +03:00
} ;
2006-07-11 00:14:37 +03:00
# endif
2009-01-14 01:44:16 +02:00
# if CONFIG_TGP_MUXER
2011-10-03 20:14:03 +03:00
MOV_CLASS ( tgp )
2011-01-26 00:03:28 +02:00
AVOutputFormat ff_tgp_muxer = {
2011-07-16 23:18:12 +03:00
. name = " 3gp " ,
2012-07-24 04:23:48 +03:00
. long_name = NULL_IF_CONFIG_SMALL ( " 3GP (3GPP file format) " ) ,
2011-07-16 23:18:12 +03:00
. extensions = " 3gp " ,
. priv_data_size = sizeof ( MOVMuxContext ) ,
2012-08-05 12:11:04 +03:00
. audio_codec = AV_CODEC_ID_AMR_NB ,
. video_codec = AV_CODEC_ID_H263 ,
2011-07-16 23:18:12 +03:00
. write_header = mov_write_header ,
2012-04-11 13:37:14 +03:00
. write_packet = mov_write_packet ,
2011-07-16 23:18:12 +03:00
. write_trailer = mov_write_trailer ,
2013-04-26 14:21:27 +03:00
. flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE ,
2012-04-06 17:50:48 +03:00
. codec_tag = ( const AVCodecTag * const [ ] ) { codec_3gp_tags , 0 } ,
. priv_class = & tgp_muxer_class ,
2003-08-26 23:23:13 +03:00
} ;
2006-07-11 00:14:37 +03:00
# endif
2009-01-14 01:44:16 +02:00
# if CONFIG_MP4_MUXER
2011-10-03 20:14:03 +03:00
MOV_CLASS ( mp4 )
2011-01-26 00:03:28 +02:00
AVOutputFormat ff_mp4_muxer = {
2011-07-16 23:18:12 +03:00
. name = " mp4 " ,
2012-07-24 04:23:48 +03:00
. long_name = NULL_IF_CONFIG_SMALL ( " MP4 (MPEG-4 Part 14) " ) ,
2015-07-08 20:24:46 +02:00
. mime_type = " video/mp4 " ,
2011-07-16 23:18:12 +03:00
. extensions = " mp4 " ,
. priv_data_size = sizeof ( MOVMuxContext ) ,
2012-08-05 12:11:04 +03:00
. audio_codec = AV_CODEC_ID_AAC ,
2012-07-22 02:44:26 +03:00
. video_codec = CONFIG_LIBX264_ENCODER ?
2012-08-05 12:11:04 +03:00
AV_CODEC_ID_H264 : AV_CODEC_ID_MPEG4 ,
2011-07-16 23:18:12 +03:00
. write_header = mov_write_header ,
2012-04-11 13:37:14 +03:00
. write_packet = mov_write_packet ,
2011-07-16 23:18:12 +03:00
. write_trailer = mov_write_trailer ,
2013-04-26 14:21:27 +03:00
. flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE ,
2012-04-06 17:50:48 +03:00
. codec_tag = ( const AVCodecTag * const [ ] ) { ff_mp4_obj_type , 0 } ,
. priv_class = & mp4_muxer_class ,
2003-08-26 23:23:13 +03:00
} ;
2006-07-11 00:14:37 +03:00
# endif
2009-01-14 01:44:16 +02:00
# if CONFIG_PSP_MUXER
2011-10-03 20:14:03 +03:00
MOV_CLASS ( psp )
2011-01-26 00:03:28 +02:00
AVOutputFormat ff_psp_muxer = {
2011-07-16 23:18:12 +03:00
. name = " psp " ,
2012-07-24 04:23:48 +03:00
. long_name = NULL_IF_CONFIG_SMALL ( " PSP MP4 (MPEG-4 Part 14) " ) ,
2011-07-16 23:18:12 +03:00
. extensions = " mp4,psp " ,
. priv_data_size = sizeof ( MOVMuxContext ) ,
2012-08-05 12:11:04 +03:00
. audio_codec = AV_CODEC_ID_AAC ,
2012-07-22 02:44:26 +03:00
. video_codec = CONFIG_LIBX264_ENCODER ?
2012-08-05 12:11:04 +03:00
AV_CODEC_ID_H264 : AV_CODEC_ID_MPEG4 ,
2011-07-16 23:18:12 +03:00
. write_header = mov_write_header ,
2012-04-11 13:37:14 +03:00
. write_packet = mov_write_packet ,
2011-07-16 23:18:12 +03:00
. write_trailer = mov_write_trailer ,
2013-08-17 19:05:53 +03:00
. flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE ,
2012-04-06 17:50:48 +03:00
. codec_tag = ( const AVCodecTag * const [ ] ) { ff_mp4_obj_type , 0 } ,
. priv_class = & psp_muxer_class ,
2005-01-27 16:48:15 +02:00
} ;
2006-07-11 00:14:37 +03:00
# endif
2009-01-14 01:44:16 +02:00
# if CONFIG_TG2_MUXER
2011-10-03 20:14:03 +03:00
MOV_CLASS ( tg2 )
2011-01-26 00:03:28 +02:00
AVOutputFormat ff_tg2_muxer = {
2011-07-16 23:18:12 +03:00
. name = " 3g2 " ,
2012-07-24 04:23:48 +03:00
. long_name = NULL_IF_CONFIG_SMALL ( " 3GP2 (3GPP2 file format) " ) ,
2011-07-16 23:18:12 +03:00
. extensions = " 3g2 " ,
. priv_data_size = sizeof ( MOVMuxContext ) ,
2012-08-05 12:11:04 +03:00
. audio_codec = AV_CODEC_ID_AMR_NB ,
. video_codec = AV_CODEC_ID_H263 ,
2011-07-16 23:18:12 +03:00
. write_header = mov_write_header ,
2012-04-11 13:37:14 +03:00
. write_packet = mov_write_packet ,
2011-07-16 23:18:12 +03:00
. write_trailer = mov_write_trailer ,
2013-04-26 14:21:27 +03:00
. flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE ,
2012-04-06 17:50:48 +03:00
. codec_tag = ( const AVCodecTag * const [ ] ) { codec_3gp_tags , 0 } ,
. priv_class = & tg2_muxer_class ,
2005-02-17 01:14:38 +02:00
} ;
2006-07-11 00:14:37 +03:00
# endif
2009-01-14 01:44:16 +02:00
# if CONFIG_IPOD_MUXER
2011-10-03 20:14:03 +03:00
MOV_CLASS ( ipod )
2011-01-26 00:03:28 +02:00
AVOutputFormat ff_ipod_muxer = {
2011-07-16 23:18:12 +03:00
. name = " ipod " ,
2012-07-24 04:23:48 +03:00
. long_name = NULL_IF_CONFIG_SMALL ( " iPod H.264 MP4 (MPEG-4 Part 14) " ) ,
2015-07-08 20:24:46 +02:00
. mime_type = " video/mp4 " ,
2011-07-16 23:18:12 +03:00
. extensions = " m4v,m4a " ,
. priv_data_size = sizeof ( MOVMuxContext ) ,
2012-08-05 12:11:04 +03:00
. audio_codec = AV_CODEC_ID_AAC ,
. video_codec = AV_CODEC_ID_H264 ,
2011-07-16 23:18:12 +03:00
. write_header = mov_write_header ,
2012-04-11 13:37:14 +03:00
. write_packet = mov_write_packet ,
2011-07-16 23:18:12 +03:00
. write_trailer = mov_write_trailer ,
2013-07-05 00:09:32 +03:00
. flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE ,
2012-04-06 17:50:48 +03:00
. codec_tag = ( const AVCodecTag * const [ ] ) { codec_ipod_tags , 0 } ,
. priv_class = & ipod_muxer_class ,
2008-03-16 15:36:36 +02:00
} ;
# endif
2012-01-09 18:58:26 +03:00
# if CONFIG_ISMV_MUXER
MOV_CLASS ( ismv )
AVOutputFormat ff_ismv_muxer = {
. name = " ismv " ,
2012-07-25 00:51:41 +03:00
. long_name = NULL_IF_CONFIG_SMALL ( " ISMV/ISMA (Smooth Streaming) " ) ,
2015-07-08 20:24:46 +02:00
. mime_type = " video/mp4 " ,
2012-01-09 18:58:26 +03:00
. extensions = " ismv,isma " ,
. priv_data_size = sizeof ( MOVMuxContext ) ,
2012-08-05 12:11:04 +03:00
. audio_codec = AV_CODEC_ID_AAC ,
. video_codec = AV_CODEC_ID_H264 ,
2012-01-09 18:58:26 +03:00
. write_header = mov_write_header ,
2012-04-11 13:37:14 +03:00
. write_packet = mov_write_packet ,
2012-01-09 18:58:26 +03:00
. write_trailer = mov_write_trailer ,
2013-08-17 19:05:53 +03:00
. flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE ,
2012-04-06 17:50:48 +03:00
. codec_tag = ( const AVCodecTag * const [ ] ) { ff_mp4_obj_type , 0 } ,
2012-01-09 18:58:26 +03:00
. priv_class = & ismv_muxer_class ,
} ;
# endif
2012-09-17 17:16:16 +03:00
# if CONFIG_F4V_MUXER
MOV_CLASS ( f4v )
AVOutputFormat ff_f4v_muxer = {
. name = " f4v " ,
. long_name = NULL_IF_CONFIG_SMALL ( " F4V Adobe Flash Video " ) ,
. mime_type = " application/f4v " ,
. extensions = " f4v " ,
. priv_data_size = sizeof ( MOVMuxContext ) ,
. audio_codec = AV_CODEC_ID_AAC ,
. video_codec = AV_CODEC_ID_H264 ,
. write_header = mov_write_header ,
. write_packet = mov_write_packet ,
. write_trailer = mov_write_trailer ,
. flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH ,
. codec_tag = ( const AVCodecTag * const [ ] ) { codec_f4v_tags , 0 } ,
. priv_class = & f4v_muxer_class ,
} ;
# endif