VC1testenc: convert pts values to correct time-base.
The VC1 test container always uses a time base of 1 ms, so we must convert from whatever time base the application gave us to that; otherwise the video plays at ridiculous speeds. It would be possible to signal that a container supports only one time base and have code in a layer above do the conversion, but for a single format this seems over-engineered.
commit 76c802e989
parent 55fa97e215
@@ -55,11 +55,14 @@ static int vc1test_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
     RCVContext *ctx = s->priv_data;
     ByteIOContext *pb = s->pb;
+    uint32_t pts = av_rescale(pkt->pts,
+                              1000 * (uint64_t)s->streams[0]->time_base.num,
+                              s->streams[0]->time_base.den);
 
     if (!pkt->size)
         return 0;
     put_le32(pb, pkt->size | ((pkt->flags & AV_PKT_FLAG_KEY) ? 0x80000000 : 0));
-    put_le32(pb, pkt->pts);
+    put_le32(pb, pts);
     put_buffer(pb, pkt->data, pkt->size);
     put_flush_packet(pb);
     ctx->frames++;
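For illustration, here is a minimal standalone sketch of the arithmetic the patch performs. The `rescale` helper is a simplified stand-in for FFmpeg's `av_rescale` (the real one guards against overflow), and the sample pts and time base are illustrative, not taken from the commit.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for av_rescale(a, b, c): computes a * b / c
     * with 64-bit intermediates. */
    static int64_t rescale(int64_t a, int64_t b, int64_t c)
    {
        return a * b / c;
    }

    int main(void)
    {
        /* Assumed example: a stream with time_base 1/25 (25 fps),
         * so each pts tick is 40 ms. */
        int64_t num = 1, den = 25;
        int64_t pts = 50;  /* 50 ticks = 2 seconds into the stream */

        /* Same formula the patch uses: pts * 1000 * num / den
         * converts stream ticks to the container's 1 ms time base. */
        uint32_t pts_ms = (uint32_t)rescale(pts, 1000 * num, den);
        printf("pts %lld -> %u ms\n", (long long)pts, pts_ms);  /* prints: pts 50 -> 2000 ms */
        return 0;
    }

Writing `pkt->pts` to the file unconverted would store 50 here instead of 2000, which is why playback speed came out wrong before this change.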