1
0
mirror of https://github.com/FFmpeg/FFmpeg.git synced 2024-12-23 12:43:46 +02:00

changed audio and video grab interface (simpler now)

Originally committed as revision 148 to svn://svn.ffmpeg.org/ffmpeg/trunk
This commit is contained in:
Fabrice Bellard 2001-09-24 23:27:06 +00:00
parent 46a3d0685d
commit 4972b26f24
3 changed files with 291 additions and 205 deletions

View File

@ -30,87 +30,28 @@
/* device path used both for audio grab (input) and playback (output) */
const char *audio_device = "/dev/dsp";

/* size in bytes of one hardware read/write chunk */
#define AUDIO_BLOCK_SIZE 4096

/* per-instance state of the OSS audio grab/output device */
typedef struct {
    int fd;            /* descriptor of the opened audio device */
    int sample_rate;   /* requested rate, replaced by the real driver rate */
    int channels;      /* 1 = mono, 2 = stereo */
    int frame_size; /* in bytes ! */
    int codec_id;      /* CODEC_ID_PCM_S16LE or CODEC_ID_PCM_S16BE, set from the driver's native format */
    UINT8 buffer[AUDIO_BLOCK_SIZE]; /* staging buffer for output packets */
    int buffer_ptr;    /* number of pending bytes in buffer */
} AudioData;
static int audio_read(URLContext *h, UINT8 *buf, int size) static int audio_open(AudioData *s, int is_output)
{ {
AudioData *s = h->priv_data; int audio_fd;
int ret;
ret = read(s->fd, buf, size);
if (ret < 0)
return -errno;
else
return ret;
}
static int audio_write(URLContext *h, UINT8 *buf, int size)
{
AudioData *s = h->priv_data;
int ret;
ret = write(s->fd, buf, size);
if (ret < 0)
return -errno;
else
return ret;
}
static int audio_get_format(URLContext *h, URLFormat *f)
{
AudioData *s = h->priv_data;
strcpy(f->format_name, "pcm");
f->sample_rate = s->rate;
f->channels = s->channels;
return 0;
}
/* URI syntax: 'audio:[rate[,channels]]'
default: rate=44100, channels=2
*/
static int audio_open(URLContext *h, const char *uri, int flags)
{
AudioData *s;
const char *p;
int freq, channels, audio_fd;
int tmp, err; int tmp, err;
h->is_streamed = 1;
h->packet_size = AUDIO_BLOCK_SIZE;
s = malloc(sizeof(AudioData));
if (!s)
return -ENOMEM;
h->priv_data = s;
/* extract parameters */
p = uri;
strstart(p, "audio:", &p);
freq = strtol(p, (char **)&p, 0);
if (freq <= 0)
freq = 44100;
if (*p == ',')
p++;
channels = strtol(p, (char **)&p, 0);
if (channels <= 0)
channels = 2;
s->rate = freq;
s->channels = channels;
/* open linux audio device */ /* open linux audio device */
if (flags & URL_WRONLY) if (is_output)
audio_fd = open(audio_device,O_WRONLY); audio_fd = open(audio_device, O_WRONLY);
else else
audio_fd = open(audio_device,O_RDONLY); audio_fd = open(audio_device, O_RDONLY);
if (audio_fd < 0) { if (audio_fd < 0) {
perror(audio_device); perror(audio_device);
return -EIO; return -EIO;
@ -119,60 +60,233 @@ static int audio_open(URLContext *h, const char *uri, int flags)
/* non blocking mode */ /* non blocking mode */
fcntl(audio_fd, F_SETFL, O_NONBLOCK); fcntl(audio_fd, F_SETFL, O_NONBLOCK);
s->frame_size = AUDIO_BLOCK_SIZE;
#if 0 #if 0
tmp=(NB_FRAGMENTS << 16) | FRAGMENT_BITS; tmp = (NB_FRAGMENTS << 16) | FRAGMENT_BITS;
err=ioctl(audio_fd, SNDCTL_DSP_SETFRAGMENT, &tmp); err = ioctl(audio_fd, SNDCTL_DSP_SETFRAGMENT, &tmp);
if (err < 0) { if (err < 0) {
perror("SNDCTL_DSP_SETFRAGMENT"); perror("SNDCTL_DSP_SETFRAGMENT");
} }
#endif #endif
tmp=AFMT_S16_LE; /* select format : favour native format */
err=ioctl(audio_fd,SNDCTL_DSP_SETFMT,&tmp); err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
#ifdef WORDS_BIGENDIAN
if (tmp & AFMT_S16_BE) {
tmp = AFMT_S16_BE;
} else if (tmp & AFMT_S16_LE) {
tmp = AFMT_S16_LE;
} else {
tmp = 0;
}
#else
if (tmp & AFMT_S16_LE) {
tmp = AFMT_S16_LE;
} else if (tmp & AFMT_S16_BE) {
tmp = AFMT_S16_BE;
} else {
tmp = 0;
}
#endif
switch(tmp) {
case AFMT_S16_LE:
s->codec_id = CODEC_ID_PCM_S16LE;
break;
case AFMT_S16_BE:
s->codec_id = CODEC_ID_PCM_S16BE;
break;
default:
fprintf(stderr, "Soundcard does not support 16 bit sample format\n");
close(audio_fd);
return -EIO;
}
err=ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp);
if (err < 0) { if (err < 0) {
perror("SNDCTL_DSP_SETFMT"); perror("SNDCTL_DSP_SETFMT");
goto fail; goto fail;
} }
tmp= (channels == 2); tmp = (s->channels == 2);
err=ioctl(audio_fd,SNDCTL_DSP_STEREO,&tmp); err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp);
if (err < 0) { if (err < 0) {
perror("SNDCTL_DSP_STEREO"); perror("SNDCTL_DSP_STEREO");
goto fail; goto fail;
} }
tmp = freq; tmp = s->sample_rate;
err=ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp); err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp);
if (err < 0) { if (err < 0) {
perror("SNDCTL_DSP_SPEED"); perror("SNDCTL_DSP_SPEED");
goto fail; goto fail;
} }
s->sample_rate = tmp; /* store real sample rate */
s->rate = tmp;
s->fd = audio_fd; s->fd = audio_fd;
return 0; return 0;
fail: fail:
close(audio_fd); close(audio_fd);
free(s);
return -EIO; return -EIO;
} }
static int audio_close(URLContext *h) static int audio_close(AudioData *s)
{ {
AudioData *s = h->priv_data;
close(s->fd); close(s->fd);
return 0;
}
/* sound output support */

/* Allocate the private state and open the device for playback, taking
   sample rate and channel count from the first stream's codec parameters.
   Returns 0 on success, -ENOMEM or -EIO on failure. */
static int audio_write_header(AVFormatContext *s1)
{
    AudioData *s;
    AVStream *st;
    int ret;

    s = av_mallocz(sizeof(AudioData));
    if (!s)
        return -ENOMEM;
    s1->priv_data = s;
    st = s1->streams[0];
    s->sample_rate = st->codec.sample_rate;
    s->channels = st->codec.channels;
    ret = audio_open(s, 1);
    if (ret < 0) {
        /* clear priv_data so the caller cannot see the freed state */
        s1->priv_data = NULL;
        free(s);
        return -EIO;
    } else {
        return 0;
    }
}
/* Buffer the packet payload and write it to the device in
   AUDIO_BLOCK_SIZE chunks. Writes retry on EAGAIN/EINTR because the
   device is opened non-blocking. Returns 0 on success, -EIO on a real
   write error. */
static int audio_write_packet(AVFormatContext *s1, int stream_index,
                              UINT8 *buf, int size)
{
    AudioData *s = s1->priv_data;
    int len, ret;

    while (size > 0) {
        len = AUDIO_BLOCK_SIZE - s->buffer_ptr;
        if (len > size)
            len = size;
        memcpy(s->buffer + s->buffer_ptr, buf, len);
        s->buffer_ptr += len;
        if (s->buffer_ptr >= AUDIO_BLOCK_SIZE) {
            for(;;) {
                ret = write(s->fd, s->buffer, AUDIO_BLOCK_SIZE);
                /* bug fix: the original tested 'ret != 0', so a failed
                   write (ret == -1) broke out of the loop as if it had
                   succeeded and the errno check below was unreachable */
                if (ret > 0)
                    break;
                if (ret < 0 && (errno != EAGAIN && errno != EINTR))
                    return -EIO;
            }
            s->buffer_ptr = 0;
        }
        buf += len;
        size -= len;
    }
    return 0;
}
/* Close the device and release the private state.
   NOTE(review): any partial block still in s->buffer (s->buffer_ptr
   bytes) is silently dropped here — confirm whether a final flush is
   wanted. */
static int audio_write_trailer(AVFormatContext *s1)
{
    AudioData *s = s1->priv_data;

    audio_close(s);
    free(s);
    return 0;
}
URLProtocol audio_protocol = { /* grab support */
"audio",
audio_open, static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
audio_read, {
audio_write, AudioData *s;
NULL, /* seek */ AVStream *st;
audio_close, int ret;
audio_get_format,
if (!ap || ap->sample_rate <= 0 || ap->channels <= 0)
return -1;
s = av_mallocz(sizeof(AudioData));
if (!s)
return -ENOMEM;
st = av_mallocz(sizeof(AVStream));
if (!st) {
free(s);
return -ENOMEM;
}
s1->priv_data = s;
s1->nb_streams = 1;
s1->streams[0] = st;
s->sample_rate = ap->sample_rate;
s->channels = ap->channels;
ret = audio_open(s, 0);
if (ret < 0) {
free(st);
free(s);
return -EIO;
} else {
/* take real parameters */
st->codec.codec_type = CODEC_TYPE_AUDIO;
st->codec.codec_id = s->codec_id;
st->codec.sample_rate = s->sample_rate;
st->codec.channels = s->channels;
return 0;
}
}
/* Read one block of PCM samples into a new packet. The device fd is
   non-blocking, so this busy-loops on EAGAIN/EINTR (and on ret == 0)
   until data arrives; pkt->size is shrunk to the number of bytes
   actually read. Returns 0 on success, -EIO on failure. */
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    AudioData *s = s1->priv_data;
    int ret;

    if (av_new_packet(pkt, s->frame_size) < 0)
        return -EIO;
    for(;;) {
        ret = read(s->fd, pkt->data, pkt->size);
        if (ret > 0)
            break;
        if (!(ret == 0 || (ret == -1 && (errno == EAGAIN || errno == EINTR)))) {
            av_free_packet(pkt);
            return -EIO;
        }
    }
    pkt->size = ret;
    return 0;
}
/* Close the grab device and release the private state. */
static int audio_read_close(AVFormatContext *s1)
{
    AudioData *s = s1->priv_data;

    audio_close(s);
    free(s);
    return 0;
}
/* format descriptor registering both the audio grab (read) and audio
   output (write) sides of the OSS device */
AVFormat audio_device_format = {
    "audio_device",
    "audio grab and output",
    "",
    "",
    /* XXX: we make the assumption that the soundcard accepts this format */
    /* XXX: find better solution with "preinit" method, needed also in
       other formats */
#ifdef WORDS_BIGENDIAN
    CODEC_ID_PCM_S16BE,
#else
    CODEC_ID_PCM_S16LE,
#endif
    CODEC_ID_NONE,
    audio_write_header,
    audio_write_packet,
    audio_write_trailer,

    audio_read_header,
    audio_read_packet,
    audio_read_close,
    NULL,
    AVFMT_NOFILE,
};

View File

@ -141,11 +141,3 @@ extern URLProtocol udp_protocol;
/* http.c */ /* http.c */
extern URLProtocol http_protocol; extern URLProtocol http_protocol;
/* audio.c */
extern const char *audio_device;
extern URLProtocol audio_protocol;
/* grab.c */
extern const char *v4l_device;
extern URLProtocol video_protocol;

View File

@ -30,8 +30,9 @@ typedef struct {
int frame_format; /* see VIDEO_PALETTE_xxx */ int frame_format; /* see VIDEO_PALETTE_xxx */
int use_mmap; int use_mmap;
int width, height; int width, height;
float rate; int frame_rate;
INT64 time_frame; INT64 time_frame;
int frame_size;
} VideoData; } VideoData;
const char *v4l_device = "/dev/video"; const char *v4l_device = "/dev/video";
@ -45,20 +46,41 @@ static struct video_mmap gb_buf;
static struct video_audio audio, audio_saved; static struct video_audio audio, audio_saved;
static int gb_frame = 0; static int gb_frame = 0;
static int v4l_init(URLContext *h) static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{ {
VideoData *s = h->priv_data; VideoData *s;
AVStream *st;
int width, height; int width, height;
int ret;
int video_fd, frame_size; int video_fd, frame_size;
int ret, frame_rate;
width = s->width; if (!ap || ap->width <= 0 || ap->height <= 0 || ap->frame_rate <= 0)
height = s->height; return -1;
width = ap->width;
height = ap->height;
frame_rate = ap->frame_rate;
s = av_mallocz(sizeof(VideoData));
if (!s)
return -ENOMEM;
st = av_mallocz(sizeof(AVStream));
if (!st) {
free(s);
return -ENOMEM;
}
s1->priv_data = s;
s1->nb_streams = 1;
s1->streams[0] = st;
s->width = width;
s->height = height;
s->frame_rate = frame_rate;
video_fd = open(v4l_device, O_RDWR); video_fd = open(v4l_device, O_RDWR);
if (video_fd < 0) { if (video_fd < 0) {
perror(v4l_device); perror(v4l_device);
return -EIO; goto fail;
} }
if (ioctl(video_fd,VIDIOCGCAP,&video_cap) < 0) { if (ioctl(video_fd,VIDIOCGCAP,&video_cap) < 0) {
@ -166,27 +188,38 @@ static int v4l_init(URLContext *h)
switch(s->frame_format) { switch(s->frame_format) {
case VIDEO_PALETTE_YUV420P: case VIDEO_PALETTE_YUV420P:
frame_size = (width * height * 3) / 2; frame_size = (width * height * 3) / 2;
st->codec.pix_fmt = PIX_FMT_YUV420P;
break; break;
case VIDEO_PALETTE_YUV422: case VIDEO_PALETTE_YUV422:
frame_size = width * height * 2; frame_size = width * height * 2;
st->codec.pix_fmt = PIX_FMT_YUV422;
break; break;
case VIDEO_PALETTE_RGB24: case VIDEO_PALETTE_RGB24:
frame_size = width * height * 3; frame_size = width * height * 3;
st->codec.pix_fmt = PIX_FMT_BGR24; /* NOTE: v4l uses BGR24, not RGB24 ! */
break; break;
default: default:
goto fail; goto fail;
} }
s->fd = video_fd; s->fd = video_fd;
h->packet_size = frame_size; s->frame_size = frame_size;
st->codec.codec_id = CODEC_ID_RAWVIDEO;
st->codec.width = width;
st->codec.height = height;
st->codec.frame_rate = frame_rate;
return 0; return 0;
fail: fail:
if (video_fd >= 0)
close(video_fd); close(video_fd);
free(st);
free(s);
return -EIO; return -EIO;
} }
static int v4l_mm_read_picture(URLContext *h, UINT8 *buf) static int v4l_mm_read_picture(VideoData *s, UINT8 *buf)
{ {
VideoData *s = h->priv_data;
UINT8 *ptr; UINT8 *ptr;
gb_buf.frame = gb_frame; gb_buf.frame = gb_frame;
@ -203,105 +236,44 @@ static int v4l_mm_read_picture(URLContext *h, UINT8 *buf)
(errno == EAGAIN || errno == EINTR)); (errno == EAGAIN || errno == EINTR));
ptr = video_buf + gb_buffers.offsets[gb_frame]; ptr = video_buf + gb_buffers.offsets[gb_frame];
memcpy(buf, ptr, h->packet_size); memcpy(buf, ptr, s->frame_size);
return h->packet_size; return s->frame_size;
} }
/* Read one video frame into a new packet, pacing reads to the
   configured frame rate: the target timestamp is advanced by one frame
   duration and the call sleeps (nanosleep) until that time is reached.
   Uses mmap capture when available, otherwise a plain read().
   Returns the frame size in bytes via the packet, -EIO on failure. */
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    INT64 curtime, delay;
    struct timespec ts;

    /* wait based on the frame rate */
    s->time_frame += (INT64_C(1000000) * FRAME_RATE_BASE) / s->frame_rate;
    for(;;) {
        curtime = gettime();
        delay = s->time_frame - curtime;
        if (delay <= 0)
            break;
        ts.tv_sec = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    if (av_new_packet(pkt, s->frame_size) < 0)
        return -EIO;

    /* read one frame */
    if (s->use_mmap) {
        return v4l_mm_read_picture(s, pkt->data);
    } else {
        if (read(s->fd, pkt->data, pkt->size) != pkt->size)
            return -EIO;
        return s->frame_size;
    }
}
static int video_get_format(URLContext *h, URLFormat *f) static int grab_read_close(AVFormatContext *s1)
{ {
VideoData *s = h->priv_data; VideoData *s = s1->priv_data;
f->width = s->width;
f->height = s->height;
f->frame_rate = (int)(s->rate * FRAME_RATE_BASE);
strcpy(f->format_name, "rawvideo");
switch(s->frame_format) {
case VIDEO_PALETTE_YUV420P:
f->pix_fmt = PIX_FMT_YUV420P;
break;
case VIDEO_PALETTE_YUV422:
f->pix_fmt = PIX_FMT_YUV422;
break;
case VIDEO_PALETTE_RGB24:
f->pix_fmt = PIX_FMT_BGR24; /* NOTE: v4l uses BGR24, not RGB24 ! */
break;
default:
abort();
}
return 0;
}
/* URI syntax: 'video:width,height,rate'
*/
static int video_open(URLContext *h, const char *uri, int flags)
{
VideoData *s;
const char *p;
int width, height;
int ret;
float rate;
/* extract parameters */
p = uri;
strstart(p, "video:", &p);
width = strtol(p, (char **)&p, 0);
if (width <= 0)
return -EINVAL;
if (*p == ',')
p++;
height = strtol(p, (char **)&p, 0);
if (height <= 0)
return -EINVAL;
if (*p == ',')
p++;
rate = strtod(p, (char **)&p);
if (rate <= 0)
return -EINVAL;
s = malloc(sizeof(VideoData));
if (!s)
return -ENOMEM;
h->priv_data = s;
h->is_streamed = 1;
s->width = width;
s->height = height;
s->rate = rate;
ret = v4l_init(h);
if (ret)
free(s);
return ret;
}
static int video_close(URLContext *h)
{
VideoData *s = h->priv_data;
/* restore audio settings */ /* restore audio settings */
ioctl(s->fd, VIDIOCSAUDIO, &audio_saved); ioctl(s->fd, VIDIOCSAUDIO, &audio_saved);
@ -310,12 +282,20 @@ static int video_close(URLContext *h)
return 0; return 0;
} }
URLProtocol video_protocol = { AVFormat video_grab_device_format = {
"video", "video_grab_device",
video_open, "video grab",
video_read, "",
"",
CODEC_ID_NONE,
CODEC_ID_NONE,
NULL, NULL,
NULL, /* seek */ NULL,
video_close, NULL,
video_get_format,
grab_read_header,
grab_read_packet,
grab_read_close,
NULL,
AVFMT_NOFILE,
}; };