	Merge commit '4753f802c00853859b7b4b8fdb79c35e082cb7f8'
* commit '4753f802c00853859b7b4b8fdb79c35e082cb7f8':
  vf_libopencv: use the name 's' for the pointer to the private context
  vf_hqdn3d: use the name 's' for the pointer to the private context
  vf_hflip: use the name 's' for the pointer to the private context
  vf_gradfun: use the name 's' for the pointer to the private context

Conflicts:
	libavfilter/vf_gradfun.c
	libavfilter/vf_hflip.c
	libavfilter/vf_hqdn3d.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
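The change is purely a rename: each filter now refers to its private context through a pointer named 's', the convention used across libavfilter, instead of filter-specific names such as 'gf', 'flip', 'hqdn3d' and 'ocv'. A minimal sketch of the convention follows; FooContext, the "foo" filter and its "radius" option are hypothetical illustrations, not part of this commit:

    #include "libavutil/attributes.h"
    #include "libavutil/log.h"
    #include "avfilter.h"

    /* Hypothetical private context for an imaginary "foo" filter. */
    typedef struct FooContext {
        const AVClass *class;   /* first member, required for AVOptions */
        int radius;             /* an example option */
    } FooContext;

    static av_cold int init(AVFilterContext *ctx)
    {
        FooContext *s = ctx->priv;   /* private data accessed through 's' */

        av_log(ctx, AV_LOG_VERBOSE, "radius:%d\n", s->radius);
        return 0;
    }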
diff --git a/libavfilter/vf_gradfun.c b/libavfilter/vf_gradfun.c
@@ -123,26 +123,26 @@ static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src, int wi
 
 static av_cold int init(AVFilterContext *ctx)
 {
-    GradFunContext *gf = ctx->priv;
+    GradFunContext *s = ctx->priv;
 
-    gf->thresh  = (1 << 15) / gf->strength;
-    gf->radius  = av_clip((gf->radius + 1) & ~1, 4, 32);
+    s->thresh  = (1 << 15) / s->strength;
+    s->radius  = av_clip((s->radius + 1) & ~1, 4, 32);
 
-    gf->blur_line   = ff_gradfun_blur_line_c;
-    gf->filter_line = ff_gradfun_filter_line_c;
+    s->blur_line   = ff_gradfun_blur_line_c;
+    s->filter_line = ff_gradfun_filter_line_c;
 
     if (ARCH_X86)
-        ff_gradfun_init_x86(gf);
+        ff_gradfun_init_x86(s);
 
-    av_log(ctx, AV_LOG_VERBOSE, "threshold:%.2f radius:%d\n", gf->strength, gf->radius);
+    av_log(ctx, AV_LOG_VERBOSE, "threshold:%.2f radius:%d\n", s->strength, s->radius);
 
     return 0;
 }
 
 static av_cold void uninit(AVFilterContext *ctx)
 {
-    GradFunContext *gf = ctx->priv;
-    av_freep(&gf->buf);
+    GradFunContext *s = ctx->priv;
+    av_freep(&s->buf);
 }
 
 static int query_formats(AVFilterContext *ctx)
@@ -163,25 +163,25 @@ static int query_formats(AVFilterContext *ctx)
 
 static int config_input(AVFilterLink *inlink)
 {
-    GradFunContext *gf = inlink->dst->priv;
+    GradFunContext *s = inlink->dst->priv;
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     int hsub = desc->log2_chroma_w;
     int vsub = desc->log2_chroma_h;
 
-    gf->buf = av_mallocz((FFALIGN(inlink->w, 16) * (gf->radius + 1) / 2 + 32) * sizeof(uint16_t));
-    if (!gf->buf)
+    s->buf = av_mallocz((FFALIGN(inlink->w, 16) * (s->radius + 1) / 2 + 32) * sizeof(uint16_t));
+    if (!s->buf)
         return AVERROR(ENOMEM);
 
-    gf->chroma_w = FF_CEIL_RSHIFT(inlink->w, hsub);
-    gf->chroma_h = FF_CEIL_RSHIFT(inlink->h, vsub);
-    gf->chroma_r = av_clip(((((gf->radius >> hsub) + (gf->radius >> vsub)) / 2 ) + 1) & ~1, 4, 32);
+    s->chroma_w = FF_CEIL_RSHIFT(inlink->w, hsub);
+    s->chroma_h = FF_CEIL_RSHIFT(inlink->h, vsub);
+    s->chroma_r = av_clip(((((s->radius >> hsub) + (s->radius >> vsub)) / 2 ) + 1) & ~1, 4, 32);
 
     return 0;
 }
 
 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
-    GradFunContext *gf = inlink->dst->priv;
+    GradFunContext *s = inlink->dst->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
     AVFrame *out;
     int p, direct;
@@ -202,15 +202,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     for (p = 0; p < 4 && in->data[p]; p++) {
         int w = inlink->w;
         int h = inlink->h;
-        int r = gf->radius;
+        int r = s->radius;
         if (p) {
-            w = gf->chroma_w;
-            h = gf->chroma_h;
-            r = gf->chroma_r;
+            w = s->chroma_w;
+            h = s->chroma_h;
+            r = s->chroma_r;
         }
 
         if (FFMIN(w, h) > 2 * r)
-            filter(gf, out->data[p], in->data[p], w, h, out->linesize[p], in->linesize[p], r);
+            filter(s, out->data[p], in->data[p], w, h, out->linesize[p], in->linesize[p], r);
         else if (out->data[p] != in->data[p])
             av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p], w, h);
     }
diff --git a/libavfilter/vf_hflip.c b/libavfilter/vf_hflip.c
@@ -60,12 +60,12 @@ static int query_formats(AVFilterContext *ctx)
 
 static int config_props(AVFilterLink *inlink)
 {
-    FlipContext *flip = inlink->dst->priv;
+    FlipContext *s = inlink->dst->priv;
     const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
-    av_image_fill_max_pixsteps(flip->max_step, NULL, pix_desc);
-    flip->hsub = pix_desc->log2_chroma_w;
-    flip->vsub = pix_desc->log2_chroma_h;
+    av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
+    s->hsub = pix_desc->log2_chroma_w;
+    s->vsub = pix_desc->log2_chroma_h;
 
     return 0;
 }
@@ -73,7 +73,7 @@ static int config_props(AVFilterLink *inlink)
 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext *ctx  = inlink->dst;
-    FlipContext *flip     = ctx->priv;
+    FlipContext *s     = ctx->priv;
     AVFilterLink *outlink = ctx->outputs[0];
     AVFrame *out;
     uint8_t *inrow, *outrow;
@@ -91,9 +91,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
 
     for (plane = 0; plane < 4 && in->data[plane]; plane++) {
-        const int width  = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, flip->hsub) : inlink->w;
-        const int height = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, flip->vsub) : inlink->h;
-        step = flip->max_step[plane];
+        const int width  = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, s->hsub) : inlink->w;
+        const int height = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, s->vsub) : inlink->h;
+        step = s->max_step[plane];
 
         outrow = out->data[plane];
         inrow  = in ->data[plane] + (width - 1) * step;
diff --git a/libavfilter/vf_hqdn3d.c b/libavfilter/vf_hqdn3d.c
@@ -77,7 +77,7 @@ static void denoise_temporal(uint8_t *src, uint8_t *dst,
 }
 
 av_always_inline
-static void denoise_spatial(HQDN3DContext *hqdn3d,
+static void denoise_spatial(HQDN3DContext *s,
                             uint8_t *src, uint8_t *dst,
                             uint16_t *line_ant, uint16_t *frame_ant,
                             int w, int h, int sstride, int dstride,
@@ -103,8 +103,8 @@ static void denoise_spatial(HQDN3DContext *hqdn3d,
         src += sstride;
         dst += dstride;
         frame_ant += w;
-        if (hqdn3d->denoise_row[depth]) {
-            hqdn3d->denoise_row[depth](src, dst, line_ant, frame_ant, w, spatial, temporal);
+        if (s->denoise_row[depth]) {
+            s->denoise_row[depth](src, dst, line_ant, frame_ant, w, spatial, temporal);
             continue;
         }
         pixel_ant = LOAD(0);
@@ -121,7 +121,7 @@ static void denoise_spatial(HQDN3DContext *hqdn3d,
 }
 
 av_always_inline
-static void denoise_depth(HQDN3DContext *hqdn3d,
+static void denoise_depth(HQDN3DContext *s,
                           uint8_t *src, uint8_t *dst,
                           uint16_t *line_ant, uint16_t **frame_ant_ptr,
                           int w, int h, int sstride, int dstride,
@@ -142,7 +142,7 @@ static void denoise_depth(HQDN3DContext *hqdn3d,
     }
 
     if (spatial[0])
-        denoise_spatial(hqdn3d, src, dst, line_ant, frame_ant,
+        denoise_spatial(s, src, dst, line_ant, frame_ant,
                         w, h, sstride, dstride, spatial, temporal, depth);
     else
         denoise_temporal(src, dst, frame_ant,
@@ -150,7 +150,7 @@ static void denoise_depth(HQDN3DContext *hqdn3d,
 }
 
 #define denoise(...) \
-    switch (hqdn3d->depth) {\
+    switch (s->depth) {\
         case  8: denoise_depth(__VA_ARGS__,  8); break;\
         case  9: denoise_depth(__VA_ARGS__,  9); break;\
         case 10: denoise_depth(__VA_ARGS__, 10); break;\
@@ -184,36 +184,36 @@ static int16_t *precalc_coefs(double dist25, int depth)
 
 static av_cold int init(AVFilterContext *ctx)
 {
-    HQDN3DContext *hqdn3d = ctx->priv;
+    HQDN3DContext *s = ctx->priv;
 
-    if (!hqdn3d->strength[LUMA_SPATIAL])
-        hqdn3d->strength[LUMA_SPATIAL] = PARAM1_DEFAULT;
-    if (!hqdn3d->strength[CHROMA_SPATIAL])
-        hqdn3d->strength[CHROMA_SPATIAL] = PARAM2_DEFAULT * hqdn3d->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
-    if (!hqdn3d->strength[LUMA_TMP])
-        hqdn3d->strength[LUMA_TMP]   = PARAM3_DEFAULT * hqdn3d->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
-    if (!hqdn3d->strength[CHROMA_TMP])
-        hqdn3d->strength[CHROMA_TMP] = hqdn3d->strength[LUMA_TMP] * hqdn3d->strength[CHROMA_SPATIAL] / hqdn3d->strength[LUMA_SPATIAL];
+    if (!s->strength[LUMA_SPATIAL])
+        s->strength[LUMA_SPATIAL] = PARAM1_DEFAULT;
+    if (!s->strength[CHROMA_SPATIAL])
+        s->strength[CHROMA_SPATIAL] = PARAM2_DEFAULT * s->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
+    if (!s->strength[LUMA_TMP])
+        s->strength[LUMA_TMP]   = PARAM3_DEFAULT * s->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
+    if (!s->strength[CHROMA_TMP])
+        s->strength[CHROMA_TMP] = s->strength[LUMA_TMP] * s->strength[CHROMA_SPATIAL] / s->strength[LUMA_SPATIAL];
 
     av_log(ctx, AV_LOG_VERBOSE, "ls:%f cs:%f lt:%f ct:%f\n",
-           hqdn3d->strength[LUMA_SPATIAL], hqdn3d->strength[CHROMA_SPATIAL],
-           hqdn3d->strength[LUMA_TMP], hqdn3d->strength[CHROMA_TMP]);
+           s->strength[LUMA_SPATIAL], s->strength[CHROMA_SPATIAL],
+           s->strength[LUMA_TMP], s->strength[CHROMA_TMP]);
 
     return 0;
 }
 
 static av_cold void uninit(AVFilterContext *ctx)
 {
-    HQDN3DContext *hqdn3d = ctx->priv;
+    HQDN3DContext *s = ctx->priv;
 
-    av_freep(&hqdn3d->coefs[0]);
-    av_freep(&hqdn3d->coefs[1]);
-    av_freep(&hqdn3d->coefs[2]);
-    av_freep(&hqdn3d->coefs[3]);
-    av_freep(&hqdn3d->line);
-    av_freep(&hqdn3d->frame_prev[0]);
-    av_freep(&hqdn3d->frame_prev[1]);
-    av_freep(&hqdn3d->frame_prev[2]);
+    av_freep(&s->coefs[0]);
+    av_freep(&s->coefs[1]);
+    av_freep(&s->coefs[2]);
+    av_freep(&s->coefs[3]);
+    av_freep(&s->line);
+    av_freep(&s->frame_prev[0]);
+    av_freep(&s->frame_prev[1]);
+    av_freep(&s->frame_prev[2]);
 }
 
 static int query_formats(AVFilterContext *ctx)
@@ -248,26 +248,26 @@ static int query_formats(AVFilterContext *ctx)
 
 static int config_input(AVFilterLink *inlink)
 {
-    HQDN3DContext *hqdn3d = inlink->dst->priv;
+    HQDN3DContext *s = inlink->dst->priv;
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     int i;
 
-    hqdn3d->hsub  = desc->log2_chroma_w;
-    hqdn3d->vsub  = desc->log2_chroma_h;
-    hqdn3d->depth = desc->comp[0].depth_minus1+1;
+    s->hsub  = desc->log2_chroma_w;
+    s->vsub  = desc->log2_chroma_h;
+    s->depth = desc->comp[0].depth_minus1+1;
 
-    hqdn3d->line = av_malloc(inlink->w * sizeof(*hqdn3d->line));
-    if (!hqdn3d->line)
+    s->line = av_malloc(inlink->w * sizeof(*s->line));
+    if (!s->line)
        return AVERROR(ENOMEM);
 
     for (i = 0; i < 4; i++) {
-        hqdn3d->coefs[i] = precalc_coefs(hqdn3d->strength[i], hqdn3d->depth);
-        if (!hqdn3d->coefs[i])
+        s->coefs[i] = precalc_coefs(s->strength[i], s->depth);
+        if (!s->coefs[i])
            return AVERROR(ENOMEM);
     }
 
     if (ARCH_X86)
-        ff_hqdn3d_init_x86(hqdn3d);
+        ff_hqdn3d_init_x86(s);
 
     return 0;
 }
@@ -275,7 +275,7 @@ static int config_input(AVFilterLink *inlink)
 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext *ctx  = inlink->dst;
-    HQDN3DContext *hqdn3d = ctx->priv;
+    HQDN3DContext *s = ctx->priv;
     AVFilterLink *outlink = ctx->outputs[0];
 
     AVFrame *out;
@@ -296,13 +296,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     }
 
     for (c = 0; c < 3; c++) {
-        denoise(hqdn3d, in->data[c], out->data[c],
-                hqdn3d->line, &hqdn3d->frame_prev[c],
-                FF_CEIL_RSHIFT(in->width,  (!!c * hqdn3d->hsub)),
-                FF_CEIL_RSHIFT(in->height, (!!c * hqdn3d->vsub)),
+        denoise(s, in->data[c], out->data[c],
+                s->line, &s->frame_prev[c],
+                FF_CEIL_RSHIFT(in->width,  (!!c * s->hsub)),
+                FF_CEIL_RSHIFT(in->height, (!!c * s->vsub)),
                 in->linesize[c], out->linesize[c],
-                hqdn3d->coefs[c ? CHROMA_SPATIAL : LUMA_SPATIAL],
-                hqdn3d->coefs[c ? CHROMA_TMP     : LUMA_TMP]);
+                s->coefs[c ? CHROMA_SPATIAL : LUMA_SPATIAL],
+                s->coefs[c ? CHROMA_TMP     : LUMA_TMP]);
     }
 
     if (ctx->is_disabled) {
diff --git a/libavfilter/vf_libopencv.c b/libavfilter/vf_libopencv.c
@@ -86,8 +86,8 @@ typedef struct {
 
 static av_cold int smooth_init(AVFilterContext *ctx, const char *args)
 {
-    OCVContext *ocv = ctx->priv;
-    SmoothContext *smooth = ocv->priv;
+    OCVContext *s = ctx->priv;
+    SmoothContext *smooth = s->priv;
     char type_str[128] = "gaussian";
 
     smooth->param1 = 3;
@@ -129,8 +129,8 @@ static av_cold int smooth_init(AVFilterContext *ctx, const char *args)
 
 static void smooth_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
 {
-    OCVContext *ocv = ctx->priv;
-    SmoothContext *smooth = ocv->priv;
+    OCVContext *s = ctx->priv;
+    SmoothContext *smooth = s->priv;
     cvSmooth(inimg, outimg, smooth->type, smooth->param1, smooth->param2, smooth->param3, smooth->param4);
 }
 
@@ -252,8 +252,8 @@ typedef struct {
 
 static av_cold int dilate_init(AVFilterContext *ctx, const char *args)
 {
-    OCVContext *ocv = ctx->priv;
-    DilateContext *dilate = ocv->priv;
+    OCVContext *s = ctx->priv;
+    DilateContext *dilate = s->priv;
     char default_kernel_str[] = "3x3+0x0/rect";
     char *kernel_str;
     const char *buf = args;
@@ -282,23 +282,23 @@ static av_cold int dilate_init(AVFilterContext *ctx, const char *args)
 
 static av_cold void dilate_uninit(AVFilterContext *ctx)
 {
-    OCVContext *ocv = ctx->priv;
-    DilateContext *dilate = ocv->priv;
+    OCVContext *s = ctx->priv;
+    DilateContext *dilate = s->priv;
 
     cvReleaseStructuringElement(&dilate->kernel);
 }
 
 static void dilate_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
 {
-    OCVContext *ocv = ctx->priv;
-    DilateContext *dilate = ocv->priv;
+    OCVContext *s = ctx->priv;
+    DilateContext *dilate = s->priv;
     cvDilate(inimg, outimg, dilate->kernel, dilate->nb_iterations);
 }
 
 static void erode_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
 {
-    OCVContext *ocv = ctx->priv;
-    DilateContext *dilate = ocv->priv;
+    OCVContext *s = ctx->priv;
+    DilateContext *dilate = s->priv;
     cvErode(inimg, outimg, dilate->kernel, dilate->nb_iterations);
 }
 
@@ -318,43 +318,43 @@ static OCVFilterEntry ocv_filter_entries[] = {
 
 static av_cold int init(AVFilterContext *ctx)
 {
-    OCVContext *ocv = ctx->priv;
+    OCVContext *s = ctx->priv;
     int i;
 
-    if (!ocv->name) {
+    if (!s->name) {
        av_log(ctx, AV_LOG_ERROR, "No libopencv filter name specified\n");
        return AVERROR(EINVAL);
     }
     for (i = 0; i < FF_ARRAY_ELEMS(ocv_filter_entries); i++) {
        OCVFilterEntry *entry = &ocv_filter_entries[i];
-        if (!strcmp(ocv->name, entry->name)) {
-            ocv->init             = entry->init;
-            ocv->uninit           = entry->uninit;
-            ocv->end_frame_filter = entry->end_frame_filter;
+        if (!strcmp(s->name, entry->name)) {
+            s->init             = entry->init;
+            s->uninit           = entry->uninit;
+            s->end_frame_filter = entry->end_frame_filter;
 
-            if (!(ocv->priv = av_mallocz(entry->priv_size)))
+            if (!(s->priv = av_mallocz(entry->priv_size)))
                return AVERROR(ENOMEM);
-            return ocv->init(ctx, ocv->params);
+            return s->init(ctx, s->params);
        }
     }
 
-    av_log(ctx, AV_LOG_ERROR, "No libopencv filter named '%s'\n", ocv->name);
+    av_log(ctx, AV_LOG_ERROR, "No libopencv filter named '%s'\n", s->name);
     return AVERROR(EINVAL);
 }
 
 static av_cold void uninit(AVFilterContext *ctx)
 {
-    OCVContext *ocv = ctx->priv;
+    OCVContext *s = ctx->priv;
 
-    if (ocv->uninit)
-        ocv->uninit(ctx);
-    av_free(ocv->priv);
+    if (s->uninit)
+        s->uninit(ctx);
+    av_free(s->priv);
 }
 
 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext *ctx = inlink->dst;
-    OCVContext *ocv = ctx->priv;
+    OCVContext *s = ctx->priv;
     AVFilterLink *outlink= inlink->dst->outputs[0];
     AVFrame *out;
     IplImage inimg, outimg;
@@ -368,7 +368,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 
     fill_iplimage_from_frame(&inimg , in , inlink->format);
     fill_iplimage_from_frame(&outimg, out, inlink->format);
-    ocv->end_frame_filter(ctx, &inimg, &outimg);
+    s->end_frame_filter(ctx, &inimg, &outimg);
     fill_frame_from_iplimage(out, &outimg, inlink->format);
 
     av_frame_free(&in);