1 /* $Id: decomb.c,v 1.14 2008/04/25 5:00:00 jbrjake Exp $
3 This file is part of the HandBrake source code.
4 Homepage: <http://handbrake.fr/>.
5 It may be used under the terms of the GNU General Public License.
7 The yadif algorithm was created by Michael Niedermayer. */
9 #include "libavcodec/avcodec.h"
10 #include "mpeg2dec/mpeg2.h"
12 #define SUPPRESS_AV_LOG
14 #define MODE_DEFAULT 1
15 #define PARITY_DEFAULT -1
17 #define MCDEINT_MODE_DEFAULT -1
18 #define MCDEINT_QP_DEFAULT 1
20 #define ABS(a) ((a) > 0 ? (a) : (-(a)))
21 #define MIN3(a,b,c) MIN(MIN(a,b),c)
22 #define MAX3(a,b,c) MAX(MAX(a,b),c)
/* Private state for the decomb filter: comb-detection thresholds, the
   mcdeint (motion-compensated deinterlace) encoder state, per-path frame
   counters, and output buffers.
   NOTE(review): this listing is a sampled excerpt -- the embedded original
   line numbers are non-contiguous, so several fields are not visible here. */
24 struct hb_filter_private_s
    /* Spatial combing threshold used by tritical_detect_comb(). */
33 int spatial_threshold;
    /* mcdeint state: scratch output buffer, the snow encoder context,
       and the input/decoded AVFrames it works on. */
45 int mcdeint_outbuf_size;
46 uint8_t * mcdeint_outbuf;
47 AVCodecContext * mcdeint_avctx_enc;
48 AVFrame * mcdeint_frame;
49 AVFrame * mcdeint_frame_dec;
    /* Counters for how each frame was handled; logged at close time. */
51 int yadif_deinterlaced_frames;
52 int blend_deinterlaced_frames;
53 int unfiltered_frames;
58 /* Make a buffer to store a comb mask. */
    /* Double-buffered filter output, plus a zero-size buffer that carries
       frame settings (flags, chapter marks) across the one-frame delay. */
63 hb_buffer_t * buf_out[2];
64 hb_buffer_t * buf_settings;
/* Forward declarations for the filter entry points; definitions follow below. */
67 hb_filter_private_t * hb_decomb_init( int pix_fmt,
72 int hb_decomb_work( const hb_buffer_t * buf_in,
73 hb_buffer_t ** buf_out,
77 hb_filter_private_t * pv );
79 void hb_decomb_close( hb_filter_private_t * pv );
/* Public filter descriptor registered with HandBrake's filter chain
   (name/init/work/close members are in lines not shown here). */
81 hb_filter_object_t hb_filter_decomb =
84 "Deinterlaces selectively with yadif/mcdeint or lowpass5 blending",
/* 4-tap cubic vertical interpolation used by yadif's spatial predictor.
   Tap weights are (-3, 23, 23, -3); presumably the unshown lines divide
   by 40 and clamp the high end to 255, matching the low-end clamp that
   is visible below -- TODO confirm against the full source. */
91 int cubic_interpolate( int y0, int y1, int y2, int y3 )
93 /* From http://www.neuron2.net/library/cubicinterp.html */
94 int result = ( y0 * -3 ) + ( y1 * 23 ) + ( y2 * 23 ) + ( y3 * -3 );
    /* Clamp to the valid 8-bit pixel range. */
101 else if( result < 0 )
/* Push the incoming picture into the 3-frame reference cache: the two
   (partially visible) memcpy calls rotate pv->ref so older frames shift
   down, then the new planes are copied into ref[2] row by row. */
109 static void store_ref( const uint8_t ** pic,
110 hb_filter_private_t * pv )
114 sizeof(uint8_t *)*3 );
118 sizeof(uint8_t *)*3*3 );
    /* Copy each of the three planes (Y, Cb, Cr). */
121 for( i = 0; i < 3; i++ )
123 const uint8_t * src = pic[i];
124 uint8_t * ref = pv->ref[2][i];
126 int w = pv->width[i];
127 int h = pv->height[i];
128 int ref_stride = pv->ref_stride[i];
    /* NOTE(review): `h` appears unused -- the loop reads pv->height[i]
       directly. Source rows are packed (stride == width) while ref rows
       advance by the padded ref_stride. */
131 for( y = 0; y < pv->height[i]; y++ )
134 src = (uint8_t*)src + w;
135 ref = (uint8_t*)ref + ref_stride;
/* Copy cached frame `frm` out of the reference cache into the caller's
   planar picture `pic`. Given how store_ref() fills the cache, index 0 is
   the oldest frame and 2 the newest. */
140 static void get_ref( uint8_t ** pic, hb_filter_private_t * pv, int frm )
143 for( i = 0; i < 3; i++ )
145 uint8_t * dst = pic[i];
146 const uint8_t * ref = pv->ref[frm][i];
147 int w = pv->width[i];
148 int ref_stride = pv->ref_stride[i];
    /* Row-by-row copy: dst is packed, ref uses the padded stride. */
151 for( y = 0; y < pv->height[i]; y++ )
/* Blend one output pixel from a vertical 5-pixel column (up2..down2)
   with a low-pass 5-tap filter; the center tap is weighted 6. The outer
   tap weights, normalization, and clamp are in lines not shown here --
   TODO confirm the full kernel against the complete source. */
160 int blend_filter_pixel( int up2, int up1, int current, int down1, int down2 )
162 /* Low-pass 5-tap filter */
166 result += current * 6;
/* Lowpass5-blend one row `y` of plane `plane`: for every pixel, feed the
   vertical 5-pixel column (a,b,c,d,e) into blend_filter_pixel(), clamping
   the column at the top and bottom picture edges. */
183 static void blend_filter_line( uint8_t *dst,
187 hb_filter_private_t * pv )
189 int w = pv->width[plane];
190 int refs = pv->ref_stride[plane];
193 for( x = 0; x < w; x++)
    /* Edge handling: substitute the nearest valid rows for taps that
       would fall outside the picture. */
205 /* First line, so A and B don't exist.*/
211 /* Second line, no A. */
214 else if( y == (pv->height[plane] - 2) )
216 /* Second to last line, no E. */
219 else if( y == (pv->height[plane] -1) )
221 /* Last line, no D or E. */
226 dst[0] = blend_filter_pixel( a, b, c, d, e );
/* Scan the comb mask built by tritical_detect_comb() in block_width x
   block_height windows and decide how to treat the frame. Per the callers
   (see yadif_filter), the return value is interpreted as: 1 = combed,
   deinterlace with yadif; 2 = lightly combed or progressive-flagged,
   blend instead; otherwise leave the frame unfiltered. */
233 int check_combing_mask( hb_filter_private_t * pv )
235 /* Go through the mask in X*Y blocks. If any of these windows
236 have threshold or more combed pixels, consider the whole
237 frame to be combed and send it on to be deinterlaced. */
239 /* Block mask threshold -- The number of pixels
240 in a block_width * block_height window of
241 the mask that need to show combing for the
242 whole frame to be seen as such. */
243 int threshold = pv->block_threshold;
244 int block_width = pv->block_width;
245 int block_height = pv->block_height;
246 int block_x, block_y;
247 int block_score = 0; int send_to_blend = 0;
    /* Only the luma plane's mask is scanned (the loop stops at k < 1). */
251 for( k = 0; k < 1; k++ )
253 int ref_stride = pv->ref_stride[k];
254 for( y = 0; y < ( pv->height[k] - block_height ); y = y + block_height )
256 for( x = 0; x < ( pv->width[k] - block_width ); x = x + block_width )
259 for( block_y = 0; block_y < block_height; block_y++ )
261 for( block_x = 0; block_x < block_width; block_x++ )
263 int mask_y = y + block_y;
264 int mask_x = x + block_x;
266 /* We only want to mark a pixel in a block as combed
267 if the pixels above and below are as well. Got to
268 handle the top and bottom lines separately. */
    /* NOTE(review): despite the comment above, the comparisons below
       index mask_x-1 / mask_x / mask_x+1 on the SAME row, i.e. the
       left/right neighbors -- confirm whether this is intentional. */
269 if( y + block_y == 0 )
271 if( pv->mask[k][mask_y*ref_stride+mask_x ] == 255 &&
272 pv->mask[k][mask_y*ref_stride+mask_x + 1] == 255 )
275 else if( y + block_y == pv->height[k] - 1 )
277 if( pv->mask[k][mask_y*ref_stride+mask_x - 1] == 255 &&
278 pv->mask[k][mask_y*ref_stride+mask_x ] == 255 )
283 if( pv->mask[k][mask_y*ref_stride+mask_x - 1] == 255 &&
284 pv->mask[k][mask_y*ref_stride+mask_x ] == 255 &&
285 pv->mask[k][mask_y*ref_stride+mask_x + 1] == 255 )
    /* Half-threshold already indicates borderline combing. */
291 if( block_score >= ( threshold / 2 ) )
294 hb_log("decomb: frame %i | score %i | type %s", pv->yadif_deinterlaced_frames + pv->blend_deinterlaced_frames + pv->unfiltered_frames + 1, block_score, pv->buf_settings->flags & 16 ? "Film" : "Video");
    /* Flag bit 16 marks film (progressive) content from the detelecine
       logic upstream -- presumably; verify against the flag producer. */
296 if ( block_score <= threshold && !( pv->buf_settings->flags & 16) )
298 /* Blend video content that scores between
299 ( threshold / 2 ) and threshold. */
302 else if( block_score > threshold )
304 if( pv->buf_settings->flags & 16 )
306 /* Blend progressive content above the threshold.*/
311 /* Yadif deinterlace video content above the threshold. */
326 /* Consider this frame to be uncombed. */
/* Build the per-pixel comb mask for the current (middle) cached frame and
   hand it to check_combing_mask() for the frame-level verdict. For each
   luma pixel: detect opposing vertical luma steps (combing), optionally
   require inter-frame motion (mthresh > 0), then score the pixel with one
   of three spatial metrics selected by pv->spatial_metric. */
331 int tritical_detect_comb( hb_filter_private_t * pv )
333 /* A mish-mash of various comb detection tricks
334 picked up from neuron2's Decomb plugin for
335 AviSynth and tritical's IsCombedT and
336 IsCombedTIVTC plugins. */
338 int x, y, k, width, height;
340 /* Comb scoring algorithm */
341 int spatial_metric = pv->spatial_metric;
342 /* Motion threshold */
343 int mthresh = pv->motion_threshold;
344 /* Spatial threshold */
345 int athresh = pv->spatial_threshold;
346 int athresh_squared = athresh * athresh;
347 int athresh6 = 6 *athresh;
349 /* A single pass over the luma (Y) plane only -- the loop stops at
   k < 1, so Cb/Cr are never examined. */
350 for( k = 0; k < 1; k++ )
352 int ref_stride = pv->ref_stride[k];
353 width = pv->width[k];
354 height = pv->height[k];
    /* Skip the two outermost rows: the 5-pixel column below needs y±2. */
356 for( y = 2; y < ( height - 2 ); y++ )
358 /* These are just to make the buffer locations easier to read. */
359 int back_2 = ( y - 2 )*ref_stride ;
360 int back_1 = ( y - 1 )*ref_stride;
361 int current = y*ref_stride;
362 int forward_1 = ( y + 1 )*ref_stride;
363 int forward_2 = ( y + 2 )*ref_stride;
365 /* We need to examine a column of 5 pixels
366 in the prev, cur, and next frames. */
367 uint8_t previous_frame[5];
368 uint8_t current_frame[5];
369 uint8_t next_frame[5];
371 for( x = 0; x < width; x++ )
373 /* Fill up the current frame array with the current pixel values.*/
374 current_frame[0] = pv->ref[1][k][back_2 + x];
375 current_frame[1] = pv->ref[1][k][back_1 + x];
376 current_frame[2] = pv->ref[1][k][current + x];
377 current_frame[3] = pv->ref[1][k][forward_1 + x];
378 current_frame[4] = pv->ref[1][k][forward_2 + x];
380 int up_diff = current_frame[2] - current_frame[1];
381 int down_diff = current_frame[2] - current_frame[3];
    /* Combing candidate: the pixel differs from both vertical
       neighbors by more than athresh, in the same direction. */
383 if( ( up_diff > athresh && down_diff > athresh ) ||
384 ( up_diff < -athresh && down_diff < -athresh ) )
386 /* The pixels above and below are different,
387 and they change in the same "direction" too.*/
391 /* Make sure there's sufficient motion between frame t-1 to frame t+1. */
392 previous_frame[0] = pv->ref[0][k][back_2 + x];
393 previous_frame[1] = pv->ref[0][k][back_1 + x];
394 previous_frame[2] = pv->ref[0][k][current + x];
395 previous_frame[3] = pv->ref[0][k][forward_1 + x];
396 previous_frame[4] = pv->ref[0][k][forward_2 + x];
397 next_frame[0] = pv->ref[2][k][back_2 + x];
398 next_frame[1] = pv->ref[2][k][back_1 + x];
399 next_frame[2] = pv->ref[2][k][current + x];
400 next_frame[3] = pv->ref[2][k][forward_1 + x];
401 next_frame[4] = pv->ref[2][k][forward_2 + x];
403 if( abs( previous_frame[2] - current_frame[2] ) > mthresh &&
404 abs( current_frame[1] - next_frame[1] ) > mthresh &&
405 abs( current_frame[3] - next_frame[3] ) > mthresh )
407 if( abs( next_frame[2] - current_frame[2] ) > mthresh &&
408 abs( previous_frame[1] - current_frame[1] ) > mthresh &&
409 abs( previous_frame[3] - current_frame[3] ) > mthresh )
414 /* User doesn't want to check for motion,
415 so move on to the spatial check. */
    /* Also force the spatial check on the very first frame, when no
       statistics exist yet. */
419 if( motion || ( pv->yadif_deinterlaced_frames==0 && pv->blend_deinterlaced_frames==0 && pv->unfiltered_frames==0) )
421 /*That means it's time for the spatial check.
422 We've got several options here. */
423 if( spatial_metric == 0 )
425 /* Simple 32detect style comb detection */
426 if( ( abs( current_frame[2] - current_frame[4] ) < 10 ) &&
427 ( abs( current_frame[2] - current_frame[3] ) > 15 ) )
429 pv->mask[k][y*ref_stride + x] = 255;
433 pv->mask[k][y*ref_stride + x] = 0;
436 else if( spatial_metric == 1 )
438 /* This, for comparison, is what IsCombed uses.
439 It's better, but still noise sensitive. */
440 int combing = ( current_frame[1] - current_frame[2] ) *
441 ( current_frame[3] - current_frame[2] );
443 if( combing > athresh_squared )
444 pv->mask[k][y*ref_stride + x] = 255;
446 pv->mask[k][y*ref_stride + x] = 0;
448 else if( spatial_metric == 2 )
450 /* Tritical's noise-resistant combing scorer.
451 The check is done on a bob+blur convolution. */
452 int combing = abs( current_frame[0]
453 + ( 4 * current_frame[2] )
455 - ( 3 * ( current_frame[1]
456 + current_frame[3] ) ) );
458 /* If the frame is sufficiently combed,
459 then mark it down on the mask as 255. */
460 if( combing > athresh6 )
461 pv->mask[k][y*ref_stride + x] = 255;
463 pv->mask[k][y*ref_stride + x] = 0;
    /* Not a comb candidate (or no motion): clear the mask entry. */
468 pv->mask[k][y*ref_stride + x] = 0;
473 pv->mask[k][y*ref_stride + x] = 0;
    /* Let the windowed scorer decide what to do with this frame. */
479 return check_combing_mask( pv );
/* Yadif-interpolate one missing-field row: for each pixel compute a
   spatial prediction (vertical or edge-directed interpolation from the
   current frame) and constrain it temporally against the previous/next
   fields. `parity` selects which of prev/cur acts as the same-parity
   reference (prev2/next2). */
482 static void yadif_filter_line( uint8_t *dst,
489 hb_filter_private_t * pv )
491 uint8_t *prev2 = parity ? prev : cur ;
492 uint8_t *next2 = parity ? cur : next;
494 int w = pv->width[plane];
495 int refs = pv->ref_stride[plane];
498 for( x = 0; x < w; x++)
502 /* Temporal average -- the current pixel location in the previous and next fields */
503 int d = (prev2[0] + next2[0])>>1;
507 /* How the current pixel changes from the field before to the field after */
508 int temporal_diff0 = ABS(prev2[0] - next2[0]);
509 /* The average of how much the pixels above and below change from the field before to now. */
510 int temporal_diff1 = ( ABS(prev[-refs] - cur[-refs]) + ABS(prev[+refs] - cur[+refs]) ) >> 1;
511 /* The average of how much the pixels above and below change from now to the next field. */
512 int temporal_diff2 = ( ABS(next[-refs] - cur[-refs]) + ABS(next[+refs] - cur[+refs]) ) >> 1;
513 /* For the actual difference, use the largest of the previous average diffs. */
514 int diff = MAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2);
516 /* SAD of how the pixel-1, the pixel, and the pixel+1 change from the line above to below. */
517 int spatial_score = ABS(cur[-refs-1] - cur[+refs-1]) + ABS(cur[-refs]-cur[+refs]) +
518 ABS(cur[-refs+1] - cur[+refs+1]) - 1;
521 /* Spatial pred is either a bilinear or cubic vertical interpolation. */
524 spatial_pred = cubic_interpolate( cur[-3*refs], cur[-refs], cur[+refs], cur[3*refs] );
528 spatial_pred = (c+e)>>1;
531 /* EDDI: Edge Directed Deinterlacing Interpolation
532 Uses the Martinez-Lim Line Shift Parametric Modeling algorithm...I think.
533 Checks 4 different slopes to see if there is more similarity along a diagonal
534 than there was vertically. If a diagonal is more similar, then it indicates
535 an edge, so interpolate along that instead of a vertical line, using either
536 linear or cubic interpolation depending on mode. */
537 #define YADIF_CHECK(j)\
538 { int score = ABS(cur[-refs-1+j] - cur[+refs-1-j])\
539 + ABS(cur[-refs +j] - cur[+refs -j])\
540 + ABS(cur[-refs+1+j] - cur[+refs+1-j]);\
541 if( score < spatial_score ){\
542 spatial_score = score;\
548 spatial_pred = cubic_interpolate(cur[-3 * refs - 3], cur[-refs -1], cur[+refs + 1], cur[3* refs + 3] );\
551 spatial_pred = cubic_interpolate( ( ( cur[-3*refs - 4] + cur[-refs - 4] ) / 2 ) , cur[-refs -2], cur[+refs + 2], ( ( cur[3*refs + 4] + cur[refs + 4] ) / 2 ) );\
554 spatial_pred = cubic_interpolate(cur[-3 * refs +3], cur[-refs +1], cur[+refs - 1], cur[3* refs -3] );\
557 spatial_pred = cubic_interpolate(( ( cur[-3*refs + 4] + cur[-refs + 4] ) / 2 ), cur[-refs +2], cur[+refs - 2], ( ( cur[3*refs - 4] + cur[refs - 4] ) / 2 ) );\
563 spatial_pred = ( cur[-refs +j] + cur[+refs -j] ) >>1;\
566 YADIF_CHECK(-1) YADIF_CHECK(-2) }} }}
567 YADIF_CHECK( 1) YADIF_CHECK( 2) }} }}
569 /* Temporally adjust the spatial prediction by comparing
570 against fields in the previous and next frames. */
571 int b = (prev2[-2*refs] + next2[-2*refs])>>1;
572 int f = (prev2[+2*refs] + next2[+2*refs])>>1;
574 /* Find the median value */
575 int max = MAX3(d-e, d-c, MIN(b-c, f-e));
576 int min = MIN3(d-e, d-c, MAX(b-c, f-e));
577 diff = MAX3( diff, min, -max );
    /* Clamp the spatial prediction into the temporally-allowed band. */
579 if( spatial_pred > d + diff )
581 spatial_pred = d + diff;
583 else if( spatial_pred < d - diff )
585 spatial_pred = d - diff;
588 dst[0] = spatial_pred;
/* Filter one whole frame: run comb detection, bump the matching counter,
   then per row either lowpass5-blend (mode 4 on any combing, or verdict
   2), yadif-interpolate the rows of the missing field (verdict 1), or
   copy the row through untouched from the cached current frame. */
599 static void yadif_filter( uint8_t ** dst,
602 hb_filter_private_t * pv )
605 int is_combed = tritical_detect_comb( pv );
    /* Tally how this frame was classified, for the close-time report. */
609 pv->yadif_deinterlaced_frames++;
611 else if( is_combed == 2 )
613 pv->blend_deinterlaced_frames++;
617 pv->unfiltered_frames++;
621 for( i = 0; i < 3; i++ )
623 int w = pv->width[i];
624 int h = pv->height[i];
625 int ref_stride = pv->ref_stride[i];
628 for( y = 0; y < h; y++ )
630 if( ( pv->mode == 4 && is_combed ) || is_combed == 2 )
    /* NOTE(review): `prev` and `next` are unused in this branch --
       blend_filter_line() only takes the current-frame row. */
632 uint8_t *prev = &pv->ref[0][i][y*ref_stride];
633 uint8_t *cur = &pv->ref[1][i][y*ref_stride];
634 uint8_t *next = &pv->ref[2][i][y*ref_stride];
635 uint8_t *dst2 = &dst[i][y*w];
637 blend_filter_line( dst2, cur, i, y, pv );
    /* Only the rows belonging to the field being rebuilt get yadif. */
639 else if( (y ^ parity) & 1 && is_combed == 1 )
641 uint8_t *prev = &pv->ref[0][i][y*ref_stride];
642 uint8_t *cur = &pv->ref[1][i][y*ref_stride];
643 uint8_t *next = &pv->ref[2][i][y*ref_stride];
644 uint8_t *dst2 = &dst[i][y*w];
646 yadif_filter_line( dst2, prev, cur, next, i, parity ^ tff, y, pv );
    /* Untouched rows: copy straight from the cached current frame. */
650 memcpy( &dst[i][y*w],
651 &pv->ref[1][i][y*ref_stride],
652 w * sizeof(uint8_t) );
/* Motion-compensated refinement pass: encode the yadif output with the
   snow codec (CODEC_FLAG2_MEMC_ONLY, i.e. motion compensation only), then
   for each rebuilt-field pixel nudge the source value toward the
   motion-compensated prediction, using an edge-directed neighborhood
   search like yadif's. Rows of the kept field are copied through. */
658 static void mcdeint_filter( uint8_t ** dst,
661 hb_filter_private_t * pv )
666 #ifdef SUPPRESS_AV_LOG
667 /* TODO: temporarily change log level to suppress obnoxious debug output */
668 int loglevel = av_log_get_level();
669 av_log_set_level( AV_LOG_QUIET );
    /* Hand the source planes to the encoder frame (no copy). */
674 pv->mcdeint_frame->data[i] = src[i];
675 pv->mcdeint_frame->linesize[i] = pv->width[i];
677 pv->mcdeint_avctx_enc->me_cmp = FF_CMP_SAD;
678 pv->mcdeint_avctx_enc->me_sub_cmp = FF_CMP_SAD;
679 pv->mcdeint_frame->quality = pv->mcdeint_qp * FF_QP2LAMBDA;
    /* NOTE(review): avcodec_encode_video()/coded_frame are long-deprecated
       libavcodec APIs -- fine for the vintage of this file. */
681 out_size = avcodec_encode_video( pv->mcdeint_avctx_enc,
683 pv->mcdeint_outbuf_size,
686 pv->mcdeint_frame_dec = pv->mcdeint_avctx_enc->coded_frame;
688 for( i = 0; i < 3; i++ )
690 int w = pv->width[i];
691 int h = pv->height[i];
692 int fils = pv->mcdeint_frame_dec->linesize[i];
693 int srcs = pv->width[i];
695 for( y = 0; y < h; y++ )
    /* Only rows of the field being rebuilt are refined. */
697 if( (y ^ parity) & 1 )
699 for( x = 0; x < w; x++ )
    /* Stay clear of the frame borders so the ±1-row, ±2-column
       neighborhood reads below are in bounds. */
701 if( (x-2)+(y-1)*w >= 0 && (x+2)+(y+1)*w < w*h )
704 &pv->mcdeint_frame_dec->data[i][x + y*fils];
705 uint8_t * srcp = &src[i][x + y*srcs];
    /* Differences between the MC prediction and the source,
       one row above and one row below. */
707 int diff0 = filp[-fils] - srcp[-srcs];
708 int diff1 = filp[+fils] - srcp[+srcs];
711 ABS(srcp[-srcs-1] - srcp[+srcs-1])
712 + ABS(srcp[-srcs ] - srcp[+srcs ])
713 + ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;
    /* Same slope search as yadif: pick the diagonal with the
       lowest vertical SAD and take its diffs instead. */
717 #define MCDEINT_CHECK(j)\
718 { int score = ABS(srcp[-srcs-1+j] - srcp[+srcs-1-j])\
719 + ABS(srcp[-srcs +j] - srcp[+srcs -j])\
720 + ABS(srcp[-srcs+1+j] - srcp[+srcs+1-j]);\
721 if( score < spatial_score ) {\
722 spatial_score = score;\
723 diff0 = filp[-fils+j] - srcp[-srcs+j];\
724 diff1 = filp[+fils-j] - srcp[+srcs-j];
726 MCDEINT_CHECK(-1) MCDEINT_CHECK(-2) }} }}
727 MCDEINT_CHECK( 1) MCDEINT_CHECK( 2) }} }}
729 if(diff0 + diff1 > 0)
731 temp -= (diff0 + diff1 -
732 ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;
736 temp -= (diff0 + diff1 +
737 ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;
    /* Branchless clamp of temp to 0..255: for out-of-range values,
       ~(temp>>31) yields 0 (negative) or 255 (overflow). NOTE(review):
       relies on arithmetic right shift of a negative int, which is
       implementation-defined in C. */
740 filp[0] = dst[i][x + y*w] =
741 temp > 255U ? ~(temp>>31) : temp;
746 pv->mcdeint_frame_dec->data[i][x + y*fils];
    /* Kept-field rows: pass the source through to both the decoder
       reference plane and the output. */
752 for( y = 0; y < h; y++ )
754 if( !((y ^ parity) & 1) )
756 for( x = 0; x < w; x++ )
758 pv->mcdeint_frame_dec->data[i][x + y*fils] =
759 dst[i][x + y*w]= src[i][x + y*srcs];
765 #ifdef SUPPRESS_AV_LOG
766 /* TODO: restore previous log level */
767 av_log_set_level(loglevel);
/* Allocate and configure the filter's private state. Only YUV420P input
   is accepted. Settings string is "mode:spatial_metric:motion_thresh:
   spatial_thresh:block_thresh:block_w:block_h" (7 colon-separated ints).
   Modes 2 and 3 additionally enable the mcdeint refinement pass. */
771 hb_filter_private_t * hb_decomb_init( int pix_fmt,
776 if( pix_fmt != PIX_FMT_YUV420P )
781 hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );
783 pv->pix_fmt = pix_fmt;
    /* Plane geometry: full-res luma, half-res chroma (4:2:0). */
785 pv->width[0] = width;
786 pv->height[0] = height;
787 pv->width[1] = pv->width[2] = width >> 1;
788 pv->height[1] = pv->height[2] = height >> 1;
    /* One full YUV420 frame: Y + Cb/4 + Cr/4 = 3*w*h/2 bytes. */
790 int buf_size = 3 * width * height / 2;
791 pv->buf_out[0] = hb_buffer_init( buf_size );
792 pv->buf_out[1] = hb_buffer_init( buf_size );
793 pv->buf_settings = hb_buffer_init( 0 );
795 pv->yadif_deinterlaced_frames = 0;
796 pv->blend_deinterlaced_frames = 0;
797 pv->unfiltered_frames = 0;
    /* Defaults, overridden below if a settings string was supplied. */
801 pv->mode = MODE_DEFAULT;
802 pv->spatial_metric = 2;
803 pv->motion_threshold = 6;
804 pv->spatial_threshold = 9;
805 pv->block_threshold = 80;
806 pv->block_width = 16;
807 pv->block_height = 16;
809 pv->parity = PARITY_DEFAULT;
811 pv->mcdeint_mode = MCDEINT_MODE_DEFAULT;
812 pv->mcdeint_qp = MCDEINT_QP_DEFAULT;
816 sscanf( settings, "%d:%d:%d:%d:%d:%d:%d",
819 &pv->motion_threshold,
820 &pv->spatial_threshold,
821 &pv->block_threshold,
826 if( pv->mode == 2 || pv->mode == 3 )
828 pv->mcdeint_mode = 0;
831 /* Allocate yadif specific buffers */
833 for( i = 0; i < 3; i++ )
    /* Stride rounded up to a multiple of 32; +6 rows of vertical padding.
       The stored pointer is offset by 3 rows so code may index -refs..-3*refs
       safely; hb_decomb_close() subtracts the same 3*w before free(). */
836 int w = ((width + 31) & (~31))>>is_chroma;
837 int h = ((height+6+ 31) & (~31))>>is_chroma;
839 pv->ref_stride[i] = w;
841 for( j = 0; j < 3; j++ )
843 pv->ref[j][i] = malloc( w*h*sizeof(uint8_t) ) + 3*w;
847 /* Allocate a buffer to store a comb mask. */
848 for( i = 0; i < 3; i++ )
851 int w = ((pv->width[0] + 31) & (~31))>>is_chroma;
852 int h = ((pv->height[0]+6+ 31) & (~31))>>is_chroma;
854 pv->mask[i] = malloc( w*h*sizeof(uint8_t) ) + 3*w;
857 /* Allocate mcdeint specific buffers */
858 if( pv->mcdeint_mode >= 0 )
861 avcodec_register_all();
863 AVCodec * enc = avcodec_find_encoder( CODEC_ID_SNOW );
    /* NOTE(review): this loop runs 3 times but assigns the single
       pv->mcdeint_avctx_enc each iteration -- unless unshown lines break
       out early, the earlier contexts would leak. TODO confirm. */
866 for (i = 0; i < 3; i++ )
868 AVCodecContext * avctx_enc;
870 avctx_enc = pv->mcdeint_avctx_enc = avcodec_alloc_context();
872 avctx_enc->width = width;
873 avctx_enc->height = height;
874 avctx_enc->time_base = (AVRational){1,25}; // meaningless
875 avctx_enc->gop_size = 300;
876 avctx_enc->max_b_frames = 0;
877 avctx_enc->pix_fmt = PIX_FMT_YUV420P;
878 avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
879 avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
880 avctx_enc->global_quality = 1;
    /* Motion-estimation/compensation only -- no actual encoding output
       is wanted, just the motion-compensated reference frame. */
881 avctx_enc->flags2 = CODEC_FLAG2_MEMC_ONLY;
882 avctx_enc->me_cmp = FF_CMP_SAD; //SSE;
883 avctx_enc->me_sub_cmp = FF_CMP_SAD; //SSE;
884 avctx_enc->mb_cmp = FF_CMP_SSE;
    /* Higher mcdeint modes buy better motion search (UMH, 4MV, QPEL). */
886 switch( pv->mcdeint_mode )
891 avctx_enc->me_method = ME_UMH;
893 avctx_enc->flags |= CODEC_FLAG_4MV;
894 avctx_enc->dia_size =2;
896 avctx_enc->flags |= CODEC_FLAG_QPEL;
899 avcodec_open(avctx_enc, enc);
902 pv->mcdeint_frame = avcodec_alloc_frame();
903 pv->mcdeint_outbuf_size = width * height * 10;
904 pv->mcdeint_outbuf = malloc( pv->mcdeint_outbuf_size );
/* Tear down the filter: log the per-path frame statistics, then release
   output buffers, the yadif reference cache, the comb masks, and any
   mcdeint encoder state. */
910 void hb_decomb_close( hb_filter_private_t * pv )
917 hb_log("decomb: yadif deinterlaced %i | blend deinterlaced %i | unfiltered %i | total %i", pv->yadif_deinterlaced_frames, pv->blend_deinterlaced_frames, pv->unfiltered_frames, pv->yadif_deinterlaced_frames + pv->blend_deinterlaced_frames + pv->unfiltered_frames);
919 /* Cleanup frame buffers */
922 hb_buffer_close( &pv->buf_out[0] );
926 hb_buffer_close( &pv->buf_out[1] );
928 if (pv->buf_settings )
930 hb_buffer_close( &pv->buf_settings );
933 /* Cleanup yadif specific buffers */
    /* 3 cache slots x 3 planes; undo the 3-row (+3*stride) offset applied
       at allocation before freeing. */
935 for( i = 0; i<3*3; i++ )
937 uint8_t **p = &pv->ref[i%3][i/3];
940 free( *p - 3*pv->ref_stride[i/3] );
945 /* Cleanup combing mask. */
    /* NOTE(review): this loop runs 9 times but mask[i/3] only has 3 distinct
       entries, so each mask is visited 3 times -- presumably the unshown
       lines null-check and clear *p after free, otherwise this would be a
       double free. TODO confirm. */
946 for( i = 0; i<3*3; i++ )
948 uint8_t **p = &pv->mask[i/3];
951 free( *p - 3*pv->ref_stride[i/3] );
956 /* Cleanup mcdeint specific buffers */
957 if( pv->mcdeint_mode >= 0 )
959 if( pv->mcdeint_avctx_enc )
961 avcodec_close( pv->mcdeint_avctx_enc );
962 av_freep( &pv->mcdeint_avctx_enc );
964 if( pv->mcdeint_outbuf )
966 free( pv->mcdeint_outbuf );
973 int hb_decomb_work( const hb_buffer_t * cbuf_in,
974 hb_buffer_t ** buf_out,
978 hb_filter_private_t * pv )
980 hb_buffer_t * buf_in = (hb_buffer_t *)cbuf_in;
983 pix_fmt != pv->pix_fmt ||
984 width != pv->width[0] ||
985 height != pv->height[0] )
987 return FILTER_FAILED;
990 avpicture_fill( &pv->pic_in, buf_in->data,
991 pix_fmt, width, height );
993 /* Determine if top-field first layout */
997 tff = !!(buf_in->flags & PIC_FLAG_TOP_FIELD_FIRST);
1001 tff = (pv->parity & 1) ^ 1;
1004 /* Store current frame in yadif cache */
1005 store_ref( (const uint8_t**)pv->pic_in.data, pv );
1007 /* If yadif is not ready, store another ref and return FILTER_DELAY */
1008 if( pv->yadif_ready == 0 )
1010 store_ref( (const uint8_t**)pv->pic_in.data, pv );
1012 hb_buffer_copy_settings( pv->buf_settings, buf_in );
1014 /* don't let 'work_loop' send a chapter mark upstream */
1015 buf_in->new_chap = 0;
1017 pv->yadif_ready = 1;
1019 return FILTER_DELAY;
1022 /* Perform yadif filtering */
1024 for( frame = 0; frame <= ( ( pv->mode == 2 || pv->mode == 3 )? 1 : 0 ) ; frame++ )
1026 int parity = frame ^ tff ^ 1;
1028 avpicture_fill( &pv->pic_out, pv->buf_out[!(frame^1)]->data,
1029 pix_fmt, width, height );
1031 yadif_filter( pv->pic_out.data, parity, tff, pv );
1033 if( pv->mcdeint_mode >= 0 )
1035 /* Perform mcdeint filtering */
1036 avpicture_fill( &pv->pic_in, pv->buf_out[(frame^1)]->data,
1037 pix_fmt, width, height );
1039 mcdeint_filter( pv->pic_in.data, pv->pic_out.data, parity, pv );
1042 *buf_out = pv->buf_out[!(frame^1)];
1045 /* Copy buffered settings to output buffer settings */
1046 hb_buffer_copy_settings( *buf_out, pv->buf_settings );
1048 /* Replace buffered settings with input buffer settings */
1049 hb_buffer_copy_settings( pv->buf_settings, buf_in );
1051 /* don't let 'work_loop' send a chapter mark upstream */
1052 buf_in->new_chap = 0;