1 /* $Id: decomb.c,v 1.14 2008/04/25 5:00:00 jbrjake Exp $
3 This file is part of the HandBrake source code.
4 Homepage: <http://handbrake.fr/>.
5 It may be used under the terms of the GNU General Public License.
7 The yadif algorithm was created by Michael Niedermayer. */
9 #include "libavcodec/avcodec.h"
10 #include "mpeg2dec/mpeg2.h"
/* Silence libavcodec's debug logging around the mcdeint encode calls. */
12 #define SUPPRESS_AV_LOG
/* Filter defaults: mode 1, auto field parity (-1), mcdeint disabled (-1). */
14 #define MODE_DEFAULT 1
15 #define PARITY_DEFAULT -1
17 #define MCDEINT_MODE_DEFAULT -1
18 #define MCDEINT_QP_DEFAULT 1
/* NOTE(review): ABS/MIN3/MAX3 evaluate their arguments more than once --
   only safe with side-effect-free arguments, which is how they are used here. */
20 #define ABS(a) ((a) > 0 ? (a) : (-(a)))
21 #define MIN3(a,b,c) MIN(MIN(a,b),c)
22 #define MAX3(a,b,c) MAX(MAX(a,b),c)
/* Private state for the decomb filter: geometry, detection thresholds,
   yadif/mcdeint working buffers, and per-path frame counters.
   NOTE(review): this chunk is elided -- many members (mode, parity, width[],
   height[], ref[], mask[], etc.) are not visible; comments below cover only
   the fields shown. */
24 struct hb_filter_private_s
    /* Spatial combing threshold used by tritical_detect_comb(). */
33     int spatial_threshold;
    /* mcdeint scratch: encoded-output buffer and its size, the snow encoder
       context, the input AVFrame, and the encoder's reconstructed frame. */
45     int mcdeint_outbuf_size;
46     uint8_t * mcdeint_outbuf;
47     AVCodecContext * mcdeint_avctx_enc;
48     AVFrame * mcdeint_frame;
49     AVFrame * mcdeint_frame_dec;
    /* Counters for how each frame was handled; summed for the close() log. */
51     int yadif_deinterlaced_frames;
52     int blend_deinterlaced_frames;
53     int unfiltered_frames;
58     /* Make a buffer to store a comb mask. */
    /* Double-buffered output frames plus a settings carrier used to bridge
       the one-frame delay introduced by the yadif reference cache. */
63     hb_buffer_t * buf_out[2];
64     hb_buffer_t * buf_settings;
/* Forward declarations of the filter entry points registered below. */
67 hb_filter_private_t * hb_decomb_init( int pix_fmt,
72 int hb_decomb_work( const hb_buffer_t * buf_in,
73 hb_buffer_t ** buf_out,
77 hb_filter_private_t * pv );
79 void hb_decomb_close( hb_filter_private_t * pv );
/* Filter descriptor exported to HandBrake's filter chain. */
81 hb_filter_object_t hb_filter_decomb =
84 "Deinterlaces selectively with yadif/mcdeint or lowpass5 blending",
/* 4-tap cubic interpolation of the sample between y1 and y2, used by yadif
   for vertical and diagonal (edge-directed) prediction.
   NOTE(review): the normalization of `result` (the elided lines presumably
   divide by 40 and clamp to 0..255) is not visible in this chunk. */
91 int cubic_interpolate( int y0, int y1, int y2, int y3 )
93     /* From http://www.neuron2.net/library/cubicinterp.html */
94     int result = ( y0 * -3 ) + ( y1 * 23 ) + ( y2 * 23 ) + ( y3 * -3 );
/* Clamp negative results up to 0 (branch body elided). */
101     else if( result < 0 )
/* Push a new picture into the 3-frame yadif reference cache.
   The two memcpys (destinations elided) appear to rotate the plane-pointer
   tables so ref[1..2] become ref[0..1]; the new frame is then copied into
   slot 2 ("next").  TODO confirm the rotation against the elided lines. */
109 static void store_ref( const uint8_t ** pic,
110 hb_filter_private_t * pv )
114 sizeof(uint8_t *)*3 );
118 sizeof(uint8_t *)*3*3 );
    /* Copy each of the three planes line by line: the source is packed at
       width w, the destination uses the padded ref_stride. */
121 for( i = 0; i < 3; i++ )
123 const uint8_t * src = pic[i];
124 uint8_t * ref = pv->ref[2][i];
126 int w = pv->width[i];
127 int h = pv->height[i];
128 int ref_stride = pv->ref_stride[i];
131 for( y = 0; y < pv->height[i]; y++ )
            /* Per-line copy elided; advance both cursors to the next row. */
134 src = (uint8_t*)src + w;
135 ref = (uint8_t*)ref + ref_stride;
/* Copy cached reference frame `frm` (0 = prev, 1 = cur, 2 = next) out of the
   padded cache into a packed picture, plane by plane. */
140 static void get_ref( uint8_t ** pic, hb_filter_private_t * pv, int frm )
143 for( i = 0; i < 3; i++ )
145 uint8_t * dst = pic[i];
146 const uint8_t * ref = pv->ref[frm][i];
147 int w = pv->width[i];
148 int ref_stride = pv->ref_stride[i];
        /* Per-line body elided -- presumably a memcpy of w bytes followed by
           dst += w and ref += ref_stride; confirm against upstream. */
151 for( y = 0; y < pv->height[i]; y++ )
/* Lowpass5 blend: weighted average of a 5-pixel vertical column centred on
   `current`.  Only the centre tap (weight 6) is visible here; the elided
   lines presumably apply the classic lowpass5 kernel (-1, 2, 6, 2, -1)/8
   with a 0..255 clamp -- TODO confirm. */
160 int blend_filter_pixel( int up2, int up1, int current, int down1, int down2 )
162     /* Low-pass 5-tap filter */
166     result += current * 6;
/* Run blend_filter_pixel() across one output line of `plane`, substituting
   nearby rows at the top/bottom of the plane where the 5-pixel vertical
   window would run out of bounds. */
183 static void blend_filter_line( uint8_t *dst,
187 hb_filter_private_t * pv )
189     int w = pv->width[plane];
190     int refs = pv->ref_stride[plane];
193     for( x = 0; x < w; x++)
        /* a..e are the samples two-above .. two-below (setup elided);
           the boundary branches below pick replacements for missing rows. */
205             /* First line, so A and B don't exist.*/
211             /* Second line, no A. */
214         else if( y == (pv->height[plane] - 2) )
216             /* Second to last line, no E. */
219         else if( y == (pv->height[plane] -1) )
221             /* Last line, no D or E. */
226         dst[0] = blend_filter_pixel( a, b, c, d, e );
/* Score the comb mask in block_width x block_height windows and decide how
   to treat the frame: scores in (threshold/2, threshold] on video-flagged
   content request blending, scores above threshold request yadif (or blend
   for film/progressive-flagged content); otherwise the frame passes through
   unfiltered.  Return values are elided in this chunk -- presumably
   0 = clean, 1 = yadif, 2 = blend, matching yadif_filter()'s use. */
233 int check_combing_mask( hb_filter_private_t * pv )
235     /* Go through the mask in X*Y blocks. If any of these windows
236        have threshold or more combed pixels, consider the whole
237        frame to be combed and send it on to be deinterlaced. */
239     /* Block mask threshold -- The number of pixels
240        in a block_width * block_height window of
241        the mask that need to show combing for the
242        whole frame to be seen as such. */
243     int threshold = pv->block_threshold;
244     int block_width = pv->block_width;
245     int block_height = pv->block_height;
246     int block_x, block_y;
247     int block_score = 0; int send_to_blend = 0;
    /* Only the luma mask (k == 0) is scored; chroma is skipped. */
251     for( k = 0; k < 1; k++ )
253         int ref_stride = pv->ref_stride[k];
254         for( y = 0; y < ( pv->height[k] - block_height ); y = y + block_height )
256             for( x = 0; x < ( pv->width[k] - block_width ); x = x + block_width )
259                 for( block_y = 0; block_y < block_height; block_y++ )
261                     for( block_x = 0; block_x < block_width; block_x++ )
263                         int mask_y = y + block_y;
264                         int mask_x = x + block_x;
266                         /* We only want to mark a pixel in a block as combed
267                            if the pixels above and below are as well. Got to
268                            handle the top and bottom lines separately. */
                        /* NOTE(review): the guards below are on the top/bottom
                           rows, but the neighbours actually tested are
                           horizontal (mask_x +/- 1), not vertical -- this
                           looks inconsistent with the comment above; confirm
                           against the upstream source. */
269                         if( y + block_y == 0 )
271                             if( pv->mask[k][mask_y*ref_stride+mask_x ] == 255 &&
272                                 pv->mask[k][mask_y*ref_stride+mask_x + 1] == 255 )
275                         else if( y + block_y == pv->height[k] - 1 )
277                             if( pv->mask[k][mask_y*ref_stride+mask_x - 1] == 255 &&
278                                 pv->mask[k][mask_y*ref_stride+mask_x ] == 255 )
                        /* Interior rows: require three combed pixels in a row. */
283                             if( pv->mask[k][mask_y*ref_stride+mask_x - 1] == 255 &&
284                                 pv->mask[k][mask_y*ref_stride+mask_x ] == 255 &&
285                                 pv->mask[k][mask_y*ref_stride+mask_x + 1] == 255 )
    /* A window scored at least half the threshold: log it and classify. */
291     if( block_score >= ( threshold / 2 ) )
294         hb_log("decomb: frame %i | score %i | type %s", pv->yadif_deinterlaced_frames + pv->blend_deinterlaced_frames + pv->unfiltered_frames + 1, block_score, pv->buf_settings->flags & 16 ? "Film" : "Video");
        /* Borderline video content (flag bit 16 clear) gets blended. */
296         if ( block_score <= threshold && !( pv->buf_settings->flags & 16) )
298             /* Blend video content that scores between
299                ( threshold / 2 ) and threshold. */
302         else if( block_score > threshold )
304             if( pv->buf_settings->flags & 16 )
306                 /* Blend progressive content above the threshold.*/
311                 /* Yadif deinterlace video content above the threshold. */
326     /* Consider this frame to be uncombed. */
/* Build the per-pixel comb mask for the current frame and hand it to
   check_combing_mask() for the final combed/clean verdict.  For each luma
   pixel: require opposing vertical differences beyond the spatial threshold,
   optionally require temporal motion beyond motion_threshold, then apply one
   of three spatial metrics (32detect-style, IsCombed product, or Tritical's
   bob+blur convolution) to set mask to 255 (combed) or 0. */
331 int tritical_detect_comb( hb_filter_private_t * pv )
333     /* A mish-mash of various comb detection tricks
334        picked up from neuron2's Decomb plugin for
335        AviSynth and tritical's IsCombedT and
336        IsCombedTIVTC plugins. */
338     int x, y, k, width, height;
340     /* Comb scoring algorithm */
341     int spatial_metric = pv->spatial_metric;
342     /* Motion threshold */
343     int mthresh = pv->motion_threshold;
344     /* Spatial threshold */
345     int athresh = pv->spatial_threshold;
346     int athresh_squared = athresh * athresh;
347     int athresh6 = 6 *athresh;
349     /* Only the luma plane is scanned (k < 1); chroma passes are skipped. */
350     for( k = 0; k < 1; k++ )
352         int ref_stride = pv->ref_stride[k];
353         width = pv->width[k];
354         height = pv->height[k];
        /* Skip the two border rows at top and bottom: the 5-pixel column
           below needs y-2 .. y+2 in bounds. */
356         for( y = 2; y < ( height - 2 ); y++ )
358             /* These are just to make the buffer locations easier to read. */
359             int back_2 = ( y - 2 )*ref_stride ;
360             int back_1 = ( y - 1 )*ref_stride;
361             int current = y*ref_stride;
362             int forward_1 = ( y + 1 )*ref_stride;
363             int forward_2 = ( y + 2 )*ref_stride;
365             /* We need to examine a column of 5 pixels
366                in the prev, cur, and next frames. */
367             uint8_t previous_frame[5];
368             uint8_t current_frame[5];
369             uint8_t next_frame[5];
371             for( x = 0; x < width; x++ )
373                 /* Fill up the current frame array with the current pixel values.*/
374                 current_frame[0] = pv->ref[1][k][back_2 + x];
375                 current_frame[1] = pv->ref[1][k][back_1 + x];
376                 current_frame[2] = pv->ref[1][k][current + x];
377                 current_frame[3] = pv->ref[1][k][forward_1 + x];
378                 current_frame[4] = pv->ref[1][k][forward_2 + x];
380                 int up_diff = current_frame[2] - current_frame[1];
381                 int down_diff = current_frame[2] - current_frame[3];
383                 if( ( up_diff > athresh && down_diff > athresh ) ||
384                     ( up_diff < -athresh && down_diff < -athresh ) )
386                     /* The pixel above and below are different,
387                        and they change in the same "direction" too.*/
                    /* Motion gate (the mthresh > 0 guard is elided here):
                       only consider combing where the scene actually moves
                       between the prev and next frames. */
391                     /* Make sure there's sufficient motion between frame t-1 to frame t+1. */
392                     previous_frame[0] = pv->ref[0][k][back_2 + x];
393                     previous_frame[1] = pv->ref[0][k][back_1 + x];
394                     previous_frame[2] = pv->ref[0][k][current + x];
395                     previous_frame[3] = pv->ref[0][k][forward_1 + x];
396                     previous_frame[4] = pv->ref[0][k][forward_2 + x];
397                     next_frame[0] = pv->ref[2][k][back_2 + x];
398                     next_frame[1] = pv->ref[2][k][back_1 + x];
399                     next_frame[2] = pv->ref[2][k][current + x];
400                     next_frame[3] = pv->ref[2][k][forward_1 + x];
401                     next_frame[4] = pv->ref[2][k][forward_2 + x];
403                     if( abs( previous_frame[2] - current_frame[2] ) > mthresh &&
404                         abs( current_frame[1] - next_frame[1] ) > mthresh &&
405                         abs( current_frame[3] - next_frame[3] ) > mthresh )
407                     if( abs( next_frame[2] - current_frame[2] ) > mthresh &&
408                         abs( previous_frame[1] - current_frame[1] ) > mthresh &&
409                         abs( previous_frame[3] - current_frame[3] ) > mthresh )
414                     /* User doesn't want to check for motion,
415                        so move on to the spatial check. */
                /* Also treat the very first frame (all counters zero) as
                   "in motion" so it gets a spatial check. */
419                 if( motion || ( pv->yadif_deinterlaced_frames==0 && pv->blend_deinterlaced_frames==0 && pv->unfiltered_frames==0) )
421                     /*That means it's time for the spatial check.
422                       We've got several options here. */
423                     if( spatial_metric == 0 )
425                         /* Simple 32detect style comb detection */
426                         if( ( abs( current_frame[2] - current_frame[4] ) < 10 ) &&
427                             ( abs( current_frame[2] - current_frame[3] ) > 15 ) )
429                             pv->mask[k][y*ref_stride + x] = 255;
433                             pv->mask[k][y*ref_stride + x] = 0;
436                     else if( spatial_metric == 1 )
438                         /* This, for comparison, is what IsCombed uses.
439                            It's better, but still noise sensitive. */
440                         int combing = ( current_frame[1] - current_frame[2] ) *
441                                       ( current_frame[3] - current_frame[2] );
443                         if( combing > athresh_squared )
444                             pv->mask[k][y*ref_stride + x] = 255;
446                             pv->mask[k][y*ref_stride + x] = 0;
448                     else if( spatial_metric == 2 )
450                         /* Tritical's noise-resistant combing scorer.
451                            The check is done on a bob+blur convolution. */
452                         int combing = abs( current_frame[0]
453                                          + ( 4 * current_frame[2] )
455                                          - ( 3 * ( current_frame[1]
456                                                  + current_frame[3] ) ) );
458                         /* If the frame is sufficiently combed,
459                            then mark it down on the mask as 255. */
460                         if( combing > athresh6 )
461                             pv->mask[k][y*ref_stride + x] = 255;
463                             pv->mask[k][y*ref_stride + x] = 0;
                /* No motion / no vertical disagreement: mark clean. */
468                     pv->mask[k][y*ref_stride + x] = 0;
473                     pv->mask[k][y*ref_stride + x] = 0;
479     return check_combing_mask( pv );
/* Yadif core: interpolate one missing-field output line of `plane` from the
   prev/cur/next reference frames.  Spatial prediction (vertical or
   edge-directed via YADIF_CHECK) is clamped by a temporal window derived
   from the adjacent fields.  Elided lines include the per-pixel pointer
   setup (c, e, the cursor advances) and parts of the mode selection. */
482 static void yadif_filter_line( uint8_t *dst,
489 hb_filter_private_t * pv )
491     /* While prev and next point to the previous and next frames,
492        prev2 and next2 will shift depending on the parity, usually 1.
493        They are the previous and next fields, the fields temporally adjacent
494        to the other field in the current frame--the one not being filtered. */
495     uint8_t *prev2 = parity ? prev : cur ;
496     uint8_t *next2 = parity ? cur : next;
497     int w = pv->width[plane];
498     int refs = pv->ref_stride[plane];
501     for( x = 0; x < w; x++)
505         /* Temporal average: the current location in the adjacent fields */
506         int d = (prev2[0] + next2[0])>>1;
510         /* How the current pixel changes between the adjacent fields */
511         int temporal_diff0 = ABS(prev2[0] - next2[0]);
512         /* The average of how much the pixels above and below change from the frame before to now. */
513         int temporal_diff1 = ( ABS(prev[-refs] - cur[-refs]) + ABS(prev[+refs] - cur[+refs]) ) >> 1;
514         /* The average of how much the pixels above and below change from now to the next frame. */
515         int temporal_diff2 = ( ABS(next[-refs] - cur[-refs]) + ABS(next[+refs] - cur[+refs]) ) >> 1;
516         /* For the actual difference, use the largest of the previous average diffs. */
517         int diff = MAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2);
519         /* SAD of how the pixel-1, the pixel, and the pixel+1 change from the line above to below. */
520         int spatial_score = ABS(cur[-refs-1] - cur[+refs-1]) + ABS(cur[-refs]-cur[+refs]) +
521                             ABS(cur[-refs+1] - cur[+refs+1]) - 1;
524         /* Spatial pred is either a bilinear or cubic vertical interpolation. */
527             spatial_pred = cubic_interpolate( cur[-3*refs], cur[-refs], cur[+refs], cur[3*refs] );
531             spatial_pred = (c+e)>>1;
534 /* EDDI: Edge Directed Deinterlacing Interpolation
535    Uses the Martinez-Lim Line Shift Parametric Modeling algorithm...I think.
536    Checks 4 different slopes to see if there is more similarity along a diagonal
537    than there was vertically. If a diagonal is more similar, then it indicates
538    an edge, so interpolate along that instead of a vertical line, using either
539    linear or cubic interpolation depending on mode. */
540 #define YADIF_CHECK(j)\
541 { int score = ABS(cur[-refs-1+j] - cur[+refs-1-j])\
542             + ABS(cur[-refs +j] - cur[+refs -j])\
543             + ABS(cur[-refs+1+j] - cur[+refs+1-j]);\
544   if( score < spatial_score ){\
545     spatial_score = score;\
551     spatial_pred = cubic_interpolate(cur[-3 * refs - 3], cur[-refs -1], cur[+refs + 1], cur[3* refs + 3] );\
554     spatial_pred = cubic_interpolate( ( ( cur[-3*refs - 4] + cur[-refs - 4] ) / 2 ) , cur[-refs -2], cur[+refs + 2], ( ( cur[3*refs + 4] + cur[refs + 4] ) / 2 ) );\
557     spatial_pred = cubic_interpolate(cur[-3 * refs +3], cur[-refs +1], cur[+refs - 1], cur[3* refs -3] );\
560     spatial_pred = cubic_interpolate(( ( cur[-3*refs + 4] + cur[-refs + 4] ) / 2 ), cur[-refs +2], cur[+refs - 2], ( ( cur[3*refs - 4] + cur[refs - 4] ) / 2 ) );\
566     spatial_pred = ( cur[-refs +j] + cur[+refs -j] ) >>1;\
        /* Probe slopes +/-1 then +/-2; the trailing braces close the nested
           if-blocks opened inside YADIF_CHECK's elided portions. */
569         YADIF_CHECK(-1) YADIF_CHECK(-2) }} }}
570         YADIF_CHECK( 1) YADIF_CHECK( 2) }} }}
572         /* Temporally adjust the spatial prediction by
573            comparing against lines in the adjacent fields. */
574         int b = (prev2[-2*refs] + next2[-2*refs])>>1;
575         int f = (prev2[+2*refs] + next2[+2*refs])>>1;
577         /* Find the median value */
578         int max = MAX3(d-e, d-c, MIN(b-c, f-e));
579         int min = MIN3(d-e, d-c, MAX(b-c, f-e));
580         diff = MAX3( diff, min, -max );
        /* Clamp the spatial prediction into the temporal window [d-diff, d+diff]. */
582         if( spatial_pred > d + diff )
584             spatial_pred = d + diff;
586         else if( spatial_pred < d - diff )
588             spatial_pred = d - diff;
591         dst[0] = spatial_pred;
/* Top-level per-frame filter: classify the frame via tritical_detect_comb(),
   then for each plane/line either blend (is_combed == 2, or mode 4 on any
   combing), yadif the missing field's lines (is_combed == 1), or copy the
   cached current frame through untouched. */
602 static void yadif_filter( uint8_t ** dst,
605 hb_filter_private_t * pv )
608     int is_combed = tritical_detect_comb( pv );
    /* Tally which path this frame takes (guarding if/else lines elided). */
612         pv->yadif_deinterlaced_frames++;
614     else if( is_combed == 2 )
616         pv->blend_deinterlaced_frames++;
620         pv->unfiltered_frames++;
624     for( i = 0; i < 3; i++ )
626         int w = pv->width[i];
627         int h = pv->height[i];
628         int ref_stride = pv->ref_stride[i];
631         for( y = 0; y < h; y++ )
633             if( ( pv->mode == 4 && is_combed ) || is_combed == 2 )
                /* NOTE(review): prev/next are set but unused on the blend
                   path -- blend_filter_line() only reads cur. */
635                 uint8_t *prev = &pv->ref[0][i][y*ref_stride];
636                 uint8_t *cur = &pv->ref[1][i][y*ref_stride];
637                 uint8_t *next = &pv->ref[2][i][y*ref_stride];
638                 uint8_t *dst2 = &dst[i][y*w];
640                 blend_filter_line( dst2, cur, i, y, pv );
            /* Only the field opposite `parity` is interpolated by yadif. */
642             else if( (y ^ parity) & 1 && is_combed == 1 )
644                 uint8_t *prev = &pv->ref[0][i][y*ref_stride];
645                 uint8_t *cur = &pv->ref[1][i][y*ref_stride];
646                 uint8_t *next = &pv->ref[2][i][y*ref_stride];
647                 uint8_t *dst2 = &dst[i][y*w];
649                 yadif_filter_line( dst2, prev, cur, next, i, parity ^ tff, y, pv );
            /* Untouched lines: copy straight from the cached current frame. */
653                 memcpy( &dst[i][y*w],
654                         &pv->ref[1][i][y*ref_stride],
655                         w * sizeof(uint8_t) );
/* Motion-compensated refinement pass: encode `src` with the snow encoder in
   MEMC-only mode and use the encoder's motion-compensated reconstruction to
   adjust the pixels of the field yadif just interpolated.  Untouched-field
   lines are copied through verbatim at the end. */
661 static void mcdeint_filter( uint8_t ** dst,
664 hb_filter_private_t * pv )
669 #ifdef SUPPRESS_AV_LOG
670     /* TODO: temporarily change log level to suppress obnoxious debug output */
671     int loglevel = av_log_get_level();
672     av_log_set_level( AV_LOG_QUIET );
    /* Wrap the source planes in an AVFrame (loop header elided). */
677         pv->mcdeint_frame->data[i] = src[i];
678         pv->mcdeint_frame->linesize[i] = pv->width[i];
680     pv->mcdeint_avctx_enc->me_cmp = FF_CMP_SAD;
681     pv->mcdeint_avctx_enc->me_sub_cmp = FF_CMP_SAD;
682     pv->mcdeint_frame->quality = pv->mcdeint_qp * FF_QP2LAMBDA;
    /* NOTE(review): avcodec_encode_video() is the old pre-AVPacket encode
       API, consistent with the 2008-era libavcodec this file targets. */
684     out_size = avcodec_encode_video( pv->mcdeint_avctx_enc,
686                                      pv->mcdeint_outbuf_size,
    /* The encoder's reconstructed (motion-compensated) frame. */
689     pv->mcdeint_frame_dec = pv->mcdeint_avctx_enc->coded_frame;
691     for( i = 0; i < 3; i++ )
693         int w = pv->width[i];
694         int h = pv->height[i];
695         int fils = pv->mcdeint_frame_dec->linesize[i];
696         int srcs = pv->width[i];
698         for( y = 0; y < h; y++ )
            /* Refine only the interpolated field's lines. */
700             if( (y ^ parity) & 1 )
702                 for( x = 0; x < w; x++ )
                    /* Stay clear of the frame edges so the +/-1 row and
                       +/-2 column accesses below stay in bounds. */
704                     if( (x-2)+(y-1)*w >= 0 && (x+2)+(y+1)*w < w*h )
707                             &pv->mcdeint_frame_dec->data[i][x + y*fils];
708                         uint8_t * srcp = &src[i][x + y*srcs];
                        /* Vertical differences between the MC frame and src. */
710                         int diff0 = filp[-fils] - srcp[-srcs];
711                         int diff1 = filp[+fils] - srcp[+srcs];
714                             ABS(srcp[-srcs-1] - srcp[+srcs-1])
715                             + ABS(srcp[-srcs ] - srcp[+srcs ])
716                             + ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;
/* Same edge-directed slope search as yadif, applied to the diffs. */
720 #define MCDEINT_CHECK(j)\
721 { int score = ABS(srcp[-srcs-1+j] - srcp[+srcs-1-j])\
722             + ABS(srcp[-srcs +j] - srcp[+srcs -j])\
723             + ABS(srcp[-srcs+1+j] - srcp[+srcs+1-j]);\
724   if( score < spatial_score ) {\
725     spatial_score = score;\
726     diff0 = filp[-fils+j] - srcp[-srcs+j];\
727     diff1 = filp[+fils-j] - srcp[+srcs-j];
729                         MCDEINT_CHECK(-1) MCDEINT_CHECK(-2) }} }}
730                         MCDEINT_CHECK( 1) MCDEINT_CHECK( 2) }} }}
                        /* Pull temp toward the MC frame by the agreed diff. */
732                         if(diff0 + diff1 > 0)
734                             temp -= (diff0 + diff1 -
735                                      ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;
739                             temp -= (diff0 + diff1 +
740                                      ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;
                        /* Branchless clamp of temp to 0..255: ~(temp>>31) is
                           0 for negative temp, 255 (low byte) for overflow. */
743                         filp[0] = dst[i][x + y*w] =
744                             temp > 255U ? ~(temp>>31) : temp;
                    /* Edge pixels: pass the MC frame value through (elided). */
749                             pv->mcdeint_frame_dec->data[i][x + y*fils];
        /* Copy the untouched field's lines from src into both outputs. */
755         for( y = 0; y < h; y++ )
757             if( !((y ^ parity) & 1) )
759                 for( x = 0; x < w; x++ )
761                     pv->mcdeint_frame_dec->data[i][x + y*fils] =
762                         dst[i][x + y*w]= src[i][x + y*srcs];
768 #ifdef SUPPRESS_AV_LOG
769     /* TODO: restore previous log level */
770     av_log_set_level(loglevel);
/* Allocate and configure the filter's private state: parse the settings
   string, size the planes, allocate the padded yadif reference cache and
   comb mask, and (for modes 2/3) set up the snow encoder used by mcdeint.
   Only YUV420P input is supported. */
774 hb_filter_private_t * hb_decomb_init( int pix_fmt,
779     if( pix_fmt != PIX_FMT_YUV420P )
784     hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );
786     pv->pix_fmt = pix_fmt;
    /* Plane geometry: chroma planes are half-size in both dimensions. */
788     pv->width[0] = width;
789     pv->height[0] = height;
790     pv->width[1] = pv->width[2] = width >> 1;
791     pv->height[1] = pv->height[2] = height >> 1;
    /* 3/2 bytes per pixel = one full YUV420 frame per output buffer. */
793     int buf_size = 3 * width * height / 2;
794     pv->buf_out[0] = hb_buffer_init( buf_size );
795     pv->buf_out[1] = hb_buffer_init( buf_size );
796     pv->buf_settings = hb_buffer_init( 0 );
798     pv->yadif_deinterlaced_frames = 0;
799     pv->blend_deinterlaced_frames = 0;
800     pv->unfiltered_frames = 0;
    /* Defaults; overridden below by the colon-separated settings string. */
804     pv->mode = MODE_DEFAULT;
805     pv->spatial_metric = 2;
806     pv->motion_threshold = 6;
807     pv->spatial_threshold = 9;
808     pv->block_threshold = 80;
809     pv->block_width = 16;
810     pv->block_height = 16;
812     pv->parity = PARITY_DEFAULT;
814     pv->mcdeint_mode = MCDEINT_MODE_DEFAULT;
815     pv->mcdeint_qp = MCDEINT_QP_DEFAULT;
    /* Settings format: mode:metric:mthresh:athresh:bthresh:bwidth:bheight
       (the settings != NULL guard and first/last arguments are elided). */
819         sscanf( settings, "%d:%d:%d:%d:%d:%d:%d",
822                 &pv->motion_threshold,
823                 &pv->spatial_threshold,
824                 &pv->block_threshold,
    /* Modes 2 and 3 enable the mcdeint refinement pass. */
829     if( pv->mode == 2 || pv->mode == 3 )
831         pv->mcdeint_mode = 0;
834     /* Allocate yadif specific buffers */
836     for( i = 0; i < 3; i++ )
        /* Round up to a 32-multiple and add 6 padding rows; chroma planes
           are halved via is_chroma (its definition is elided here). */
839         int w = ((width + 31) & (~31))>>is_chroma;
840         int h = ((height+6+ 31) & (~31))>>is_chroma;
842         pv->ref_stride[i] = w;
844         for( j = 0; j < 3; j++ )
            /* Offset the stored pointer past 3 padding rows; hb_decomb_close()
               must subtract 3*ref_stride before free()ing. */
846             pv->ref[j][i] = malloc( w*h*sizeof(uint8_t) ) + 3*w;
850     /* Allocate a buffer to store a comb mask. */
851     for( i = 0; i < 3; i++ )
        /* NOTE(review): sized from width[0]/height[0] (luma) for every i,
           unlike the ref buffers above -- confirm intent against upstream. */
854         int w = ((pv->width[0] + 31) & (~31))>>is_chroma;
855         int h = ((pv->height[0]+6+ 31) & (~31))>>is_chroma;
857         pv->mask[i] = malloc( w*h*sizeof(uint8_t) ) + 3*w;
860     /* Allocate mcdeint specific buffers */
861     if( pv->mcdeint_mode >= 0 )
864         avcodec_register_all();
866         AVCodec * enc = avcodec_find_encoder( CODEC_ID_SNOW );
        /* NOTE(review): this loop assigns pv->mcdeint_avctx_enc on each of 3
           iterations; whether earlier contexts are freed or the loop exits
           early is in elided lines -- verify no context leak upstream. */
869         for (i = 0; i < 3; i++ )
871             AVCodecContext * avctx_enc;
873             avctx_enc = pv->mcdeint_avctx_enc = avcodec_alloc_context();
875             avctx_enc->width = width;
876             avctx_enc->height = height;
877             avctx_enc->time_base = (AVRational){1,25}; // meaningless
878             avctx_enc->gop_size = 300;
879             avctx_enc->max_b_frames = 0;
880             avctx_enc->pix_fmt = PIX_FMT_YUV420P;
            /* MEMC_ONLY: snow runs motion estimation/compensation without
               actually producing a usable bitstream. */
881             avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
882             avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
883             avctx_enc->global_quality = 1;
884             avctx_enc->flags2 = CODEC_FLAG2_MEMC_ONLY;
885             avctx_enc->me_cmp = FF_CMP_SAD; //SSE;
886             avctx_enc->me_sub_cmp = FF_CMP_SAD; //SSE;
887             avctx_enc->mb_cmp = FF_CMP_SSE;
            /* Higher mcdeint modes buy better motion search (UMH, 4MV, QPEL). */
889             switch( pv->mcdeint_mode )
894                     avctx_enc->me_method = ME_UMH;
896                     avctx_enc->flags |= CODEC_FLAG_4MV;
897                     avctx_enc->dia_size =2;
899                     avctx_enc->flags |= CODEC_FLAG_QPEL;
902             avcodec_open(avctx_enc, enc);
905         pv->mcdeint_frame = avcodec_alloc_frame();
        /* Generous worst-case bound for the (discarded) encoded output. */
906         pv->mcdeint_outbuf_size = width * height * 10;
907         pv->mcdeint_outbuf = malloc( pv->mcdeint_outbuf_size );
/* Tear down everything hb_decomb_init() allocated and log the per-path
   frame counts. */
913 void hb_decomb_close( hb_filter_private_t * pv )
920     hb_log("decomb: yadif deinterlaced %i | blend deinterlaced %i | unfiltered %i | total %i", pv->yadif_deinterlaced_frames, pv->blend_deinterlaced_frames, pv->unfiltered_frames, pv->yadif_deinterlaced_frames + pv->blend_deinterlaced_frames + pv->unfiltered_frames);
922     /* Cleanup frame buffers */
925     hb_buffer_close( &pv->buf_out[0] );
929     hb_buffer_close( &pv->buf_out[1] );
931     if (pv->buf_settings )
933         hb_buffer_close( &pv->buf_settings );
936     /* Cleanup yadif specific buffers */
    /* Undo the +3*stride offset applied at allocation before freeing. */
938     for( i = 0; i<3*3; i++ )
940         uint8_t **p = &pv->ref[i%3][i/3];
943         free( *p - 3*pv->ref_stride[i/3] );
948     /* Cleanup combing mask. */
    /* NOTE(review): this loop runs 9 times but mask[] has 3 entries, so each
       mask[i/3] is visited three times -- presumably an elided `if (*p)` /
       `*p = NULL` guard prevents a double free; confirm against upstream. */
949     for( i = 0; i<3*3; i++ )
951         uint8_t **p = &pv->mask[i/3];
954         free( *p - 3*pv->ref_stride[i/3] );
959     /* Cleanup mcdeint specific buffers */
960     if( pv->mcdeint_mode >= 0 )
962         if( pv->mcdeint_avctx_enc )
964             avcodec_close( pv->mcdeint_avctx_enc );
965             av_freep( &pv->mcdeint_avctx_enc );
967         if( pv->mcdeint_outbuf )
969             free( pv->mcdeint_outbuf );
976 int hb_decomb_work( const hb_buffer_t * cbuf_in,
977 hb_buffer_t ** buf_out,
981 hb_filter_private_t * pv )
983 hb_buffer_t * buf_in = (hb_buffer_t *)cbuf_in;
986 pix_fmt != pv->pix_fmt ||
987 width != pv->width[0] ||
988 height != pv->height[0] )
990 return FILTER_FAILED;
993 avpicture_fill( &pv->pic_in, buf_in->data,
994 pix_fmt, width, height );
996 /* Determine if top-field first layout */
1000 tff = !!(buf_in->flags & PIC_FLAG_TOP_FIELD_FIRST);
1004 tff = (pv->parity & 1) ^ 1;
1007 /* Store current frame in yadif cache */
1008 store_ref( (const uint8_t**)pv->pic_in.data, pv );
1010 /* If yadif is not ready, store another ref and return FILTER_DELAY */
1011 if( pv->yadif_ready == 0 )
1013 store_ref( (const uint8_t**)pv->pic_in.data, pv );
1015 hb_buffer_copy_settings( pv->buf_settings, buf_in );
1017 /* don't let 'work_loop' send a chapter mark upstream */
1018 buf_in->new_chap = 0;
1020 pv->yadif_ready = 1;
1022 return FILTER_DELAY;
1025 /* Perform yadif filtering */
1027 for( frame = 0; frame <= ( ( pv->mode == 2 || pv->mode == 3 )? 1 : 0 ) ; frame++ )
1029 int parity = frame ^ tff ^ 1;
1031 avpicture_fill( &pv->pic_out, pv->buf_out[!(frame^1)]->data,
1032 pix_fmt, width, height );
1034 yadif_filter( pv->pic_out.data, parity, tff, pv );
1036 if( pv->mcdeint_mode >= 0 )
1038 /* Perform mcdeint filtering */
1039 avpicture_fill( &pv->pic_in, pv->buf_out[(frame^1)]->data,
1040 pix_fmt, width, height );
1042 mcdeint_filter( pv->pic_in.data, pv->pic_out.data, parity, pv );
1045 *buf_out = pv->buf_out[!(frame^1)];
1048 /* Copy buffered settings to output buffer settings */
1049 hb_buffer_copy_settings( *buf_out, pv->buf_settings );
1051 /* Replace buffered settings with input buffer settings */
1052 hb_buffer_copy_settings( pv->buf_settings, buf_in );
1054 /* don't let 'work_loop' send a chapter mark upstream */
1055 buf_in->new_chap = 0;