OSDN Git Service

Much improved decomb filter. Totally different algorithm, with a temporal element...
[handbrake-jp/handbrake-jp-git.git] / libhb / decomb.c
1 /* $Id: decomb.c,v 1.14 2008/04/25 5:00:00 jbrjake Exp $
2
3    This file is part of the HandBrake source code.
4    Homepage: <http://handbrake.fr/>.
5    It may be used under the terms of the GNU General Public License. 
6    
7    The yadif algorithm was created by Michael Niedermayer. */
8 #include "hb.h"
9 #include "libavcodec/avcodec.h"
10 #include "mpeg2dec/mpeg2.h"
11
12 #define SUPPRESS_AV_LOG
13
14 #define MODE_DEFAULT     1
15 #define PARITY_DEFAULT   -1
16
17 #define MCDEINT_MODE_DEFAULT   -1
18 #define MCDEINT_QP_DEFAULT      1
19
20 #define ABS(a) ((a) > 0 ? (a) : (-(a)))
21 #define MIN3(a,b,c) MIN(MIN(a,b),c)
22 #define MAX3(a,b,c) MAX(MAX(a,b),c)
23
/* Per-instance state for the decomb filter. One of these is allocated by
   hb_decomb_init() and threaded through every call until hb_decomb_close(). */
struct hb_filter_private_s
{
    int              pix_fmt;     /* input pixel format (YUV 4:2:0 only) */
    int              width[3];    /* per-plane width:  Y, Cb, Cr */
    int              height[3];   /* per-plane height: Y, Cb, Cr */

    int              mode;        /* filter mode from the settings string */
    int              spatial_metric;    /* which comb scorer to use (0/1/2) */
    int              motion_threshold;  /* min temporal diff to count motion */
    int              spatial_threshold; /* min vertical diff to flag combing */
    int              block_threshold;   /* combed pixels per block to trigger */
    int              block_width;       /* comb-mask scan window width */
    int              block_height;      /* comb-mask scan window height */

    int              parity;      /* field order override (-1 = auto) */
    
    int              yadif_ready; /* nonzero once enough frames are cached */

    int              mcdeint_mode;
    int              mcdeint_qp;

    int              mcdeint_outbuf_size;
    uint8_t        * mcdeint_outbuf;
    AVCodecContext * mcdeint_avctx_enc;  /* snow encoder used for motion comp */
    AVFrame        * mcdeint_frame;
    AVFrame        * mcdeint_frame_dec;

    /* Statistics: how each frame was ultimately handled. */
    int              yadif_deinterlaced_frames;
    int              blend_deinterlaced_frames;
    int              unfiltered_frames;

    /* 4-deep cache of padded reference planes; ref[0..2] hold the
       previous/current/next frames, ref[3] is a spare for rotation. */
    uint8_t        * ref[4][3];
    int              ref_stride[3];

    /* Make a buffer to store a comb mask. */
    uint8_t        * mask[3];

    AVPicture        pic_in;
    AVPicture        pic_out;
    hb_buffer_t *    buf_out[2];     /* double-buffered output frames */
    hb_buffer_t *    buf_settings;   /* settings buffer of the current frame */
};
66
/* Forward declarations for the filter vtable below. */
hb_filter_private_t * hb_decomb_init( int pix_fmt,
                                           int width,
                                           int height,
                                           char * settings );

int hb_decomb_work(      const hb_buffer_t * buf_in,
                         hb_buffer_t ** buf_out,
                         int pix_fmt,
                         int width,
                         int height,
                         hb_filter_private_t * pv );

void hb_decomb_close( hb_filter_private_t * pv );

/* Filter descriptor registered with HandBrake's filter chain. */
hb_filter_object_t hb_filter_decomb =
{
    FILTER_DECOMB,
    "Deinterlaces selectively with yadif/mcdeint or lowpass5 blending",
    NULL,
    hb_decomb_init,
    hb_decomb_work,
    hb_decomb_close,
};
90
/* 4-tap cubic vertical interpolation of a pixel value from the two
   samples above (y0, y1) and below (y2, y3) it, clamped to 0..255.
   Coefficients from http://www.neuron2.net/library/cubicinterp.html */
int cubic_interpolate( int y0, int y1, int y2, int y3 )
{
    int value = ( 23 * ( y1 + y2 ) - 3 * ( y0 + y3 ) ) / 40;

    /* Clamp to the valid 8-bit pixel range. */
    if( value < 0 )
    {
        return 0;
    }
    if( value > 255 )
    {
        return 255;
    }

    return value;
}
108
109 static void store_ref( const uint8_t ** pic,
110                              hb_filter_private_t * pv )
111 {
112     memcpy( pv->ref[3],
113             pv->ref[0],
114             sizeof(uint8_t *)*3 );
115
116     memmove( pv->ref[0],
117              pv->ref[1],
118              sizeof(uint8_t *)*3*3 );
119
120     int i;
121     for( i = 0; i < 3; i++ )
122     {
123         const uint8_t * src = pic[i];
124         uint8_t * ref = pv->ref[2][i];
125
126         int w = pv->width[i];
127         int h = pv->height[i];
128         int ref_stride = pv->ref_stride[i];
129
130         int y;
131         for( y = 0; y < pv->height[i]; y++ )
132         {
133             memcpy(ref, src, w);
134             src = (uint8_t*)src + w;
135             ref = (uint8_t*)ref + ref_stride;
136         }
137     }
138 }
139
140 static void get_ref( uint8_t ** pic, hb_filter_private_t * pv, int frm )
141 {
142     int i;
143     for( i = 0; i < 3; i++ )
144     {
145         uint8_t * dst = pic[i];
146         const uint8_t * ref = pv->ref[frm][i];
147         int w = pv->width[i];
148         int ref_stride = pv->ref_stride[i];
149         
150         int y;
151         for( y = 0; y < pv->height[i]; y++ )
152         {
153             memcpy(dst, ref, w);
154             dst += w;
155             ref += ref_stride;
156         }
157     }
158 }
159
/* Vertically lowpass-blend one pixel from its 4 vertical neighbors
   using a 5-tap (-1, 2, 6, 2, -1)/8 kernel, clamped to 0..255. */
int blend_filter_pixel( int up2, int up1, int current, int down1, int down2 )
{
    int filtered = ( 6 * current + 2 * ( up1 + down1 ) - up2 - down2 ) / 8;

    if( filtered > 255 )
    {
        filtered = 255;
    }
    if( filtered < 0 )
    {
        filtered = 0;
    }

    return filtered;
}
182
183 static void blend_filter_line( uint8_t *dst,
184                                uint8_t *cur,
185                                int plane,
186                                int y,
187                                hb_filter_private_t * pv )
188 {
189     int w = pv->width[plane];
190     int refs = pv->ref_stride[plane];
191     int x;
192
193     for( x = 0; x < w; x++)
194     {
195         int a, b, c, d, e;
196         
197         a = cur[-2*refs];
198         b = cur[-refs];
199         c = cur[0];
200         d = cur[+refs];
201         e = cur[2*refs];
202         
203         if( y == 0 )
204         {
205             /* First line, so A and B don't exist.*/
206             a = cur[0];
207             b = cur[0];
208         }
209         else if( y == 1 )
210         {
211             /* Second line, no A. */
212             a = cur[-refs];
213         }
214         else if( y == (pv->height[plane] - 2) )
215         {
216             /* Second to last line, no E. */
217             e = cur[+refs];
218         }
219         else if( y == (pv->height[plane] -1) )
220         {
221             /* Last line, no D or E. */
222             d = cur[0];
223             e = cur[0];
224         }
225                 
226         dst[0] = blend_filter_pixel( a, b, c, d, e );
227
228         dst++;
229         cur++;
230     }
231 }
232
/* Scan the comb mask built by tritical_detect_comb() and decide how to
   treat the frame. Returns 0 = leave the frame alone, 1 = yadif
   deinterlace it, 2 = lowpass5 blend it. */
int check_combing_mask( hb_filter_private_t * pv )
{
    /* Go through the mask in X*Y blocks. If any of these windows
       have threshold or more combed pixels, consider the whole
       frame to be combed and send it on to be deinterlaced.     */

    /* Block mask threshold -- The number of pixels
       in a block_width * block_height window of
       the mask that need to show combing for the
       whole frame to be seen as such.            */
    int threshold       = pv->block_threshold;
    int block_width     = pv->block_width;
    int block_height    = pv->block_height;
    int block_x, block_y;
    int block_score = 0; int send_to_blend = 0;
    
    int x, y, k;

    /* Only the luma plane (k == 0) is scanned for combing. */
    for( k = 0; k < 1; k++ )
    {
        int ref_stride = pv->ref_stride[k];
        for( y = 0; y < ( pv->height[k] - block_height ); y = y + block_height )
        {
            for( x = 0; x < ( pv->width[k] - block_width ); x = x + block_width )
            {
                block_score = 0;
                for( block_y = 0; block_y < block_height; block_y++ )
                {
                    for( block_x = 0; block_x < block_width; block_x++ )
                    {
                        int mask_y = y + block_y;
                        int mask_x = x + block_x;
                        
                        /* Only count a pixel when its horizontal neighbors
                           are also marked combed, to reject lone noise hits.
                           NOTE(review): the edge guards test the row
                           (y + block_y) while the +/-1 offsets move along
                           the column (mask_x) -- they look like they were
                           meant to guard x == 0 / width - 1; verify. The
                           mask buffers are allocated with 3*w of leading
                           padding, so the stray mask_x - 1 read at the left
                           edge stays inside the allocation.                */
                        if( y + block_y == 0 )
                        {
                            if( pv->mask[k][mask_y*ref_stride+mask_x    ] == 255 &&
                                pv->mask[k][mask_y*ref_stride+mask_x + 1] == 255 )
                                    block_score++;
                        }
                        else if( y + block_y == pv->height[k] - 1 )
                        {
                            if( pv->mask[k][mask_y*ref_stride+mask_x - 1] == 255 &&
                                pv->mask[k][mask_y*ref_stride+mask_x    ] == 255 )
                                    block_score++;
                        }
                        else
                        {
                            if( pv->mask[k][mask_y*ref_stride+mask_x - 1] == 255 &&
                                pv->mask[k][mask_y*ref_stride+mask_x    ] == 255 &&
                                pv->mask[k][mask_y*ref_stride+mask_x + 1] == 255 )
                                    block_score++;
                        } 
                    }
                }

                /* Half the threshold is enough to consider doing something;
                   what we do depends on the score and on whether the
                   detelecine pass flagged the frame as film (flag bit 16). */
                if( block_score >= ( threshold / 2 ) )
                {
#if 0
                    hb_log("decomb: frame %i | score %i | type %s", pv->yadif_deinterlaced_frames + pv->blend_deinterlaced_frames +  pv->unfiltered_frames + 1, block_score, pv->buf_settings->flags & 16 ? "Film" : "Video");
#endif
                    if ( block_score <= threshold && !( pv->buf_settings->flags & 16) )
                    {
                        /* Blend video content that scores between
                           ( threshold / 2 ) and threshold.        */
                        send_to_blend = 1;
                    }
                    else if( block_score > threshold )
                    {
                        if( pv->buf_settings->flags & 16 )
                        {
                            /* Blend progressive content above the threshold.*/
                            return 2;
                        }
                        else
                        {
                            /* Yadif deinterlace video content above the threshold. */
                            return 1;
                        }
                    }
                }
            }
        } 
    }
    
    if( send_to_blend )
    {
        return 2;
    }
    else
    {
        /* Consider this frame to be uncombed. */
        return 0;
    }
}
330
/* Build a per-pixel comb mask for the current frame (ref[1]) and hand it
   to check_combing_mask(). Returns its verdict: 0 = clean, 1 = yadif,
   2 = blend. A pixel is marked combed (255) when it differs from both
   vertical neighbors in the same direction, shows motion across the
   neighboring frames (if mthresh > 0), and passes the selected spatial
   metric. */
int tritical_detect_comb( hb_filter_private_t * pv )
{
    /* A mish-mash of various comb detection tricks
       picked up from neuron2's Decomb plugin for
       AviSynth and tritical's IsCombedT and
       IsCombedTIVTC plugins.                       */
       
    int x, y, k, width, height;
    
    /* Comb scoring algorithm */
    int spatial_metric  = pv->spatial_metric;
    /* Motion threshold */
    int mthresh         = pv->motion_threshold;
    /* Spatial threshold */
    int athresh         = pv->spatial_threshold;
    int athresh_squared = athresh * athresh;
    int athresh6        = 6 *athresh;

    /* Only the luma plane is scanned -- the loop runs for k == 0 alone. */
    for( k = 0; k < 1; k++ )
    {
        int ref_stride  = pv->ref_stride[k];
        width           = pv->width[k];
        height          = pv->height[k];

        /* The 5-pixel column below needs two rows of margin, so the
           top and bottom two rows are never masked. */
        for( y = 2; y < ( height - 2 ); y++ )
        {
            /* These are just to make the buffer locations easier to read. */
            int back_2    = ( y - 2 )*ref_stride ;
            int back_1    = ( y - 1 )*ref_stride;
            int current   =         y*ref_stride;
            int forward_1 = ( y + 1 )*ref_stride;
            int forward_2 = ( y + 2 )*ref_stride;
            
            /* We need to examine a column of 5 pixels
               in the prev, cur, and next frames.      */
            uint8_t previous_frame[5];
            uint8_t current_frame[5];
            uint8_t next_frame[5];
            
            for( x = 0; x < width; x++ )
            {
                /* Fill up the current frame array with the current pixel values.*/
                current_frame[0] = pv->ref[1][k][back_2    + x];
                current_frame[1] = pv->ref[1][k][back_1    + x];
                current_frame[2] = pv->ref[1][k][current   + x];
                current_frame[3] = pv->ref[1][k][forward_1 + x];
                current_frame[4] = pv->ref[1][k][forward_2 + x];

                int up_diff   = current_frame[2] - current_frame[1];
                int down_diff = current_frame[2] - current_frame[3];

                if( ( up_diff >  athresh && down_diff >  athresh ) ||
                    ( up_diff < -athresh && down_diff < -athresh ) )
                {
                    /* The pixel above and below are different,
                       and they change in the same "direction" too.*/
                    int motion = 0;
                    if( mthresh > 0 )
                    {
                        /* Make sure there's sufficient motion between frame t-1 to frame t+1. */
                        previous_frame[0] = pv->ref[0][k][back_2    + x];
                        previous_frame[1] = pv->ref[0][k][back_1    + x];
                        previous_frame[2] = pv->ref[0][k][current   + x];
                        previous_frame[3] = pv->ref[0][k][forward_1 + x];
                        previous_frame[4] = pv->ref[0][k][forward_2 + x];
                        next_frame[0]     = pv->ref[2][k][back_2    + x];
                        next_frame[1]     = pv->ref[2][k][back_1    + x];
                        next_frame[2]     = pv->ref[2][k][current   + x];
                        next_frame[3]     = pv->ref[2][k][forward_1 + x];
                        next_frame[4]     = pv->ref[2][k][forward_2 + x];
                        
                        /* Two checks, one per field parity: motion counts
                           when the pixel and both vertical neighbors all
                           moved between the relevant frames. */
                        if( abs( previous_frame[2] - current_frame[2] ) > mthresh &&
                            abs(  current_frame[1] - next_frame[1]    ) > mthresh &&
                            abs(  current_frame[3] - next_frame[3]    ) > mthresh )
                                motion++;
                        if( abs(     next_frame[2] - current_frame[2] ) > mthresh &&
                            abs( previous_frame[1] - current_frame[1] ) > mthresh &&
                            abs( previous_frame[3] - current_frame[3] ) > mthresh )
                                motion++;
                    }
                    else
                    {
                        /* User doesn't want to check for motion,
                           so move on to the spatial check.       */
                        motion = 1;
                    }
                           
                    /* The first frame has no history, so always run the
                       spatial check on it even without detected motion. */
                    if( motion || ( pv->yadif_deinterlaced_frames==0 && pv->blend_deinterlaced_frames==0 && pv->unfiltered_frames==0) )
                    {
                           /*That means it's time for the spatial check.
                           We've got several options here.             */
                        if( spatial_metric == 0 )
                        {
                            /* Simple 32detect style comb detection */
                            if( ( abs( current_frame[2] - current_frame[4] ) < 10  ) &&
                                ( abs( current_frame[2] - current_frame[3] ) > 15 ) )
                            {
                                pv->mask[k][y*ref_stride + x] = 255;
                            }
                            else
                            {
                                pv->mask[k][y*ref_stride + x] = 0;
                            }
                        }
                        else if( spatial_metric == 1 )
                        {
                            /* This, for comparison, is what IsCombed uses.
                               It's better, but still noise sensitive.      */
                               int combing = ( current_frame[1] - current_frame[2] ) *
                                             ( current_frame[3] - current_frame[2] );
                               
                               if( combing > athresh_squared )
                                   pv->mask[k][y*ref_stride + x] = 255; 
                               else
                                   pv->mask[k][y*ref_stride + x] = 0;
                        }
                        else if( spatial_metric == 2 )
                        {
                            /* Tritical's noise-resistant combing scorer.
                               The check is done on a bob+blur convolution. */
                            int combing = abs( current_frame[0]
                                             + ( 4 * current_frame[2] )
                                             + current_frame[4]
                                             - ( 3 * ( current_frame[1]
                                                     + current_frame[3] ) ) );

                            /* If the frame is sufficiently combed,
                               then mark it down on the mask as 255. */
                            if( combing > athresh6 )
                                pv->mask[k][y*ref_stride + x] = 255; 
                            else
                                pv->mask[k][y*ref_stride + x] = 0;
                        }
                    }
                    else
                    {
                        pv->mask[k][y*ref_stride + x] = 0;
                    }
                }
                else
                {
                    pv->mask[k][y*ref_stride + x] = 0;
                }
            }
        }
    }
    
    return check_combing_mask( pv );
}
481
/* Interpolate one missing field line (row y of a plane) with yadif.
   prev/cur/next point at the start of row y in the previous, current, and
   next cached frames; dst receives width[plane] pixels. parity selects
   which neighboring fields act as the temporal reference. The yadif
   algorithm was created by Michael Niedermayer. */
static void yadif_filter_line( uint8_t *dst,
                               uint8_t *prev,
                               uint8_t *cur,
                               uint8_t *next,
                               int plane,
                               int parity,
                               int y,
                               hb_filter_private_t * pv )
{
    /* prev2/next2 are the fields with the same parity as the line being
       rebuilt: one of them aliases cur depending on parity. */
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    int w = pv->width[plane];
    int refs = pv->ref_stride[plane];
    int x;
    
    for( x = 0; x < w; x++)
    {
        /* Pixel above*/
        int c              = cur[-refs];
        /* Temporal average -- the current pixel location in the previous and next fields */
        int d              = (prev2[0] + next2[0])>>1;
        /* Pixel below */
        int e              = cur[+refs];
        
        /* How the current pixel changes from the field before to the field after */
        int temporal_diff0 = ABS(prev2[0] - next2[0]);
        /* The average of how much the pixels above and below change from the field before to now. */
        int temporal_diff1 = ( ABS(prev[-refs] - cur[-refs]) + ABS(prev[+refs] - cur[+refs]) ) >> 1;
        /* The average of how much the pixels above and below change from now to the next field. */
        int temporal_diff2 = ( ABS(next[-refs] - cur[-refs]) + ABS(next[+refs] - cur[+refs]) ) >> 1;
        /* For the actual difference, use the largest of the previous average diffs. */
        int diff           = MAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2);
        
        /* SAD of how the pixel-1, the pixel, and the pixel+1 change from the line above to below. */ 
        int spatial_score  = ABS(cur[-refs-1] - cur[+refs-1]) + ABS(cur[-refs]-cur[+refs]) +
                                     ABS(cur[-refs+1] - cur[+refs+1]) - 1;         
        int spatial_pred;
         
        /* Spatial pred is either a bilinear or cubic vertical interpolation. */
        if( pv->mode > 0 )
        {
            spatial_pred = cubic_interpolate( cur[-3*refs], cur[-refs], cur[+refs], cur[3*refs] );
        }
        else
        {
            spatial_pred = (c+e)>>1;
        }

/* EDDI: Edge Directed Deinterlacing Interpolation
   Uses the Martinez-Lim Line Shift Parametric Modeling algorithm...I think.
   Checks 4 different slopes to see if there is more similarity along a diagonal
   than there was vertically. If a diagonal is more similar, then it indicates
   an edge, so interpolate along that instead of a vertical line, using either
   linear or cubic interpolation depending on mode.
   NOTE(review): the macro deliberately leaves braces open; the `}} }}`
   runs at the invocation sites below close them, so the -2/+2 checks only
   run when the corresponding -1/+1 check improved the score.            */
#define YADIF_CHECK(j)\
        {   int score = ABS(cur[-refs-1+j] - cur[+refs-1-j])\
                      + ABS(cur[-refs  +j] - cur[+refs  -j])\
                      + ABS(cur[-refs+1+j] - cur[+refs+1-j]);\
            if( score < spatial_score ){\
                spatial_score = score;\
                if( pv->mode > 0 )\
                {\
                    switch(j)\
                    {\
                        case -1:\
                            spatial_pred = cubic_interpolate(cur[-3 * refs - 3], cur[-refs -1], cur[+refs + 1], cur[3* refs + 3] );\
                        break;\
                        case -2:\
                            spatial_pred = cubic_interpolate( ( ( cur[-3*refs - 4] + cur[-refs - 4] ) / 2 ) , cur[-refs -2], cur[+refs + 2], ( ( cur[3*refs + 4] + cur[refs + 4] ) / 2 ) );\
                        break;\
                        case 1:\
                            spatial_pred = cubic_interpolate(cur[-3 * refs +3], cur[-refs +1], cur[+refs - 1], cur[3* refs -3] );\
                        break;\
                        case 2:\
                            spatial_pred = cubic_interpolate(( ( cur[-3*refs + 4] + cur[-refs + 4] ) / 2 ), cur[-refs +2], cur[+refs - 2], ( ( cur[3*refs - 4] + cur[refs - 4] ) / 2 ) );\
                        break;\
                    }\
                }\
                else\
                {\
                    spatial_pred = ( cur[-refs +j] + cur[+refs -j] ) >>1;\
                }\
                
                YADIF_CHECK(-1) YADIF_CHECK(-2) }} }}
                YADIF_CHECK( 1) YADIF_CHECK( 2) }} }}
                                
        /* Temporally adjust the spatial prediction by comparing
           against fields in the previous and next frames.       */
        int b = (prev2[-2*refs] + next2[-2*refs])>>1;
        int f = (prev2[+2*refs] + next2[+2*refs])>>1;
        
        /* Find the median value */
        int max = MAX3(d-e, d-c, MIN(b-c, f-e));
        int min = MIN3(d-e, d-c, MAX(b-c, f-e));
        diff = MAX3( diff, min, -max );
        
        /* Clamp the spatial prediction into the temporally allowed range. */
        if( spatial_pred > d + diff )
        {
            spatial_pred = d + diff;
        }
        else if( spatial_pred < d - diff )
        {
            spatial_pred = d - diff;
        }
        
        dst[0] = spatial_pred;
                        
        dst++;
        cur++;
        prev++;
        next++;
        prev2++;
        next2++;
    }
}
598
599 static void yadif_filter( uint8_t ** dst,
600                           int parity,
601                           int tff,
602                           hb_filter_private_t * pv )
603 {
604     
605     int is_combed = tritical_detect_comb( pv );
606     
607     if( is_combed == 1 )
608     {
609         pv->yadif_deinterlaced_frames++;
610     }
611     else if( is_combed == 2 )
612     {
613         pv->blend_deinterlaced_frames++;
614     }
615     else
616     {
617         pv->unfiltered_frames++;
618     }
619
620     int i;
621     for( i = 0; i < 3; i++ )
622     {
623         int w = pv->width[i];
624         int h = pv->height[i];
625         int ref_stride = pv->ref_stride[i];        
626         
627         int y;
628         for( y = 0; y < h; y++ )
629         {
630             if( ( pv->mode == 4 && is_combed ) || is_combed == 2 )
631             {
632                 uint8_t *prev = &pv->ref[0][i][y*ref_stride];
633                 uint8_t *cur  = &pv->ref[1][i][y*ref_stride];
634                 uint8_t *next = &pv->ref[2][i][y*ref_stride];
635                 uint8_t *dst2 = &dst[i][y*w];
636
637                 blend_filter_line( dst2, cur, i, y, pv );
638             }
639             else if( (y ^ parity) & 1 && is_combed == 1 )
640             {
641                 uint8_t *prev = &pv->ref[0][i][y*ref_stride];
642                 uint8_t *cur  = &pv->ref[1][i][y*ref_stride];
643                 uint8_t *next = &pv->ref[2][i][y*ref_stride];
644                 uint8_t *dst2 = &dst[i][y*w];
645
646                 yadif_filter_line( dst2, prev, cur, next, i, parity ^ tff, y, pv );
647             }
648             else
649             {
650                 memcpy( &dst[i][y*w],
651                         &pv->ref[1][i][y*ref_stride],
652                         w * sizeof(uint8_t) );              
653             }
654         }
655     }
656 }
657
/* Motion-compensated deinterlacing pass (mcdeint). Encodes src with the
   snow codec to obtain a motion-compensated reconstruction, then for each
   missing-field pixel nudges the reconstruction toward the source based on
   the vertical differences along the best-matching diagonal. dst receives
   the result; the kept field is copied through unchanged.                 */
static void mcdeint_filter( uint8_t ** dst,
                            uint8_t ** src,
                            int parity,
                            hb_filter_private_t * pv )
{
    int x, y, i;
    int out_size;

#ifdef SUPPRESS_AV_LOG
    /* TODO: temporarily change log level to suppress obnoxious debug output */
    int loglevel = av_log_get_level();
    av_log_set_level( AV_LOG_QUIET );
#endif

    /* Hand the (packed) source planes to the encoder frame. */
    for( i=0; i<3; i++ )
    {
        pv->mcdeint_frame->data[i] = src[i];
        pv->mcdeint_frame->linesize[i] = pv->width[i];
    }
    pv->mcdeint_avctx_enc->me_cmp     = FF_CMP_SAD;
    pv->mcdeint_avctx_enc->me_sub_cmp = FF_CMP_SAD;
    pv->mcdeint_frame->quality        = pv->mcdeint_qp * FF_QP2LAMBDA;

    /* Encode and take the decoder-side reconstruction as the
       motion-compensated estimate. */
    out_size = avcodec_encode_video( pv->mcdeint_avctx_enc,
                                     pv->mcdeint_outbuf,
                                     pv->mcdeint_outbuf_size,
                                     pv->mcdeint_frame );

    pv->mcdeint_frame_dec = pv->mcdeint_avctx_enc->coded_frame;

    for( i = 0; i < 3; i++ )
    {
        int w    = pv->width[i];
        int h    = pv->height[i];
        int fils = pv->mcdeint_frame_dec->linesize[i];
        int srcs = pv->width[i];

        /* First pass: rebuild the lines of the missing field. */
        for( y = 0; y < h; y++ )
        {
            if( (y ^ parity) & 1 )
            {
                for( x = 0; x < w; x++ )
                {
                    /* Stay one line inside the plane so the +/-1 row and
                       column reads below are in range. */
                    if( (x-2)+(y-1)*w >= 0 && (x+2)+(y+1)*w < w*h )
                    {
                        uint8_t * filp =
                            &pv->mcdeint_frame_dec->data[i][x + y*fils];
                        uint8_t * srcp = &src[i][x + y*srcs];

                        /* Reconstruction-vs-source differences above and
                           below the pixel. */
                        int diff0 = filp[-fils] - srcp[-srcs];
                        int diff1 = filp[+fils] - srcp[+srcs];

                        int spatial_score =
                              ABS(srcp[-srcs-1] - srcp[+srcs-1])
                            + ABS(srcp[-srcs  ] - srcp[+srcs  ])
                            + ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;

                        int temp = filp[0];

/* NOTE(review): like YADIF_CHECK, this macro leaves braces open on purpose;
   the `}} }}` at the invocation sites close them so the +/-2 checks only run
   when the matching +/-1 check improved the score. */
#define MCDEINT_CHECK(j)\
                        {   int score = ABS(srcp[-srcs-1+j] - srcp[+srcs-1-j])\
                                      + ABS(srcp[-srcs  +j] - srcp[+srcs  -j])\
                                      + ABS(srcp[-srcs+1+j] - srcp[+srcs+1-j]);\
                            if( score < spatial_score ) {\
                                spatial_score = score;\
                                diff0 = filp[-fils+j] - srcp[-srcs+j];\
                                diff1 = filp[+fils-j] - srcp[+srcs-j];

                        MCDEINT_CHECK(-1) MCDEINT_CHECK(-2) }} }}
                        MCDEINT_CHECK( 1) MCDEINT_CHECK( 2) }} }}

                        if(diff0 + diff1 > 0)
                        {
                            temp -= (diff0 + diff1 -
                                     ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;
                        }
                        else
                        {
                            temp -= (diff0 + diff1 +
                                     ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;
                        }

                        /* Branchless clamp of temp to 0..255: the unsigned
                           compare catches negatives, and ~(temp>>31) maps
                           them to 0 and overflows to 255. */
                        filp[0] = dst[i][x + y*w] =
                            temp > 255U ? ~(temp>>31) : temp;
                    }
                    else
                    {
                        dst[i][x + y*w] =
                            pv->mcdeint_frame_dec->data[i][x + y*fils];
                    }
                }
            }
        }

        /* Second pass: copy the kept field straight from the source. */
        for( y = 0; y < h; y++ )
        {
            if( !((y ^ parity) & 1) )
            {
                for( x = 0; x < w; x++ )
                {
                    pv->mcdeint_frame_dec->data[i][x + y*fils] =
                        dst[i][x + y*w]= src[i][x + y*srcs];
                }
            }
        }
    }

#ifdef SUPPRESS_AV_LOG
    /* TODO: restore previous log level */
    av_log_set_level(loglevel);
#endif
}
770
771 hb_filter_private_t * hb_decomb_init( int pix_fmt,
772                                            int width,
773                                            int height,
774                                            char * settings )
775 {
776     if( pix_fmt != PIX_FMT_YUV420P )
777     {
778         return 0;
779     }
780
781     hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );
782
783     pv->pix_fmt = pix_fmt;
784
785     pv->width[0]  = width;
786     pv->height[0] = height;
787     pv->width[1]  = pv->width[2]  = width >> 1;
788     pv->height[1] = pv->height[2] = height >> 1;
789
790     int buf_size = 3 * width * height / 2;
791     pv->buf_out[0] = hb_buffer_init( buf_size );
792     pv->buf_out[1] = hb_buffer_init( buf_size );
793     pv->buf_settings = hb_buffer_init( 0 );
794
795     pv->yadif_deinterlaced_frames = 0;
796     pv->blend_deinterlaced_frames = 0;
797     pv->unfiltered_frames = 0;
798
799     pv->yadif_ready    = 0;
800
801     pv->mode     = MODE_DEFAULT;
802     pv->spatial_metric = 2;
803     pv->motion_threshold = 6;
804     pv->spatial_threshold = 9;
805     pv->block_threshold = 80;
806     pv->block_width = 16;
807     pv->block_height = 16;
808     
809     pv->parity   = PARITY_DEFAULT;
810
811     pv->mcdeint_mode   = MCDEINT_MODE_DEFAULT;
812     pv->mcdeint_qp     = MCDEINT_QP_DEFAULT;
813
814     if( settings )
815     {
816         sscanf( settings, "%d:%d:%d:%d:%d:%d:%d",
817                 &pv->mode,
818                 &pv->spatial_metric,
819                 &pv->motion_threshold,
820                 &pv->spatial_threshold,
821                 &pv->block_threshold,
822                 &pv->block_width,
823                 &pv->block_height );
824     }
825
826     if( pv->mode == 2 || pv->mode == 3 )
827     {
828         pv->mcdeint_mode = 0;
829     }
830     
831     /* Allocate yadif specific buffers */
832     int i, j;
833     for( i = 0; i < 3; i++ )
834     {
835         int is_chroma = !!i;
836         int w = ((width   + 31) & (~31))>>is_chroma;
837         int h = ((height+6+ 31) & (~31))>>is_chroma;
838
839         pv->ref_stride[i] = w;
840
841         for( j = 0; j < 3; j++ )
842         {
843             pv->ref[j][i] = malloc( w*h*sizeof(uint8_t) ) + 3*w;
844         }
845     }
846
847     /* Allocate a buffer to store a comb mask. */
848     for( i = 0; i < 3; i++ )
849     {
850         int is_chroma = !!i;
851         int w = ((pv->width[0]   + 31) & (~31))>>is_chroma;
852         int h = ((pv->height[0]+6+ 31) & (~31))>>is_chroma;
853
854         pv->mask[i] = malloc( w*h*sizeof(uint8_t) ) + 3*w;
855     }
856
857     /* Allocate mcdeint specific buffers */
858     if( pv->mcdeint_mode >= 0 )
859     {
860         avcodec_init();
861         avcodec_register_all();
862
863         AVCodec * enc = avcodec_find_encoder( CODEC_ID_SNOW );
864
865         int i;
866         for (i = 0; i < 3; i++ )
867         {
868             AVCodecContext * avctx_enc;
869
870             avctx_enc = pv->mcdeint_avctx_enc = avcodec_alloc_context();
871
872             avctx_enc->width                    = width;
873             avctx_enc->height                   = height;
874             avctx_enc->time_base                = (AVRational){1,25};  // meaningless
875             avctx_enc->gop_size                 = 300;
876             avctx_enc->max_b_frames             = 0;
877             avctx_enc->pix_fmt                  = PIX_FMT_YUV420P;
878             avctx_enc->flags                    = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
879             avctx_enc->strict_std_compliance    = FF_COMPLIANCE_EXPERIMENTAL;
880             avctx_enc->global_quality           = 1;
881             avctx_enc->flags2                   = CODEC_FLAG2_MEMC_ONLY;
882             avctx_enc->me_cmp                   = FF_CMP_SAD; //SSE;
883             avctx_enc->me_sub_cmp               = FF_CMP_SAD; //SSE;
884             avctx_enc->mb_cmp                   = FF_CMP_SSE;
885
886             switch( pv->mcdeint_mode )
887             {
888                 case 3:
889                     avctx_enc->refs = 3;
890                 case 2:
891                     avctx_enc->me_method = ME_UMH;
892                 case 1:
893                     avctx_enc->flags |= CODEC_FLAG_4MV;
894                     avctx_enc->dia_size =2;
895                 case 0:
896                     avctx_enc->flags |= CODEC_FLAG_QPEL;
897             }
898
899             avcodec_open(avctx_enc, enc);
900         }
901
902         pv->mcdeint_frame       = avcodec_alloc_frame();
903         pv->mcdeint_outbuf_size = width * height * 10;
904         pv->mcdeint_outbuf      = malloc( pv->mcdeint_outbuf_size );
905     }
906
907     return pv;
908 }
909
910 void hb_decomb_close( hb_filter_private_t * pv )
911 {
912     if( !pv )
913     {
914         return;
915     }
916     
917     hb_log("decomb: yadif deinterlaced %i | blend deinterlaced %i | unfiltered %i | total %i", pv->yadif_deinterlaced_frames, pv->blend_deinterlaced_frames, pv->unfiltered_frames, pv->yadif_deinterlaced_frames + pv->blend_deinterlaced_frames + pv->unfiltered_frames);
918
919     /* Cleanup frame buffers */
920     if( pv->buf_out[0] )
921     {
922         hb_buffer_close( &pv->buf_out[0] );
923     }
924     if( pv->buf_out[1] )
925     {
926         hb_buffer_close( &pv->buf_out[1] );
927     }
928     if (pv->buf_settings )
929     {
930         hb_buffer_close( &pv->buf_settings );
931     }
932
933     /* Cleanup yadif specific buffers */
934     int i;
935     for( i = 0; i<3*3; i++ )
936     {
937         uint8_t **p = &pv->ref[i%3][i/3];
938         if (*p)
939         {
940             free( *p - 3*pv->ref_stride[i/3] );
941             *p = NULL;
942         }
943     }
944     
945     /* Cleanup combing mask. */
946     for( i = 0; i<3*3; i++ )
947     {
948         uint8_t **p = &pv->mask[i/3];
949         if (*p)
950         {
951             free( *p - 3*pv->ref_stride[i/3] );
952             *p = NULL;
953         }
954     }
955     
956     /* Cleanup mcdeint specific buffers */
957     if( pv->mcdeint_mode >= 0 )
958     {
959         if( pv->mcdeint_avctx_enc )
960         {
961             avcodec_close( pv->mcdeint_avctx_enc );
962             av_freep( &pv->mcdeint_avctx_enc );
963         }
964         if( pv->mcdeint_outbuf )
965         {
966             free( pv->mcdeint_outbuf );
967         }
968     }
969
970     free( pv );
971 }
972
973 int hb_decomb_work( const hb_buffer_t * cbuf_in,
974                     hb_buffer_t ** buf_out,
975                     int pix_fmt,
976                     int width,
977                     int height,
978                     hb_filter_private_t * pv )
979 {
980     hb_buffer_t * buf_in = (hb_buffer_t *)cbuf_in;
981
982     if( !pv ||
983         pix_fmt != pv->pix_fmt ||
984         width   != pv->width[0] ||
985         height  != pv->height[0] )
986     {
987         return FILTER_FAILED;
988     }
989
990     avpicture_fill( &pv->pic_in, buf_in->data,
991                     pix_fmt, width, height );
992
993     /* Determine if top-field first layout */
994     int tff;
995     if( pv->parity < 0 )
996     {
997         tff = !!(buf_in->flags & PIC_FLAG_TOP_FIELD_FIRST);
998     }
999     else
1000     {
1001         tff = (pv->parity & 1) ^ 1;
1002     }
1003
1004     /* Store current frame in yadif cache */
1005     store_ref( (const uint8_t**)pv->pic_in.data, pv );
1006
1007     /* If yadif is not ready, store another ref and return FILTER_DELAY */
1008     if( pv->yadif_ready == 0 )
1009     {
1010         store_ref( (const uint8_t**)pv->pic_in.data, pv );
1011
1012         hb_buffer_copy_settings( pv->buf_settings, buf_in );
1013
1014         /* don't let 'work_loop' send a chapter mark upstream */
1015         buf_in->new_chap  = 0;
1016
1017         pv->yadif_ready = 1;
1018
1019         return FILTER_DELAY;
1020     }
1021
1022     /* Perform yadif filtering */        
1023     int frame;
1024     for( frame = 0; frame <= ( ( pv->mode == 2 || pv->mode == 3 )? 1 : 0 ) ; frame++ )
1025     {
1026         int parity = frame ^ tff ^ 1;
1027
1028         avpicture_fill( &pv->pic_out, pv->buf_out[!(frame^1)]->data,
1029                         pix_fmt, width, height );
1030
1031         yadif_filter( pv->pic_out.data, parity, tff, pv );
1032
1033         if( pv->mcdeint_mode >= 0 )
1034         {
1035             /* Perform mcdeint filtering */
1036             avpicture_fill( &pv->pic_in,  pv->buf_out[(frame^1)]->data,
1037                             pix_fmt, width, height );
1038
1039             mcdeint_filter( pv->pic_in.data, pv->pic_out.data, parity, pv );
1040         }
1041
1042         *buf_out = pv->buf_out[!(frame^1)];
1043     }
1044
1045     /* Copy buffered settings to output buffer settings */
1046     hb_buffer_copy_settings( *buf_out, pv->buf_settings );
1047
1048     /* Replace buffered settings with input buffer settings */
1049     hb_buffer_copy_settings( pv->buf_settings, buf_in );
1050
1051     /* don't let 'work_loop' send a chapter mark upstream */
1052     buf_in->new_chap  = 0;
1053
1054     return FILTER_OK;
1055 }