OSDN Git Service

Code comment clean-up, mostly to clarify the difference between the prev and next...
[handbrake-jp/handbrake-jp-git.git] / libhb / decomb.c
1 /* $Id: decomb.c,v 1.14 2008/04/25 5:00:00 jbrjake Exp $
2
3    This file is part of the HandBrake source code.
4    Homepage: <http://handbrake.fr/>.
5    It may be used under the terms of the GNU General Public License. 
6    
7    The yadif algorithm was created by Michael Niedermayer. */
8 #include "hb.h"
9 #include "libavcodec/avcodec.h"
10 #include "mpeg2dec/mpeg2.h"
11
12 #define SUPPRESS_AV_LOG
13
14 #define MODE_DEFAULT     1
15 #define PARITY_DEFAULT   -1
16
17 #define MCDEINT_MODE_DEFAULT   -1
18 #define MCDEINT_QP_DEFAULT      1
19
20 #define ABS(a) ((a) > 0 ? (a) : (-(a)))
21 #define MIN3(a,b,c) MIN(MIN(a,b),c)
22 #define MAX3(a,b,c) MAX(MAX(a,b),c)
23
struct hb_filter_private_s
{
    int              pix_fmt;    /* input pixel format; init only accepts PIX_FMT_YUV420P */
    int              width[3];   /* per-plane widths:  [0] luma, [1]/[2] chroma (half width) */
    int              height[3];  /* per-plane heights, indexed like width[] */

    int              mode;              /* filter mode: >0 enables cubic interpolation,
                                           4 blends any combed frame (see yadif_filter) */
    int              spatial_metric;    /* which spatial comb detector to use (0, 1 or 2) */
    int              motion_threshold;  /* min inter-frame delta before a pixel counts as moving */
    int              spatial_threshold; /* min vertical delta for a pixel to look combed */
    int              block_threshold;   /* combed pixels per window to flag the whole frame */
    int              block_width;       /* comb-detection window width  */
    int              block_height;      /* comb-detection window height */

    int              parity;     /* field parity override; -1 = auto (PARITY_DEFAULT) */

    int              yadif_ready;       /* nonzero once enough reference frames are buffered */

    int              mcdeint_mode;      /* -1 disables the mcdeint post-process pass */
    int              mcdeint_qp;        /* QP handed to the snow encoder used by mcdeint */

    int              mcdeint_outbuf_size;
    uint8_t        * mcdeint_outbuf;
    AVCodecContext * mcdeint_avctx_enc;
    AVFrame        * mcdeint_frame;
    AVFrame        * mcdeint_frame_dec;

    /* Per-strategy frame counters, kept for logging/statistics. */
    int              yadif_deinterlaced_frames;
    int              blend_deinterlaced_frames;
    int              unfiltered_frames;

    /* Reference frame planes: ref[0] = previous, ref[1] = current,
       ref[2] = next frame; ref[3] is scratch used while rotating
       buffers in store_ref(). */
    uint8_t        * ref[4][3];
    int              ref_stride[3];

    /* Buffer to store a comb mask (one byte per pixel, per plane). */
    uint8_t        * mask[3];

    AVPicture        pic_in;
    AVPicture        pic_out;
    hb_buffer_t *    buf_out[2];     /* double-buffered filter output */
    hb_buffer_t *    buf_settings;   /* carries timing/flag metadata between frames */
};
66
/* Filter entry points, referenced by the hb_filter_decomb object below. */
hb_filter_private_t * hb_decomb_init( int pix_fmt,
                                           int width,
                                           int height,
                                           char * settings );

int hb_decomb_work(      const hb_buffer_t * buf_in,
                         hb_buffer_t ** buf_out,
                         int pix_fmt,
                         int width,
                         int height,
                         hb_filter_private_t * pv );

void hb_decomb_close( hb_filter_private_t * pv );
80
/* Public filter descriptor: id, description, then the three callbacks.
   NOTE(review): the NULL slot is presumably the default settings string
   of hb_filter_object_t -- confirm against the struct declaration. */
hb_filter_object_t hb_filter_decomb =
{
    FILTER_DECOMB,
    "Deinterlaces selectively with yadif/mcdeint or lowpass5 blending",
    NULL,
    hb_decomb_init,
    hb_decomb_work,
    hb_decomb_close,
};
90
int cubic_interpolate( int y0, int y1, int y2, int y3 )
{
    /* 4-tap vertical cubic interpolation with integer coefficients
       (-3, 23, 23, -3)/40, from
       http://www.neuron2.net/library/cubicinterp.html */
    int value = 23 * ( y1 + y2 ) - 3 * ( y0 + y3 );
    value /= 40;

    /* Clamp to the valid 8-bit pixel range. */
    if( value < 0 )
    {
        return 0;
    }
    if( value > 255 )
    {
        return 255;
    }

    return value;
}
108
/* Rotate the reference frames down one slot (prev <- cur <- next) and
   copy the newly decoded picture `pic` into the "next" slot.  Source
   rows are assumed packed (stride == width); the reference planes keep
   their own aligned stride. */
static void store_ref( const uint8_t ** pic,
                             hb_filter_private_t * pv )
{
    /* Park the oldest frame's plane pointers in the ref[3] scratch slot
       so those buffers are recycled below rather than leaked. */
    memcpy( pv->ref[3],
            pv->ref[0],
            sizeof(uint8_t *)*3 );

    /* Shift ref[1..3] into ref[0..2].  Must be memmove: the source and
       destination ranges of the pointer table overlap.  After this,
       ref[2] holds the recycled buffers the new frame is copied into. */
    memmove( pv->ref[0],
             pv->ref[1],
             sizeof(uint8_t *)*3*3 );

    int i;
    for( i = 0; i < 3; i++ )    /* one pass per plane: Y, U, V */
    {
        const uint8_t * src = pic[i];
        uint8_t * ref = pv->ref[2][i];

        int w = pv->width[i];
        int h = pv->height[i];
        int ref_stride = pv->ref_stride[i];

        int y;
        for( y = 0; y < pv->height[i]; y++ )
        {
            memcpy(ref, src, w);
            src = (uint8_t*)src + w;
            ref = (uint8_t*)ref + ref_stride;
        }
    }
}
139
140 static void get_ref( uint8_t ** pic, hb_filter_private_t * pv, int frm )
141 {
142     int i;
143     for( i = 0; i < 3; i++ )
144     {
145         uint8_t * dst = pic[i];
146         const uint8_t * ref = pv->ref[frm][i];
147         int w = pv->width[i];
148         int ref_stride = pv->ref_stride[i];
149         
150         int y;
151         for( y = 0; y < pv->height[i]; y++ )
152         {
153             memcpy(dst, ref, w);
154             dst += w;
155             ref += ref_stride;
156         }
157     }
158 }
159
int blend_filter_pixel( int up2, int up1, int current, int down1, int down2 )
{
    /* Vertical 5-tap low-pass filter with kernel (-1, 2, 6, 2, -1)/8,
       used to blend the two fields of a frame together. */
    int value = ( 6 * current + 2 * ( up1 + down1 ) - up2 - down2 ) / 8;

    /* Clamp to the valid 8-bit pixel range. */
    if( value < 0 )
    {
        value = 0;
    }
    if( value > 255 )
    {
        value = 255;
    }

    return value;
}
182
/* Apply the 5-tap low-pass blend to one row (`y`) of one plane,
   writing pv->width[plane] filtered pixels to dst.  `cur` points at the
   start of the row within the strided reference plane. */
static void blend_filter_line( uint8_t *dst,
                               uint8_t *cur,
                               int plane,
                               int y,
                               hb_filter_private_t * pv )
{
    int w = pv->width[plane];
    int refs = pv->ref_stride[plane];
    int x;

    for( x = 0; x < w; x++)
    {
        int a, b, c, d, e;

        /* The two rows above/below are read unconditionally first; near
           the frame edges these reads land in the padding rows allocated
           with the ref planes (the +3*w / +6-row slack in hb_decomb_init)
           and the values are replaced below.  NOTE(review): confirm the
           padding really covers two rows past the last line. */
        a = cur[-2*refs];
        b = cur[-refs];
        c = cur[0];
        d = cur[+refs];
        e = cur[2*refs];

        if( y == 0 )
        {
            /* First line, so A and B don't exist.*/
            a = cur[0];
            b = cur[0];
        }
        else if( y == 1 )
        {
            /* Second line, no A. */
            a = cur[-refs];
        }
        else if( y == (pv->height[plane] - 2) )
        {
            /* Second to last line, no E. */
            e = cur[+refs];
        }
        else if( y == (pv->height[plane] -1) )
        {
            /* Last line, no D or E. */
            d = cur[0];
            e = cur[0];
        }

        dst[0] = blend_filter_pixel( a, b, c, d, e );

        dst++;
        cur++;
    }
}
232
/* Score the comb mask built by tritical_detect_comb().
   Returns: 0 = frame is clean, 1 = send to yadif, 2 = send to blend. */
int check_combing_mask( hb_filter_private_t * pv )
{
    /* Go through the mask in X*Y blocks. If any of these windows
       have threshold or more combed pixels, consider the whole
       frame to be combed and send it on to be deinterlaced.     */

    /* Block mask threshold -- The number of pixels
       in a block_width * block_height window of
       the mask that need to show combing for the
       whole frame to be seen as such.            */
    int threshold       = pv->block_threshold;
    int block_width     = pv->block_width;
    int block_height    = pv->block_height;
    int block_x, block_y;
    int block_score = 0; int send_to_blend = 0;
    
    int x, y, k;

    /* Only the luma plane is scanned (k < 1). */
    for( k = 0; k < 1; k++ )
    {
        int ref_stride = pv->ref_stride[k];
        for( y = 0; y < ( pv->height[k] - block_height ); y = y + block_height )
        {
            for( x = 0; x < ( pv->width[k] - block_width ); x = x + block_width )
            {
                block_score = 0;
                for( block_y = 0; block_y < block_height; block_y++ )
                {
                    for( block_x = 0; block_x < block_width; block_x++ )
                    {
                        int mask_y = y + block_y;
                        int mask_x = x + block_x;
                        
                        /* A pixel only counts when its horizontal
                           neighbours in the mask are combed too.
                           NOTE(review): the edge guards test the row
                           (y + block_y) but it is the *column* offsets
                           (mask_x +/- 1) that go out of range -- this
                           looks like it was meant to test mask_x;
                           confirm before changing behavior. */
                        if( y + block_y == 0 )
                        {
                            if( pv->mask[k][mask_y*ref_stride+mask_x    ] == 255 &&
                                pv->mask[k][mask_y*ref_stride+mask_x + 1] == 255 )
                                    block_score++;
                        }
                        else if( y + block_y == pv->height[k] - 1 )
                        {
                            if( pv->mask[k][mask_y*ref_stride+mask_x - 1] == 255 &&
                                pv->mask[k][mask_y*ref_stride+mask_x    ] == 255 )
                                    block_score++;
                        }
                        else
                        {
                            if( pv->mask[k][mask_y*ref_stride+mask_x - 1] == 255 &&
                                pv->mask[k][mask_y*ref_stride+mask_x    ] == 255 &&
                                pv->mask[k][mask_y*ref_stride+mask_x + 1] == 255 )
                                    block_score++;
                        } 
                    }
                }

                if( block_score >= ( threshold / 2 ) )
                {
#if 0
                    hb_log("decomb: frame %i | score %i | type %s", pv->yadif_deinterlaced_frames + pv->blend_deinterlaced_frames +  pv->unfiltered_frames + 1, block_score, pv->buf_settings->flags & 16 ? "Film" : "Video");
#endif
                    /* Flag 16 on buf_settings marks film (progressive)
                       content -- set upstream; TODO confirm the source. */
                    if ( block_score <= threshold && !( pv->buf_settings->flags & 16) )
                    {
                        /* Blend video content that scores between
                           ( threshold / 2 ) and threshold.        */
                        send_to_blend = 1;
                    }
                    else if( block_score > threshold )
                    {
                        if( pv->buf_settings->flags & 16 )
                        {
                            /* Blend progressive content above the threshold.*/
                            return 2;
                        }
                        else
                        {
                            /* Yadif deinterlace video content above the threshold. */
                            return 1;
                        }
                    }
                }
            }
        } 
    }
    
    if( send_to_blend )
    {
        return 2;
    }
    else
    {
        /* Consider this frame to be uncombed. */
        return 0;
    }
}
330
/* Build a per-pixel comb mask for the current frame (pv->ref[1]) and
   hand it to check_combing_mask() for a whole-frame verdict.
   Returns 0 (clean), 1 (yadif), or 2 (blend).

   A mish-mash of various comb detection tricks
   picked up from neuron2's Decomb plugin for
   AviSynth and tritical's IsCombedT and
   IsCombedTIVTC plugins.                       */
int tritical_detect_comb( hb_filter_private_t * pv )
{
    int x, y, k, width, height;
    
    /* Comb scoring algorithm */
    int spatial_metric  = pv->spatial_metric;
    /* Motion threshold */
    int mthresh         = pv->motion_threshold;
    /* Spatial threshold */
    int athresh         = pv->spatial_threshold;
    int athresh_squared = athresh * athresh;
    int athresh6        = 6 *athresh;

    /* Only the luma plane is examined (k < 1), though the loop is
       written as if it could cover all three planes. */
    for( k = 0; k < 1; k++ )
    {
        int ref_stride  = pv->ref_stride[k];
        width           = pv->width[k];
        height          = pv->height[k];

        /* NOTE(review): rows 0-1 and the bottom two rows are never
           written to the mask here, yet check_combing_mask() reads
           them -- confirm the mask memory starts out zeroed. */
        for( y = 2; y < ( height - 2 ); y++ )
        {
            /* These are just to make the buffer locations easier to read. */
            int back_2    = ( y - 2 )*ref_stride ;
            int back_1    = ( y - 1 )*ref_stride;
            int current   =         y*ref_stride;
            int forward_1 = ( y + 1 )*ref_stride;
            int forward_2 = ( y + 2 )*ref_stride;
            
            /* We need to examine a column of 5 pixels
               in the prev, cur, and next frames.      */
            uint8_t previous_frame[5];
            uint8_t current_frame[5];
            uint8_t next_frame[5];
            
            for( x = 0; x < width; x++ )
            {
                /* Fill up the current frame array with the current pixel values.*/
                current_frame[0] = pv->ref[1][k][back_2    + x];
                current_frame[1] = pv->ref[1][k][back_1    + x];
                current_frame[2] = pv->ref[1][k][current   + x];
                current_frame[3] = pv->ref[1][k][forward_1 + x];
                current_frame[4] = pv->ref[1][k][forward_2 + x];

                int up_diff   = current_frame[2] - current_frame[1];
                int down_diff = current_frame[2] - current_frame[3];

                if( ( up_diff >  athresh && down_diff >  athresh ) ||
                    ( up_diff < -athresh && down_diff < -athresh ) )
                {
                    /* The pixel above and below are different,
                       and they change in the same "direction" too.*/
                    int motion = 0;
                    if( mthresh > 0 )
                    {
                        /* Make sure there's sufficient motion between frame t-1 to frame t+1. */
                        previous_frame[0] = pv->ref[0][k][back_2    + x];
                        previous_frame[1] = pv->ref[0][k][back_1    + x];
                        previous_frame[2] = pv->ref[0][k][current   + x];
                        previous_frame[3] = pv->ref[0][k][forward_1 + x];
                        previous_frame[4] = pv->ref[0][k][forward_2 + x];
                        next_frame[0]     = pv->ref[2][k][back_2    + x];
                        next_frame[1]     = pv->ref[2][k][back_1    + x];
                        next_frame[2]     = pv->ref[2][k][current   + x];
                        next_frame[3]     = pv->ref[2][k][forward_1 + x];
                        next_frame[4]     = pv->ref[2][k][forward_2 + x];
                        
                        if( abs( previous_frame[2] - current_frame[2] ) > mthresh &&
                            abs(  current_frame[1] - next_frame[1]    ) > mthresh &&
                            abs(  current_frame[3] - next_frame[3]    ) > mthresh )
                                motion++;
                        if( abs(     next_frame[2] - current_frame[2] ) > mthresh &&
                            abs( previous_frame[1] - current_frame[1] ) > mthresh &&
                            abs( previous_frame[3] - current_frame[3] ) > mthresh )
                                motion++;
                    }
                    else
                    {
                        /* User doesn't want to check for motion,
                           so move on to the spatial check.       */
                        motion = 1;
                    }

                    /* The frame-counter check forces the spatial test on
                       the very first frame, which has no motion history. */
                    if( motion || ( pv->yadif_deinterlaced_frames==0 && pv->blend_deinterlaced_frames==0 && pv->unfiltered_frames==0) )
                    {
                           /*That means it's time for the spatial check.
                           We've got several options here.             */
                        if( spatial_metric == 0 )
                        {
                            /* Simple 32detect style comb detection */
                            if( ( abs( current_frame[2] - current_frame[4] ) < 10  ) &&
                                ( abs( current_frame[2] - current_frame[3] ) > 15 ) )
                            {
                                pv->mask[k][y*ref_stride + x] = 255;
                            }
                            else
                            {
                                pv->mask[k][y*ref_stride + x] = 0;
                            }
                        }
                        else if( spatial_metric == 1 )
                        {
                            /* This, for comparison, is what IsCombed uses.
                               It's better, but still noise sensitive.      */
                               int combing = ( current_frame[1] - current_frame[2] ) *
                                             ( current_frame[3] - current_frame[2] );
                               
                               if( combing > athresh_squared )
                                   pv->mask[k][y*ref_stride + x] = 255; 
                               else
                                   pv->mask[k][y*ref_stride + x] = 0;
                        }
                        else if( spatial_metric == 2 )
                        {
                            /* Tritical's noise-resistant combing scorer.
                               The check is done on a bob+blur convolution. */
                            int combing = abs( current_frame[0]
                                             + ( 4 * current_frame[2] )
                                             + current_frame[4]
                                             - ( 3 * ( current_frame[1]
                                                     + current_frame[3] ) ) );

                            /* If the frame is sufficiently combed,
                               then mark it down on the mask as 255. */
                            if( combing > athresh6 )
                                pv->mask[k][y*ref_stride + x] = 255; 
                            else
                                pv->mask[k][y*ref_stride + x] = 0;
                        }
                    }
                    else
                    {
                        pv->mask[k][y*ref_stride + x] = 0;
                    }
                }
                else
                {
                    pv->mask[k][y*ref_stride + x] = 0;
                }
            }
        }
    }
    
    return check_combing_mask( pv );
}
481
/* Filter one row of the field being interpolated: combine a spatial
   (vertical or edge-directed) prediction with a temporal clamp derived
   from the adjacent fields, writing pv->width[plane] pixels to dst. */
static void yadif_filter_line( uint8_t *dst,
                               uint8_t *prev,
                               uint8_t *cur,
                               uint8_t *next,
                               int plane,
                               int parity,
                               int y,
                               hb_filter_private_t * pv )
{
    /* While prev and next point to the previous and next frames,
       prev2 and next2 will shift depending on the parity, usually 1.
       They are the previous and next fields, the fields temporally adjacent
       to the other field in the current frame--the one not being filtered.  */
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;
    int w = pv->width[plane];
    int refs = pv->ref_stride[plane];
    int x;
    
    for( x = 0; x < w; x++)
    {
        /* Pixel above*/
        int c              = cur[-refs];
        /* Temporal average: the current location in the adjacent fields */
        int d              = (prev2[0] + next2[0])>>1;
        /* Pixel below */
        int e              = cur[+refs];
        
        /* How the current pixel changes between the adjacent fields */
        int temporal_diff0 = ABS(prev2[0] - next2[0]);
        /* The average of how much the pixels above and below change from the frame before to now. */
        int temporal_diff1 = ( ABS(prev[-refs] - cur[-refs]) + ABS(prev[+refs] - cur[+refs]) ) >> 1;
        /* The average of how much the pixels above and below change from now to the next frame. */
        int temporal_diff2 = ( ABS(next[-refs] - cur[-refs]) + ABS(next[+refs] - cur[+refs]) ) >> 1;
        /* For the actual difference, use the largest of the previous average diffs. */
        int diff           = MAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2);
        
        /* SAD of how the pixel-1, the pixel, and the pixel+1 change from the line above to below. */ 
        int spatial_score  = ABS(cur[-refs-1] - cur[+refs-1]) + ABS(cur[-refs]-cur[+refs]) +
                                     ABS(cur[-refs+1] - cur[+refs+1]) - 1;         
        int spatial_pred;
         
        /* Spatial pred is either a bilinear or cubic vertical interpolation. */
        if( pv->mode > 0 )
        {
            spatial_pred = cubic_interpolate( cur[-3*refs], cur[-refs], cur[+refs], cur[3*refs] );
        }
        else
        {
            spatial_pred = (c+e)>>1;
        }

/* EDDI: Edge Directed Deinterlacing Interpolation
   Uses the Martinez-Lim Line Shift Parametric Modeling algorithm...I think.
   Checks 4 different slopes to see if there is more similarity along a diagonal
   than there was vertically. If a diagonal is more similar, then it indicates
   an edge, so interpolate along that instead of a vertical line, using either
   linear or cubic interpolation depending on mode.
   NOTE: the macro body deliberately leaves two braces open; each
   invocation below closes them with its trailing "}}" pairs, so the
   checks nest -- a -2/+2 slope is only tried if -1/+1 already won. */
#define YADIF_CHECK(j)\
        {   int score = ABS(cur[-refs-1+j] - cur[+refs-1-j])\
                      + ABS(cur[-refs  +j] - cur[+refs  -j])\
                      + ABS(cur[-refs+1+j] - cur[+refs+1-j]);\
            if( score < spatial_score ){\
                spatial_score = score;\
                if( pv->mode > 0 )\
                {\
                    switch(j)\
                    {\
                        case -1:\
                            spatial_pred = cubic_interpolate(cur[-3 * refs - 3], cur[-refs -1], cur[+refs + 1], cur[3* refs + 3] );\
                        break;\
                        case -2:\
                            spatial_pred = cubic_interpolate( ( ( cur[-3*refs - 4] + cur[-refs - 4] ) / 2 ) , cur[-refs -2], cur[+refs + 2], ( ( cur[3*refs + 4] + cur[refs + 4] ) / 2 ) );\
                        break;\
                        case 1:\
                            spatial_pred = cubic_interpolate(cur[-3 * refs +3], cur[-refs +1], cur[+refs - 1], cur[3* refs -3] );\
                        break;\
                        case 2:\
                            spatial_pred = cubic_interpolate(( ( cur[-3*refs + 4] + cur[-refs + 4] ) / 2 ), cur[-refs +2], cur[+refs - 2], ( ( cur[3*refs - 4] + cur[refs - 4] ) / 2 ) );\
                        break;\
                    }\
                }\
                else\
                {\
                    spatial_pred = ( cur[-refs +j] + cur[+refs -j] ) >>1;\
                }\
                
                YADIF_CHECK(-1) YADIF_CHECK(-2) }} }}
                YADIF_CHECK( 1) YADIF_CHECK( 2) }} }}
                                
        /* Temporally adjust the spatial prediction by
           comparing against lines in the adjacent fields. */
        int b = (prev2[-2*refs] + next2[-2*refs])>>1;
        int f = (prev2[+2*refs] + next2[+2*refs])>>1;
        
        /* Find the median value */
        int max = MAX3(d-e, d-c, MIN(b-c, f-e));
        int min = MIN3(d-e, d-c, MAX(b-c, f-e));
        diff = MAX3( diff, min, -max );
        
        /* Clamp the spatial prediction to within diff of the temporal average. */
        if( spatial_pred > d + diff )
        {
            spatial_pred = d + diff;
        }
        else if( spatial_pred < d - diff )
        {
            spatial_pred = d - diff;
        }
        
        dst[0] = spatial_pred;
                        
        dst++;
        cur++;
        prev++;
        next++;
        prev2++;
        next2++;
    }
}
601
602 static void yadif_filter( uint8_t ** dst,
603                           int parity,
604                           int tff,
605                           hb_filter_private_t * pv )
606 {
607     
608     int is_combed = tritical_detect_comb( pv );
609     
610     if( is_combed == 1 )
611     {
612         pv->yadif_deinterlaced_frames++;
613     }
614     else if( is_combed == 2 )
615     {
616         pv->blend_deinterlaced_frames++;
617     }
618     else
619     {
620         pv->unfiltered_frames++;
621     }
622
623     int i;
624     for( i = 0; i < 3; i++ )
625     {
626         int w = pv->width[i];
627         int h = pv->height[i];
628         int ref_stride = pv->ref_stride[i];        
629         
630         int y;
631         for( y = 0; y < h; y++ )
632         {
633             if( ( pv->mode == 4 && is_combed ) || is_combed == 2 )
634             {
635                 uint8_t *prev = &pv->ref[0][i][y*ref_stride];
636                 uint8_t *cur  = &pv->ref[1][i][y*ref_stride];
637                 uint8_t *next = &pv->ref[2][i][y*ref_stride];
638                 uint8_t *dst2 = &dst[i][y*w];
639
640                 blend_filter_line( dst2, cur, i, y, pv );
641             }
642             else if( (y ^ parity) & 1 && is_combed == 1 )
643             {
644                 uint8_t *prev = &pv->ref[0][i][y*ref_stride];
645                 uint8_t *cur  = &pv->ref[1][i][y*ref_stride];
646                 uint8_t *next = &pv->ref[2][i][y*ref_stride];
647                 uint8_t *dst2 = &dst[i][y*w];
648
649                 yadif_filter_line( dst2, prev, cur, next, i, parity ^ tff, y, pv );
650             }
651             else
652             {
653                 memcpy( &dst[i][y*w],
654                         &pv->ref[1][i][y*ref_stride],
655                         w * sizeof(uint8_t) );              
656             }
657         }
658     }
659 }
660
/* Motion-compensated refinement pass: run the deinterlaced frame
   through a snow encoder so its motion estimation can smooth the
   interpolated field, then copy the kept field through untouched.
   NOTE(review): avcodec_encode_video()/coded_frame are long-deprecated
   ffmpeg APIs -- fine for the libav snapshot this tree builds against. */
static void mcdeint_filter( uint8_t ** dst,
                            uint8_t ** src,
                            int parity,
                            hb_filter_private_t * pv )
{
    int x, y, i;
    int out_size;

#ifdef SUPPRESS_AV_LOG
    /* TODO: temporarily change log level to suppress obnoxious debug output */
    int loglevel = av_log_get_level();
    av_log_set_level( AV_LOG_QUIET );
#endif

    for( i=0; i<3; i++ )
    {
        pv->mcdeint_frame->data[i] = src[i];
        pv->mcdeint_frame->linesize[i] = pv->width[i];
    }
    pv->mcdeint_avctx_enc->me_cmp     = FF_CMP_SAD;
    pv->mcdeint_avctx_enc->me_sub_cmp = FF_CMP_SAD;
    pv->mcdeint_frame->quality        = pv->mcdeint_qp * FF_QP2LAMBDA;

    /* Encode purely for the side effect of producing coded_frame,
       the encoder's motion-compensated reconstruction; the bitstream
       itself (out_size bytes) is discarded. */
    out_size = avcodec_encode_video( pv->mcdeint_avctx_enc,
                                     pv->mcdeint_outbuf,
                                     pv->mcdeint_outbuf_size,
                                     pv->mcdeint_frame );

    pv->mcdeint_frame_dec = pv->mcdeint_avctx_enc->coded_frame;

    for( i = 0; i < 3; i++ )
    {
        int w    = pv->width[i];
        int h    = pv->height[i];
        int fils = pv->mcdeint_frame_dec->linesize[i];
        int srcs = pv->width[i];

        for( y = 0; y < h; y++ )
        {
            /* Only touch rows belonging to the interpolated field. */
            if( (y ^ parity) & 1 )
            {
                for( x = 0; x < w; x++ )
                {
                    /* Skip pixels whose 5x3 neighbourhood would run
                       outside the packed source plane. */
                    if( (x-2)+(y-1)*w >= 0 && (x+2)+(y+1)*w < w*h )
                    {
                        uint8_t * filp =
                            &pv->mcdeint_frame_dec->data[i][x + y*fils];
                        uint8_t * srcp = &src[i][x + y*srcs];

                        /* How much the encoder changed the lines above/below. */
                        int diff0 = filp[-fils] - srcp[-srcs];
                        int diff1 = filp[+fils] - srcp[+srcs];

                        int spatial_score =
                              ABS(srcp[-srcs-1] - srcp[+srcs-1])
                            + ABS(srcp[-srcs  ] - srcp[+srcs  ])
                            + ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;

                        int temp = filp[0];

/* Like YADIF_CHECK: pick the most-similar diagonal and take the encoder
   deltas along it.  The macro leaves two braces open; each invocation
   below closes them with its trailing "}}" pairs, so the +/-2 slope is
   only tried when +/-1 already won. */
#define MCDEINT_CHECK(j)\
                        {   int score = ABS(srcp[-srcs-1+j] - srcp[+srcs-1-j])\
                                      + ABS(srcp[-srcs  +j] - srcp[+srcs  -j])\
                                      + ABS(srcp[-srcs+1+j] - srcp[+srcs+1-j]);\
                            if( score < spatial_score ) {\
                                spatial_score = score;\
                                diff0 = filp[-fils+j] - srcp[-srcs+j];\
                                diff1 = filp[+fils-j] - srcp[+srcs-j];

                        MCDEINT_CHECK(-1) MCDEINT_CHECK(-2) }} }}
                        MCDEINT_CHECK( 1) MCDEINT_CHECK( 2) }} }}

                        if(diff0 + diff1 > 0)
                        {
                            temp -= (diff0 + diff1 -
                                     ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;
                        }
                        else
                        {
                            temp -= (diff0 + diff1 +
                                     ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;
                        }

                        /* Branch-free clamp to 0..255: a negative temp
                           compares > 255U (unsigned promotion) and
                           temp>>31 is then -1, so ~(temp>>31) == 0; for
                           temp > 255 the shift gives 0 and ~0 stores as
                           255 in the uint8_t.  NOTE(review): assumes
                           arithmetic right shift of negative ints. */
                        filp[0] = dst[i][x + y*w] =
                            temp > 255U ? ~(temp>>31) : temp;
                    }
                    else
                    {
                        dst[i][x + y*w] =
                            pv->mcdeint_frame_dec->data[i][x + y*fils];
                    }
                }
            }
        }

        /* Copy the kept field straight through, mirroring it into the
           decoder frame as well for the next pass. */
        for( y = 0; y < h; y++ )
        {
            if( !((y ^ parity) & 1) )
            {
                for( x = 0; x < w; x++ )
                {
                    pv->mcdeint_frame_dec->data[i][x + y*fils] =
                        dst[i][x + y*w]= src[i][x + y*srcs];
                }
            }
        }
    }

#ifdef SUPPRESS_AV_LOG
    /* TODO: restore previous log level */
    av_log_set_level(loglevel);
#endif
}
773
774 hb_filter_private_t * hb_decomb_init( int pix_fmt,
775                                            int width,
776                                            int height,
777                                            char * settings )
778 {
779     if( pix_fmt != PIX_FMT_YUV420P )
780     {
781         return 0;
782     }
783
784     hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );
785
786     pv->pix_fmt = pix_fmt;
787
788     pv->width[0]  = width;
789     pv->height[0] = height;
790     pv->width[1]  = pv->width[2]  = width >> 1;
791     pv->height[1] = pv->height[2] = height >> 1;
792
793     int buf_size = 3 * width * height / 2;
794     pv->buf_out[0] = hb_buffer_init( buf_size );
795     pv->buf_out[1] = hb_buffer_init( buf_size );
796     pv->buf_settings = hb_buffer_init( 0 );
797
798     pv->yadif_deinterlaced_frames = 0;
799     pv->blend_deinterlaced_frames = 0;
800     pv->unfiltered_frames = 0;
801
802     pv->yadif_ready    = 0;
803
804     pv->mode     = MODE_DEFAULT;
805     pv->spatial_metric = 2;
806     pv->motion_threshold = 6;
807     pv->spatial_threshold = 9;
808     pv->block_threshold = 80;
809     pv->block_width = 16;
810     pv->block_height = 16;
811     
812     pv->parity   = PARITY_DEFAULT;
813
814     pv->mcdeint_mode   = MCDEINT_MODE_DEFAULT;
815     pv->mcdeint_qp     = MCDEINT_QP_DEFAULT;
816
817     if( settings )
818     {
819         sscanf( settings, "%d:%d:%d:%d:%d:%d:%d",
820                 &pv->mode,
821                 &pv->spatial_metric,
822                 &pv->motion_threshold,
823                 &pv->spatial_threshold,
824                 &pv->block_threshold,
825                 &pv->block_width,
826                 &pv->block_height );
827     }
828
829     if( pv->mode == 2 || pv->mode == 3 )
830     {
831         pv->mcdeint_mode = 0;
832     }
833     
834     /* Allocate yadif specific buffers */
835     int i, j;
836     for( i = 0; i < 3; i++ )
837     {
838         int is_chroma = !!i;
839         int w = ((width   + 31) & (~31))>>is_chroma;
840         int h = ((height+6+ 31) & (~31))>>is_chroma;
841
842         pv->ref_stride[i] = w;
843
844         for( j = 0; j < 3; j++ )
845         {
846             pv->ref[j][i] = malloc( w*h*sizeof(uint8_t) ) + 3*w;
847         }
848     }
849
850     /* Allocate a buffer to store a comb mask. */
851     for( i = 0; i < 3; i++ )
852     {
853         int is_chroma = !!i;
854         int w = ((pv->width[0]   + 31) & (~31))>>is_chroma;
855         int h = ((pv->height[0]+6+ 31) & (~31))>>is_chroma;
856
857         pv->mask[i] = malloc( w*h*sizeof(uint8_t) ) + 3*w;
858     }
859
860     /* Allocate mcdeint specific buffers */
861     if( pv->mcdeint_mode >= 0 )
862     {
863         avcodec_init();
864         avcodec_register_all();
865
866         AVCodec * enc = avcodec_find_encoder( CODEC_ID_SNOW );
867
868         int i;
869         for (i = 0; i < 3; i++ )
870         {
871             AVCodecContext * avctx_enc;
872
873             avctx_enc = pv->mcdeint_avctx_enc = avcodec_alloc_context();
874
875             avctx_enc->width                    = width;
876             avctx_enc->height                   = height;
877             avctx_enc->time_base                = (AVRational){1,25};  // meaningless
878             avctx_enc->gop_size                 = 300;
879             avctx_enc->max_b_frames             = 0;
880             avctx_enc->pix_fmt                  = PIX_FMT_YUV420P;
881             avctx_enc->flags                    = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
882             avctx_enc->strict_std_compliance    = FF_COMPLIANCE_EXPERIMENTAL;
883             avctx_enc->global_quality           = 1;
884             avctx_enc->flags2                   = CODEC_FLAG2_MEMC_ONLY;
885             avctx_enc->me_cmp                   = FF_CMP_SAD; //SSE;
886             avctx_enc->me_sub_cmp               = FF_CMP_SAD; //SSE;
887             avctx_enc->mb_cmp                   = FF_CMP_SSE;
888
889             switch( pv->mcdeint_mode )
890             {
891                 case 3:
892                     avctx_enc->refs = 3;
893                 case 2:
894                     avctx_enc->me_method = ME_UMH;
895                 case 1:
896                     avctx_enc->flags |= CODEC_FLAG_4MV;
897                     avctx_enc->dia_size =2;
898                 case 0:
899                     avctx_enc->flags |= CODEC_FLAG_QPEL;
900             }
901
902             avcodec_open(avctx_enc, enc);
903         }
904
905         pv->mcdeint_frame       = avcodec_alloc_frame();
906         pv->mcdeint_outbuf_size = width * height * 10;
907         pv->mcdeint_outbuf      = malloc( pv->mcdeint_outbuf_size );
908     }
909
910     return pv;
911 }
912
913 void hb_decomb_close( hb_filter_private_t * pv )
914 {
915     if( !pv )
916     {
917         return;
918     }
919     
920     hb_log("decomb: yadif deinterlaced %i | blend deinterlaced %i | unfiltered %i | total %i", pv->yadif_deinterlaced_frames, pv->blend_deinterlaced_frames, pv->unfiltered_frames, pv->yadif_deinterlaced_frames + pv->blend_deinterlaced_frames + pv->unfiltered_frames);
921
922     /* Cleanup frame buffers */
923     if( pv->buf_out[0] )
924     {
925         hb_buffer_close( &pv->buf_out[0] );
926     }
927     if( pv->buf_out[1] )
928     {
929         hb_buffer_close( &pv->buf_out[1] );
930     }
931     if (pv->buf_settings )
932     {
933         hb_buffer_close( &pv->buf_settings );
934     }
935
936     /* Cleanup yadif specific buffers */
937     int i;
938     for( i = 0; i<3*3; i++ )
939     {
940         uint8_t **p = &pv->ref[i%3][i/3];
941         if (*p)
942         {
943             free( *p - 3*pv->ref_stride[i/3] );
944             *p = NULL;
945         }
946     }
947     
948     /* Cleanup combing mask. */
949     for( i = 0; i<3*3; i++ )
950     {
951         uint8_t **p = &pv->mask[i/3];
952         if (*p)
953         {
954             free( *p - 3*pv->ref_stride[i/3] );
955             *p = NULL;
956         }
957     }
958     
959     /* Cleanup mcdeint specific buffers */
960     if( pv->mcdeint_mode >= 0 )
961     {
962         if( pv->mcdeint_avctx_enc )
963         {
964             avcodec_close( pv->mcdeint_avctx_enc );
965             av_freep( &pv->mcdeint_avctx_enc );
966         }
967         if( pv->mcdeint_outbuf )
968         {
969             free( pv->mcdeint_outbuf );
970         }
971     }
972
973     free( pv );
974 }
975
976 int hb_decomb_work( const hb_buffer_t * cbuf_in,
977                     hb_buffer_t ** buf_out,
978                     int pix_fmt,
979                     int width,
980                     int height,
981                     hb_filter_private_t * pv )
982 {
983     hb_buffer_t * buf_in = (hb_buffer_t *)cbuf_in;
984
985     if( !pv ||
986         pix_fmt != pv->pix_fmt ||
987         width   != pv->width[0] ||
988         height  != pv->height[0] )
989     {
990         return FILTER_FAILED;
991     }
992
993     avpicture_fill( &pv->pic_in, buf_in->data,
994                     pix_fmt, width, height );
995
996     /* Determine if top-field first layout */
997     int tff;
998     if( pv->parity < 0 )
999     {
1000         tff = !!(buf_in->flags & PIC_FLAG_TOP_FIELD_FIRST);
1001     }
1002     else
1003     {
1004         tff = (pv->parity & 1) ^ 1;
1005     }
1006
1007     /* Store current frame in yadif cache */
1008     store_ref( (const uint8_t**)pv->pic_in.data, pv );
1009
1010     /* If yadif is not ready, store another ref and return FILTER_DELAY */
1011     if( pv->yadif_ready == 0 )
1012     {
1013         store_ref( (const uint8_t**)pv->pic_in.data, pv );
1014
1015         hb_buffer_copy_settings( pv->buf_settings, buf_in );
1016
1017         /* don't let 'work_loop' send a chapter mark upstream */
1018         buf_in->new_chap  = 0;
1019
1020         pv->yadif_ready = 1;
1021
1022         return FILTER_DELAY;
1023     }
1024
1025     /* Perform yadif filtering */        
1026     int frame;
1027     for( frame = 0; frame <= ( ( pv->mode == 2 || pv->mode == 3 )? 1 : 0 ) ; frame++ )
1028     {
1029         int parity = frame ^ tff ^ 1;
1030
1031         avpicture_fill( &pv->pic_out, pv->buf_out[!(frame^1)]->data,
1032                         pix_fmt, width, height );
1033
1034         yadif_filter( pv->pic_out.data, parity, tff, pv );
1035
1036         if( pv->mcdeint_mode >= 0 )
1037         {
1038             /* Perform mcdeint filtering */
1039             avpicture_fill( &pv->pic_in,  pv->buf_out[(frame^1)]->data,
1040                             pix_fmt, width, height );
1041
1042             mcdeint_filter( pv->pic_in.data, pv->pic_out.data, parity, pv );
1043         }
1044
1045         *buf_out = pv->buf_out[!(frame^1)];
1046     }
1047
1048     /* Copy buffered settings to output buffer settings */
1049     hb_buffer_copy_settings( *buf_out, pv->buf_settings );
1050
1051     /* Replace buffered settings with input buffer settings */
1052     hb_buffer_copy_settings( pv->buf_settings, buf_in );
1053
1054     /* don't let 'work_loop' send a chapter mark upstream */
1055     buf_in->new_chap  = 0;
1056
1057     return FILTER_OK;
1058 }