1 /* $Id: render.c,v 1.17 2005/04/14 17:37:54 titer Exp $
3 This file is part of the HandBrake source code.
4 Homepage: <http://handbrake.fr/>.
5 It may be used under the terms of the GNU General Public License. */
9 #include "ffmpeg/avcodec.h"
10 #include "ffmpeg/swscale.h"
/* Private state for the render work object: the libswscale context and
   scratch pictures for crop/scale, FIFOs used to delay subtitles/frames,
   and the VFR time-keeping arrays used to renumber timestamps when
   detelecine drops frames.
   NOTE(review): this chunk is missing lines — other members referenced
   below (job, pic_tmp_in, dropped_frames, extended_frames, chapter_time,
   chapter_val) are declared on lines not visible here. */
12 struct hb_work_private_s
/* libswscale rescale context; NULL when no crop/scale is needed */
16 struct SwsContext * context;
/* scratch AVPictures wrapping the input (cropped) and output buffers */
18 AVPicture pic_tmp_crop;
19 AVPicture pic_tmp_out;
20 hb_buffer_t * buf_scale;
/* queued subtitles, kept in step with frames that may be delayed/dropped */
21 hb_fifo_t * subtitle_queue;
/* holds the last few output frames so their durations can be rewritten */
22 hb_fifo_t * delay_queue;
/* start/stop times of the last 4 frames, renumbered for continuity */
25 uint64_t last_start[4];
26 uint64_t last_stop[4];
/* time owed back to the stream by dropped frames, spread over 4 slots */
27 uint64_t lost_time[4];
28 uint64_t total_lost_time;
29 uint64_t total_gained_time;
/* Work-object entry points (definitions below). */
34 int renderInit( hb_work_object_t *, hb_job_t * );
35 int renderWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
36 void renderClose( hb_work_object_t * );
/* Render work object descriptor.
   NOTE(review): the initializer list is on lines missing from this chunk. */
38 hb_work_object_t hb_render =
/*
 * Locate the U (chroma) sample for pixel (x, y) in a YUV420P buffer.
 *
 * The full-resolution Y plane (width * height bytes) comes first; the U
 * plane follows it at half resolution in each dimension, so one U sample
 * covers a 2x2 block of luma pixels.
 */
static uint8_t *getU(uint8_t *data, int width, int height, int x, int y)
{
    int y_plane_size = width * height;   /* skip past the whole Y plane */
    int chroma_row   = y / 2;            /* half-resolution coordinates */
    int chroma_col   = x / 2;

    return data + y_plane_size + chroma_row * ( width / 2 ) + chroma_col;
}
/*
 * Locate the V (chroma) sample for pixel (x, y) in a YUV420P buffer.
 *
 * V sits after both the Y plane (width * height bytes) and the U plane
 * ((width/2) * (height/2) bytes); like U it is quarter-size, one sample
 * per 2x2 block of luma pixels.
 *
 * Fix: the return expression in this chunk was cut short — the U-plane
 * size term ((width/2) * (height/2)) is required to reach the V plane,
 * otherwise getV would alias the U plane.
 */
static uint8_t *getV(uint8_t *data, int width, int height, int x, int y)
{
    return( &data[ (((y/2) * (width/2)) + (x/2)) + (width*height) +
                   ((width/2) * (height/2)) ] );
}
/*
 * ApplySub: burn a decoded subtitle bitmap (*_sub) into a YUV420P frame
 * (buf).  Clamps the subtitle position so it stays inside the cropped
 * picture with a margin, alpha-blends the luma plane, overwrites chroma
 * where the subtitle is opaque, then closes the subtitle buffer.
 * NOTE(review): this chunk is missing lines (braces, the margin_percent
 * assignment, `lum = sub->data`, the alpha guard, and the per-row
 * lum/alpha/out advances); comments on elided behavior are hedged.
 */
67 static void ApplySub( hb_job_t * job, hb_buffer_t * buf,
70 hb_buffer_t * sub = *_sub;
71 hb_title_t * title = job->title;
72 int i, j, offset_top, offset_left, margin_top, margin_percent;
73 uint8_t * lum, * alpha, * out, * sub_chromaU, * sub_chromaV;
76 * Percent of height of picture that form a margin that subtitles
77 * should not be displayed within.
/* NOTE(review): margin_percent is assigned on a line missing from this
   chunk — confirm its value against the full source. */
87 * If necessary, move the subtitle so it is not in a cropped zone.
88 * When it won't fit, we center it so we lose as much on both ends.
89 * Otherwise we try to leave a 20px or 2% margin around it.
91 margin_top = ( ( title->height - job->crop[0] - job->crop[1] ) *
92 margin_percent ) / 100;
97 * A maximum margin of 20px regardless of height of the picture.
/* Vertical placement: choose offset_top so the subtitle stays inside
   the cropped picture, honoring margin_top where it fits. */
102 if( sub->height > title->height - job->crop[0] - job->crop[1] -
106 * The subtitle won't fit in the cropped zone, so center
107 * it vertically so we fit in as much as we can.
109 offset_top = job->crop[0] + ( title->height - job->crop[0] -
110 job->crop[1] - sub->height ) / 2;
112 else if( sub->y < job->crop[0] + margin_top )
115 * The subtitle fits in the cropped zone, but is currently positioned
116 * within our top margin, so move it outside of our margin.
118 offset_top = job->crop[0] + margin_top;
120 else if( sub->y > title->height - job->crop[1] - margin_top - sub->height )
123 * The subtitle fits in the cropped zone, and is not within the top
124 * margin but is within the bottom margin, so move it to be above
127 offset_top = title->height - job->crop[1] - margin_top - sub->height;
132 * The subtitle is fine where it is.
/* Horizontal placement: same idea with a fixed 20px margin per side. */
137 if( sub->width > title->width - job->crop[2] - job->crop[3] - 40 )
138 offset_left = job->crop[2] + ( title->width - job->crop[2] -
139 job->crop[3] - sub->width ) / 2;
140 else if( sub->x < job->crop[2] + 20 )
141 offset_left = job->crop[2] + 20;
142 else if( sub->x > title->width - job->crop[3] - 20 - sub->width )
143 offset_left = title->width - job->crop[3] - 20 - sub->width;
145 offset_left = sub->x;
/* The subtitle buffer packs four width*height planes back to back:
   luma, alpha, U, V.  NOTE(review): `lum = sub->data;` is on a line
   missing from this chunk. */
148 alpha = lum + sub->width * sub->height;
149 sub_chromaU = alpha + sub->width * sub->height;
150 sub_chromaV = sub_chromaU + sub->width * sub->height;
152 out = buf->data + offset_top * title->width + offset_left;
/* Blend row by row, clipping each pixel against the frame edges. */
154 for( i = 0; i < sub->height; i++ )
156 if( offset_top + i >= 0 && offset_top + i < title->height )
158 for( j = 0; j < sub->width; j++ )
160 if( offset_left + j >= 0 && offset_left + j < title->width )
162 uint8_t *chromaU, *chromaV;
165 * Merge the luminance and alpha with the picture
/* 4-bit alpha blend: out = (out*(16-a) + lum*a) >> 4 */
167 out[j] = ( (uint16_t) out[j] * ( 16 - (uint16_t) alpha[j] ) +
168 (uint16_t) lum[j] * (uint16_t) alpha[j] ) >> 4;
170 * Set the chroma (colour) based on whether there is
171 * any alpha at all. Don't try to blend with the picture.
173 chromaU = getU(buf->data, title->width, title->height,
174 offset_left+j, offset_top+i);
176 chromaV = getV(buf->data, title->width, title->height,
177 offset_left+j, offset_top+i);
182 * Add the chroma from the sub-picture, as this is
183 * not a transparent element.
/* NOTE(review): the alpha>0 guard implied by the comment above is on a
   line missing from this chunk. */
185 *chromaU = sub_chromaU[j];
186 *chromaV = sub_chromaV[j];
/* NOTE(review): per-row advances of lum, alpha and out are on lines
   missing from this chunk; only the chroma advances are visible. */
194 sub_chromaU += sub->width;
195 sub_chromaV += sub->width;
/* Consume the subtitle: close it and NULL the caller's pointer. */
199 hb_buffer_close( _sub );
/*
 * renderWork: per-frame pipeline stage.  Runs the job's filter chain,
 * burns in queued subtitles, crops/scales into a fresh render buffer,
 * and — when VFR is on — routes output through a short delay queue so
 * the durations of recent frames can be stretched to make up for frames
 * dropped by detelecine.  Returns via *buf_out.
 * NOTE(review): this chunk is missing lines (braces, some conditions,
 * the EOF check around line 213, and the filter-call arguments); the
 * comments below hedge wherever behavior depends on elided code.
 */
202 int renderWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
203 hb_buffer_t ** buf_out )
205 hb_work_private_t * pv = w->private_data;
206 hb_job_t * job = pv->job;
207 hb_title_t * title = job->title;
208 hb_buffer_t * in = *buf_in, * buf_tmp_in = *buf_in;
209 hb_buffer_t * ivtc_buffer = NULL;
213 /* If the input buffer is end of stream, send out an empty one
214 * to the next stage as well. Note that this will result in us
215 * losing the current contents of the delay queue.
217 *buf_out = job->indepth_scan? NULL : hb_buffer_init(0);
222 * During the indepth_scan ditch the buffers here before applying filters or attempting to
225 if( job->indepth_scan )
/* Queue this frame's subtitle (or an empty placeholder) so subtitles
   stay in step with frames that filters may delay or drop. */
231 /* Push subtitles onto queue just in case we need to delay a frame */
234 hb_fifo_push( pv->subtitle_queue, in->sub );
238 hb_fifo_push( pv->subtitle_queue, hb_buffer_init(0) );
241 /* If there's a chapter mark remember it in case we delay or drop its frame */
242 if( in->new_chap && job->vfr )
244 pv->chapter_time = in->start;
245 pv->chapter_val = in->new_chap;
/* Output buffer sized for one YUV420P frame (3/2 bytes per pixel). */
249 /* Setup render buffer */
250 hb_buffer_t * buf_render = hb_buffer_init( 3 * job->width * job->height / 2 );
/* Run the filter chain; buf_tmp_in tracks the current working frame. */
255 int filter_count = hb_list_count( job->filters );
258 for( i = 0; i < filter_count; i++ )
260 hb_filter_object_t * filter = hb_list_item( job->filters, i );
267 hb_buffer_t * buf_tmp_out = NULL;
269 int result = filter->work( buf_tmp_in,
274 filter->private_data );
277 * FILTER_OK: set temp buffer to filter buffer, continue
278 * FILTER_DELAY: set temp buffer to NULL, abort
279 * FILTER_DROP: set temp buffer to NULL, pop subtitle, abort
280 * FILTER_FAILED: leave temp buffer alone, continue
282 if( result == FILTER_OK )
284 buf_tmp_in = buf_tmp_out;
286 else if( result == FILTER_DELAY )
291 else if( result == FILTER_DROP )
295 /* We need to compensate for the time lost by dropping this frame.
296 Spread its duration out in quarters, because usually dropped frames
297 maintain a 1-out-of-5 pattern and this spreads it out amongst the remaining ones.
298 Store these in the lost_time array, which has 4 slots in it.
299 Because not every frame duration divides evenly by 4, and we can't lose the
300 remainder, we have to go through an awkward process to preserve it in the 4th array index. */
301 uint64_t temp_duration = buf_tmp_out->stop - buf_tmp_out->start;
302 pv->lost_time[0] += (temp_duration / 4);
303 pv->lost_time[1] += (temp_duration / 4);
304 pv->lost_time[2] += (temp_duration / 4);
/* slot 3 absorbs the division remainder so no time is lost */
305 pv->lost_time[3] += ( temp_duration - (temp_duration / 4) - (temp_duration / 4) - (temp_duration / 4) );
307 pv->total_lost_time += temp_duration;
308 pv->dropped_frames++;
310 /* Pop the frame's subtitle and dispose of it. */
311 hb_buffer_t * subtitles = hb_fifo_get( pv->subtitle_queue );
312 hb_buffer_close( &subtitles );
318 buf_tmp_in = buf_tmp_out;
326 /* Cache frame start and stop times, so we can renumber
327 time stamps if dropping frames for VFR. */
329 for( i = 3; i >= 1; i-- )
331 pv->last_start[i] = pv->last_start[i-1];
332 pv->last_stop[i] = pv->last_stop[i-1];
335 /* In order to make sure we have continuous time stamps, store
336 the current frame's duration as starting when the last one stopped. */
337 pv->last_start[0] = pv->last_stop[1];
338 pv->last_stop[0] = pv->last_start[0] + (in->stop - in->start);
/* Pull this frame's queued subtitle and burn it in before scaling. */
341 /* Apply subtitles */
344 hb_buffer_t * subtitles = hb_fifo_get( pv->subtitle_queue );
347 ApplySub( job, buf_tmp_in, &subtitles );
351 /* Apply crop/scale if specified */
352 if( buf_tmp_in && pv->context )
354 avpicture_fill( &pv->pic_tmp_in, buf_tmp_in->data,
356 title->width, title->height );
358 avpicture_fill( &pv->pic_tmp_out, buf_render->data,
360 job->width, job->height );
362 // Crop; this alters the pointer to the data to point to the correct place for cropped frame
363 av_picture_crop( &pv->pic_tmp_crop, &pv->pic_tmp_in, PIX_FMT_YUV420P,
364 job->crop[0], job->crop[2] );
366 // Scale pic_crop into pic_render according to the context set up in renderInit
367 sws_scale(pv->context,
368 pv->pic_tmp_crop.data, pv->pic_tmp_crop.linesize,
369 0, title->height - (job->crop[0] + job->crop[1]),
370 pv->pic_tmp_out.data, pv->pic_tmp_out.linesize);
372 hb_buffer_copy_settings( buf_render, buf_tmp_in );
374 buf_tmp_in = buf_render;
377 /* Set output to render buffer */
378 (*buf_out) = buf_render;
380 if( buf_tmp_in == NULL )
382 /* Teardown and cleanup buffers if we are emitting NULL */
383 if( buf_in && *buf_in )
385 hb_buffer_close( buf_in );
388 if( buf_out && *buf_out )
390 hb_buffer_close( buf_out );
/* Filters ran but didn't write into buf_render; copy the result over. */
394 else if( buf_tmp_in != buf_render )
396 /* Copy temporary results and settings into render buffer */
397 memcpy( buf_render->data, buf_tmp_in->data, buf_render->size );
398 hb_buffer_copy_settings( buf_render, buf_tmp_in );
/* VFR: detour the output through the delay queue so the previous two
   frames are still on hand for duration rewrites. */
401 if (*buf_out && job->vfr)
403 hb_fifo_push( pv->delay_queue, *buf_out );
408 * Keep the last three frames in our queue, this ensures that we have the last
409 * two always in there should we need to rewrite the durations on them.
414 if( hb_fifo_size( pv->delay_queue ) >= 3 )
416 *buf_out = hb_fifo_get( pv->delay_queue );
420 if( *buf_out && job->vfr)
422 /* The current frame exists. That means it hasn't been dropped by a filter.
423 Make it accessible as ivtc_buffer so we can edit its duration if needed. */
424 ivtc_buffer = *buf_out;
426 if( pv->lost_time[3] > 0 )
429 * A frame's been dropped earlier by VFR detelecine.
430 * Gotta make up the lost time. This will also
431 * slow down the video.
432 * The dropped frame's has to be accounted for, so
433 * divvy it up amongst the 4 frames left behind.
434 * This is what the delay_queue is for;
435 * telecined sequences start 2 frames before
436 * the dropped frame, so to slow down the right
437 * ones you need a 2 frame delay between
438 * reading input and writing output.
441 /* We want to extend the outputted frame's duration by the value
442 stored in the 4th slot of the lost_time array. Because we need
443 to adjust all the values in the array so they're contiguous,
444 extend the duration inside the array first, before applying
445 it to the current frame buffer. */
446 pv->last_stop[3] += pv->lost_time[3];
448 /* Log how much time has been added back in to the video. */
449 pv->total_gained_time += pv->lost_time[3];
451 /* We've pulled the 4th value from the lost_time array
452 and added it to the last_stop array's 4th slot. Now, rotate the
453 lost_time array so the 4th slot now holds the 3rd's value, and
454 so on down the line, and set the 0 index to a value of 0. */
456 for( i=2; i >= 0; i--)
458 pv->lost_time[i+1] = pv->lost_time[i];
460 pv->lost_time[0] = 0;
462 /* Log how many frames have had their durations extended. */
463 pv->extended_frames++;
466 /* We can't use the given time stamps. Previous frames
467 might already have been extended, throwing off the
468 raw values fed to render.c. Instead, their
469 stop and start times are stored in arrays.
470 The 4th cached frame will be the to use.
471 If it needed its duration extended to make up
472 lost time, it will have happened above. */
473 ivtc_buffer->start = pv->last_start[3];
474 ivtc_buffer->stop = pv->last_stop[3];
476 /* Set the 3rd cached frame to start when this one stops,
477 and so on down the line. If any of them need to be
478 extended as well to make up lost time, it'll be handled
479 on the next loop through the renderer. */
481 for (i = 2; i >= 0; i--)
483 int temp_duration = pv->last_stop[i] - pv->last_start[i];
484 pv->last_start[i] = pv->last_stop[i+1];
485 pv->last_stop[i] = pv->last_start[i] + temp_duration;
488 /* If we have a pending chapter mark and this frame is at
489 or after the time of the mark, mark this frame & clear
491 if( pv->chapter_time && pv->chapter_time <= ivtc_buffer->start )
493 ivtc_buffer->new_chap = pv->chapter_val;
494 pv->chapter_time = 0;
502 void renderClose( hb_work_object_t * w )
504 hb_work_private_t * pv = w->private_data;
506 hb_log("render: lost time: %lld (%i frames)", pv->total_lost_time, pv->dropped_frames);
507 hb_log("render: gained time: %lld (%i frames) (%lld not accounted for)", pv->total_gained_time, pv->extended_frames, pv->total_lost_time - pv->total_gained_time);
508 if (pv->dropped_frames)
509 hb_log("render: average dropped frame duration: %lld", (pv->total_lost_time / pv->dropped_frames) );
511 /* Cleanup subtitle queue */
512 if( pv->subtitle_queue )
514 hb_fifo_close( &pv->subtitle_queue );
517 if( pv->delay_queue )
519 hb_fifo_close( &pv->delay_queue );
522 /* Cleanup render work structure */
524 w->private_data = NULL;
/*
 * renderInit: allocate the private render state, set up the libswscale
 * crop/scale context when the job's geometry differs from the title's,
 * create the subtitle/delay FIFOs, zero the VFR bookkeeping, and init
 * the job's filter chain.
 * NOTE(review): this chunk is missing lines (braces, the swsflags
 * declaration, the #ifndef matching the #endif below, the source pixel
 * format argument inside sws_getContext, and the tail of the filter
 * init call past the last visible line); hedged comments mark these.
 */
527 int renderInit( hb_work_object_t * w, hb_job_t * job )
529 /* Allocate new private work object */
/* NOTE(review): calloc result is used unchecked — an OOM here would
   dereference NULL later; confirm project policy before changing. */
530 hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
532 w->private_data = pv;
/* Lanczos scaling; accurate rounding is gated by a preprocessor
   conditional whose opening line is missing from this chunk. */
535 swsflags = SWS_LANCZOS;
537 swsflags |= SWS_ACCURATE_RND;
538 #endif /* __x86_64__ */
540 /* Get title and title size */
541 hb_title_t * title = job->title;
543 /* If crop or scale is specified, setup rescale context */
544 if( job->crop[0] || job->crop[1] || job->crop[2] || job->crop[3] ||
545 job->width != title->width || job->height != title->height )
/* Source dimensions are the title size minus the crop on each axis.
   NOTE(review): the source pixel-format argument line is missing from
   this chunk. */
547 pv->context = sws_getContext(title->width - (job->crop[2] + job->crop[3]),
548 title->height - (job->crop[0] + job->crop[1]),
550 job->width, job->height, PIX_FMT_YUV420P,
551 swsflags, NULL, NULL, NULL);
554 /* Setup FIFO queue for subtitle cache */
555 pv->subtitle_queue = hb_fifo_init( 8 );
556 pv->delay_queue = hb_fifo_init( 8 );
558 /* VFR IVTC needs a bunch of time-keeping variables to track
559 how many frames are dropped, how many are extended, what the
560 last 4 start and stop times were (so they can be modified),
561 how much time has been lost and gained overall, how much time
562 the latest 4 frames should be extended by, and where chapter
563 markers are (so they can be saved if their frames are dropped.) */
564 pv->dropped_frames = 0;
565 pv->extended_frames = 0;
566 pv->last_start[0] = 0;
567 pv->last_stop[0] = 0;
568 pv->total_lost_time = 0;
569 pv->total_gained_time = 0;
570 pv->lost_time[0] = 0; pv->lost_time[1] = 0; pv->lost_time[2] = 0; pv->lost_time[3] = 0;
571 pv->chapter_time = 0;
/* Initialize each filter in the job's chain with the job geometry. */
575 /* TODO: Move to work.c? */
578 int filter_count = hb_list_count( job->filters );
581 for( i = 0; i < filter_count; i++ )
583 hb_filter_object_t * filter = hb_list_item( job->filters, i );
585 if( !filter ) continue;
/* NOTE(review): the remaining arguments of filter->init continue past
   the end of this chunk. */
587 filter->private_data = filter->init( PIX_FMT_YUV420P,