1 /* $Id: render.c,v 1.17 2005/04/14 17:37:54 titer Exp $
3 This file is part of the HandBrake source code.
4 Homepage: <http://handbrake.m0k.org/>.
5 It may be used under the terms of the GNU General Public License. */
9 #include "ffmpeg/avcodec.h"
10 #include "ffmpeg/swscale.h"
/* Private state for the render work object.
   NOTE(review): the struct's braces and several members referenced
   later in this file (e.g. job, pic_tmp_in, chapter_time, chapter_val,
   dropped_frames, extended_frames) are not visible in this chunk. */
struct hb_work_private_s
struct SwsContext * context;      /* libswscale context; NULL when no crop/scale is needed */
AVPicture pic_tmp_crop;           /* view into the input picture after cropping */
AVPicture pic_tmp_out;            /* wraps the render output buffer for sws_scale */
hb_buffer_t * buf_scale;
hb_fifo_t * subtitle_queue;       /* subtitles queued in step with frames, in case a frame is delayed */
hb_fifo_t * delay_queue;          /* short frame queue so recent durations can be rewritten (VFR) */
uint64_t last_start[4];           /* start times of the 4 most recent frames */
uint64_t last_stop[4];            /* stop times of the 4 most recent frames */
uint64_t lost_time[4];            /* time lost to dropped frames, spread across 4 slots */
uint64_t total_lost_time;         /* running total of time removed by dropped frames */
uint64_t total_gained_time;       /* running total of time added back to surviving frames */
/* Work-object entry points, referenced by the hb_render descriptor below. */
int renderInit( hb_work_object_t *, hb_job_t * );
int renderWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
void renderClose( hb_work_object_t * );

/* Render work-object descriptor.
   NOTE(review): the initializer list is not visible in this chunk. */
hb_work_object_t hb_render =
/*
 * Utility function that finds where the U is in the YUV sub-picture.
 *
 * The Y data is at the top, followed by U and V, but the U and V
 * are half the width of the Y, i.e. each chroma element covers 2x2
 * of the Y pixels (4:2:0 planar layout).
 *
 * data   - base of the planar YUV420 picture
 * width  - luma width of the picture
 * height - luma height of the picture
 * x, y   - luma-plane coordinates; halved to index the chroma plane
 *
 * Returns a pointer to the U sample covering (x, y).
 */
static uint8_t *getU(uint8_t *data, int width, int height, int x, int y)
{
    /* Skip the full Y plane (width*height bytes), then index the
       half-resolution U plane, whose row stride is width/2. */
    return(&data[(((y/2) * (width/2)) + (x/2)) + (width*height)]);
}
/*
 * Companion to getU(): finds where the V is in the YUV sub-picture.
 *
 * The V plane follows the U plane, which holds
 * (width/2) * (height/2) == (width*height)/4 bytes.
 *
 * Returns a pointer to the V sample covering luma coordinate (x, y).
 */
static uint8_t *getV(uint8_t *data, int width, int height, int x, int y)
{
    /* Fix: the return expression was truncated after the trailing '+';
       the ((width*height)/4) term skips the U plane so the index lands
       in the V plane. */
    return(&data[(((y/2) * (width/2)) + (x/2)) + (width*height) +
                 ((width*height)/4)]);
}
/*
 * ApplySub: burn a decoded subtitle bitmap into the video frame 'buf',
 * positioning it so it stays out of the cropped area and out of a
 * margin near the picture edges, then blend its luma by alpha and copy
 * its chroma.  Closes the subtitle buffer when done.
 *
 * NOTE(review): several lines of this function are not visible in this
 * chunk (the second parameter line declaring '_sub', the braces, the
 * margin_percent assignment, the 'lum' initialization, the alpha test
 * around the chroma writes, and the per-row advances of lum/alpha/out).
 * Comments below describe only what the visible code shows.
 */
static void ApplySub( hb_job_t * job, hb_buffer_t * buf,
hb_buffer_t * sub = *_sub;
hb_title_t * title = job->title;
int i, j, offset_top, offset_left, margin_top, margin_percent;
uint8_t * lum, * alpha, * out, * sub_chromaU, * sub_chromaV;

/*
 * Percent of height of picture that forms a margin that subtitles
 * should not be displayed within.
 */

/*
 * If necessary, move the subtitle so it is not in a cropped zone.
 * When it won't fit, we center it so we lose as much on both ends.
 * Otherwise we try to leave a 20px or 2% margin around it.
 */
margin_top = ( ( title->height - job->crop[0] - job->crop[1] ) *
margin_percent ) / 100;

/*
 * A maximum margin of 20px regardless of height of the picture.
 */
/* NOTE(review): the remainder of this condition is not visible. */
if( sub->height > title->height - job->crop[0] - job->crop[1] -
/*
 * The subtitle won't fit in the cropped zone, so center
 * it vertically so we fit in as much as we can.
 */
offset_top = job->crop[0] + ( title->height - job->crop[0] -
job->crop[1] - sub->height ) / 2;
else if( sub->y < job->crop[0] + margin_top )
/*
 * The subtitle fits in the cropped zone, but is currently positioned
 * within our top margin, so move it outside of our margin.
 */
offset_top = job->crop[0] + margin_top;
else if( sub->y > title->height - job->crop[1] - margin_top - sub->height )
/*
 * The subtitle fits in the cropped zone, and is not within the top
 * margin but is within the bottom margin, so move it to be above
 * the bottom margin.
 */
offset_top = title->height - job->crop[1] - margin_top - sub->height;
/*
 * The subtitle is fine where it is.
 */

/* Horizontal placement: same logic with a fixed 20px margin
   (40 == 20px on each side). */
if( sub->width > title->width - job->crop[2] - job->crop[3] - 40 )
offset_left = job->crop[2] + ( title->width - job->crop[2] -
job->crop[3] - sub->width ) / 2;
else if( sub->x < job->crop[2] + 20 )
offset_left = job->crop[2] + 20;
else if( sub->x > title->width - job->crop[3] - 20 - sub->width )
offset_left = title->width - job->crop[3] - 20 - sub->width;
/* Fallback: keep the subtitle's own x position. */
offset_left = sub->x;

/* The subtitle buffer holds four width*height planes in order:
   luma, alpha, U, V.  NOTE(review): 'lum = sub->data;' is not
   visible in this chunk but 'lum' is used as the plane base here. */
alpha = lum + sub->width * sub->height;
sub_chromaU = alpha + sub->width * sub->height;
sub_chromaV = sub_chromaU + sub->width * sub->height;

/* Destination: top-left corner where the subtitle lands in the frame. */
out = buf->data + offset_top * title->width + offset_left;

for( i = 0; i < sub->height; i++ )
/* Clip rows that fall outside the picture. */
if( offset_top + i >= 0 && offset_top + i < title->height )
for( j = 0; j < sub->width; j++ )
/* Clip columns that fall outside the picture. */
if( offset_left + j >= 0 && offset_left + j < title->width )
uint8_t *chromaU, *chromaV;

/*
 * Merge the luminance and alpha with the picture
 * (alpha is 0..16; >> 4 renormalizes the blend).
 */
out[j] = ( (uint16_t) out[j] * ( 16 - (uint16_t) alpha[j] ) +
(uint16_t) lum[j] * (uint16_t) alpha[j] ) >> 4;
/*
 * Set the chroma (colour) based on whether there is
 * any alpha at all. Don't try to blend with the picture.
 */
chromaU = getU(buf->data, title->width, title->height,
offset_left+j, offset_top+i);

chromaV = getV(buf->data, title->width, title->height,
offset_left+j, offset_top+i);

/*
 * Add the chroma from the sub-picture, as this is
 * not a transparent element.
 */
*chromaU = sub_chromaU[j];
*chromaV = sub_chromaV[j];

/* Advance to the next source chroma row.
   NOTE(review): advances for lum/alpha/out are not visible here. */
sub_chromaU += sub->width;
sub_chromaV += sub->width;

/* Done with the subtitle buffer; release it and NULL the caller's pointer. */
hb_buffer_close( _sub );
/*
 * renderWork: per-frame pipeline stage.  Runs the input frame through
 * the job's filter chain, burns in any queued subtitle, crops/scales
 * into a fresh render buffer, and pushes the result through a short
 * delay queue so frame durations can be rewritten when VFR detelecine
 * drops frames.
 *
 * NOTE(review): many lines of this function (braces, returns, and some
 * statements such as the declaration of 'i' and parts of the filter
 * call) are not visible in this chunk; comments describe only the
 * visible code.
 */
int renderWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
hb_buffer_t ** buf_out )
hb_work_private_t * pv = w->private_data;
hb_job_t * job = pv->job;
hb_title_t * title = job->title;
hb_buffer_t * in = *buf_in, * buf_tmp_in = *buf_in;
hb_buffer_t * ivtc_buffer = NULL;

/* If the input buffer is end of stream, send out an empty one
 * to the next stage as well. Note that this will result in us
 * losing the current contents of the delay queue.
 */
*buf_out = hb_buffer_init(0);

/*
 * During the indepth_scan ditch the buffers here before applying filters or attempting to
 * use the subtitles.
 */
if( job->indepth_scan )

/* Push subtitles onto queue just in case we need to delay a frame */
hb_fifo_push( pv->subtitle_queue, in->sub );
/* No subtitle on this frame: push an empty placeholder to keep the
   queue in step with the frame stream. */
hb_fifo_push( pv->subtitle_queue, hb_buffer_init(0) );

/* If there's a chapter mark remember it in case we delay or drop its frame */
pv->chapter_time = in->start;
pv->chapter_val = in->new_chap;

/* Setup render buffer (YUV420: 3/2 bytes per pixel) */
hb_buffer_t * buf_render = hb_buffer_init( 3 * job->width * job->height / 2 );

/* Apply filters */
int filter_count = hb_list_count( job->filters );

for( i = 0; i < filter_count; i++ )
hb_filter_object_t * filter = hb_list_item( job->filters, i );

hb_buffer_t * buf_tmp_out = NULL;

/* NOTE(review): the middle arguments of this call are not visible. */
int result = filter->work( buf_tmp_in,
filter->private_data );

/*
 * FILTER_OK: set temp buffer to filter buffer, continue
 * FILTER_DELAY: set temp buffer to NULL, abort
 * FILTER_DROP: set temp buffer to NULL, pop subtitle, abort
 * FILTER_FAILED: leave temp buffer alone, continue
 */
if( result == FILTER_OK )
buf_tmp_in = buf_tmp_out;
else if( result == FILTER_DELAY )
else if( result == FILTER_DROP )
/* We need to compensate for the time lost by dropping this frame.
   Spread its duration out in quarters, because usually dropped frames
   maintain a 1-out-of-5 pattern and this spreads it out amongst the remaining ones.
   Store these in the lost_time array, which has 4 slots in it.
   Because not every frame duration divides evenly by 4, and we can't lose the
   remainder, we have to go through an awkward process to preserve it in the 4th array index. */
uint64_t temp_duration = buf_tmp_out->stop - buf_tmp_out->start;
pv->lost_time[0] += (temp_duration / 4);
pv->lost_time[1] += (temp_duration / 4);
pv->lost_time[2] += (temp_duration / 4);
/* 4th slot gets the remainder so no time is lost to rounding. */
pv->lost_time[3] += ( temp_duration - (temp_duration / 4) - (temp_duration / 4) - (temp_duration / 4) );

pv->total_lost_time += temp_duration;
pv->dropped_frames++;

/* Drop the subtitle that was queued for this dropped frame. */
hb_fifo_get( pv->subtitle_queue );

buf_tmp_in = buf_tmp_out;

/* Cache frame start and stop times, so we can renumber
   time stamps if dropping frames for VFR. */
for( i = 3; i >= 1; i-- )
pv->last_start[i] = pv->last_start[i-1];
pv->last_stop[i] = pv->last_stop[i-1];

/* In order to make sure we have continuous time stamps, store
   the current frame's duration as starting when the last one stopped. */
pv->last_start[0] = pv->last_stop[1];
pv->last_stop[0] = pv->last_start[0] + (in->stop - in->start);

/* Apply subtitles */
hb_buffer_t * subtitles = hb_fifo_get( pv->subtitle_queue );

ApplySub( job, buf_tmp_in, &subtitles );

/* Apply crop/scale if specified */
if( buf_tmp_in && pv->context )
/* Wrap the filtered frame and the render buffer as AVPictures. */
avpicture_fill( &pv->pic_tmp_in, buf_tmp_in->data,
title->width, title->height );

avpicture_fill( &pv->pic_tmp_out, buf_render->data,
job->width, job->height );

// Crop; this alters the pointer to the data to point to the correct place for cropped frame
av_picture_crop( &pv->pic_tmp_crop, &pv->pic_tmp_in, PIX_FMT_YUV420P,
job->crop[0], job->crop[2] );

// Scale pic_crop into pic_render according to the context set up in renderInit
sws_scale(pv->context,
pv->pic_tmp_crop.data, pv->pic_tmp_crop.linesize,
0, title->height - (job->crop[0] + job->crop[1]),
pv->pic_tmp_out.data, pv->pic_tmp_out.linesize);

hb_buffer_copy_settings( buf_render, buf_tmp_in );

buf_tmp_in = buf_render;

/* Set output to render buffer */
(*buf_out) = buf_render;

if( buf_tmp_in == NULL )
/* Teardown and cleanup buffers if we are emitting NULL */
if( buf_in && *buf_in )
hb_buffer_close( buf_in );
if( buf_out && *buf_out )
hb_buffer_close( buf_out );
else if( buf_tmp_in != buf_render )
/* Copy temporary results and settings into render buffer */
memcpy( buf_render->data, buf_tmp_in->data, buf_render->size );
hb_buffer_copy_settings( buf_render, buf_tmp_in );

/* Queue the finished frame so its duration can still be edited later. */
hb_fifo_push( pv->delay_queue, *buf_out );

/*
 * Keep the last three frames in our queue, this ensures that we have the last
 * two always in there should we need to rewrite the durations on them.
 */
if( hb_fifo_size( pv->delay_queue ) >= 3 )
*buf_out = hb_fifo_get( pv->delay_queue );

/* The current frame exists. That means it hasn't been dropped by a filter.
   Make it accessible as ivtc_buffer so we can edit its duration if needed. */
ivtc_buffer = *buf_out;

if( pv->lost_time[3] > 0 )
/*
 * A frame's been dropped earlier by VFR detelecine.
 * Gotta make up the lost time. This will also
 * slow down the video.
 * The dropped frame's time has to be accounted for, so
 * divvy it up amongst the 4 frames left behind.
 * This is what the delay_queue is for;
 * telecined sequences start 2 frames before
 * the dropped frame, so to slow down the right
 * ones you need a 2 frame delay between
 * reading input and writing output.
 */

/* We want to extend the outputted frame's duration by the value
   stored in the 4th slot of the lost_time array. Because we need
   to adjust all the values in the array so they're contiguous,
   extend the duration inside the array first, before applying
   it to the current frame buffer. */
pv->last_stop[3] += pv->lost_time[3];

/* Log how much time has been added back in to the video. */
pv->total_gained_time += pv->lost_time[3];

/* We've pulled the 4th value from the lost_time array
   and added it to the last_stop array's 4th slot. Now, rotate the
   lost_time array so the 4th slot now holds the 3rd's value, and
   so on down the line, and set the 0 index to a value of 0. */
for( i=2; i >= 0; i--)
pv->lost_time[i+1] = pv->lost_time[i];
pv->lost_time[0] = 0;

/* Log how many frames have had their durations extended. */
pv->extended_frames++;

/* We can't use the given time stamps. Previous frames
   might already have been extended, throwing off the
   raw values fed to render.c. Instead, their
   stop and start times are stored in arrays.
   The 4th cached frame will be the one to use.
   If it needed its duration extended to make up
   lost time, it will have happened above. */
ivtc_buffer->start = pv->last_start[3];
ivtc_buffer->stop = pv->last_stop[3];

/* Set the 3rd cached frame to start when this one stops,
   and so on down the line. If any of them need to be
   extended as well to make up lost time, it'll be handled
   on the next loop through the renderer. */
for (i = 2; i >= 0; i--)
int temp_duration = pv->last_stop[i] - pv->last_start[i];
pv->last_start[i] = pv->last_stop[i+1];
pv->last_stop[i] = pv->last_start[i] + temp_duration;

/* If we have a pending chapter mark and this frame is at
   or after the time of the mark, mark this frame & clear
   the pending mark. */
if( pv->chapter_time && pv->chapter_time <= ivtc_buffer->start )
ivtc_buffer->new_chap = pv->chapter_val;
pv->chapter_time = 0;
/*
 * renderClose: log the VFR timing statistics accumulated during the
 * encode and tear down the renderer's private state.
 * NOTE(review): braces and the free of 'pv' are not visible in this
 * chunk.
 */
void renderClose( hb_work_object_t * w )
hb_work_private_t * pv = w->private_data;

/* Summarize how much time was dropped and how much was added back. */
hb_log("render: lost time: %lld (%i frames)", pv->total_lost_time, pv->dropped_frames);
hb_log("render: gained time: %lld (%i frames) (%lld not accounted for)", pv->total_gained_time, pv->extended_frames, pv->total_lost_time - pv->total_gained_time);
if (pv->dropped_frames)
hb_log("render: average dropped frame duration: %lld", (pv->total_lost_time / pv->dropped_frames) );

/* Cleanup subtitle queue */
if( pv->subtitle_queue )
hb_fifo_close( &pv->subtitle_queue );

if( pv->delay_queue )
hb_fifo_close( &pv->delay_queue );

/* Cleanup render work structure */
w->private_data = NULL;
/*
 * renderInit: allocate and initialize the renderer's private state,
 * set up the libswscale crop/scale context if the job needs one, and
 * initialize each filter in the job's filter chain.
 * NOTE(review): this definition runs past the end of the visible
 * chunk (the filter->init() call is cut off), so comments describe
 * only the visible code.
 */
int renderInit( hb_work_object_t * w, hb_job_t * job )
/* Allocate new private work object */
hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
w->private_data = pv;

/* Get title and title size */
hb_title_t * title = job->title;

/* If crop or scale is specified, setup rescale context */
if( job->crop[0] || job->crop[1] || job->crop[2] || job->crop[3] ||
job->width != title->width || job->height != title->height )
/* Source dimensions are the title size minus the crop amounts.
   NOTE(review): the source pixel-format argument line is not visible. */
pv->context = sws_getContext(title->width - (job->crop[2] + job->crop[3]),
title->height - (job->crop[0] + job->crop[1]),
job->width, job->height, PIX_FMT_YUV420P,
(uint16_t)(SWS_LANCZOS|SWS_ACCURATE_RND), NULL, NULL, NULL);

/* Setup FIFO queue for subtitle cache */
pv->subtitle_queue = hb_fifo_init( 8 );
pv->delay_queue = hb_fifo_init( 8 );

/* VFR IVTC needs a bunch of time-keeping variables to track
   how many frames are dropped, how many are extended, what the
   last 4 start and stop times were (so they can be modified),
   how much time has been lost and gained overall, how much time
   the latest 4 frames should be extended by, and where chapter
   markers are (so they can be saved if their frames are dropped.) */
pv->dropped_frames = 0;
pv->extended_frames = 0;
pv->last_start[0] = 0;
pv->last_stop[0] = 0;
pv->total_lost_time = 0;
pv->total_gained_time = 0;
pv->lost_time[0] = 0; pv->lost_time[1] = 0; pv->lost_time[2] = 0; pv->lost_time[3] = 0;
pv->chapter_time = 0;

/* Setup filters */
/* TODO: Move to work.c? */
int filter_count = hb_list_count( job->filters );

for( i = 0; i < filter_count; i++ )
hb_filter_object_t * filter = hb_list_item( job->filters, i );

if( !filter ) continue;

/* NOTE(review): remaining arguments of this call are past the end
   of the visible chunk. */
filter->private_data = filter->init( PIX_FMT_YUV420P,