1 /* $Id: render.c,v 1.17 2005/04/14 17:37:54 titer Exp $
3 This file is part of the HandBrake source code.
4 Homepage: <http://handbrake.fr/>.
5 It may be used under the terms of the GNU General Public License. */
/* Private state for the render work object: libswscale crop/scale
 * context, subtitle/delay FIFOs, and the VFR (variable frame rate)
 * timing bookkeeping used to redistribute the duration of dropped
 * frames.
 * NOTE(review): this view is elided -- members referenced elsewhere in
 * the file (job, pic_tmp_in, chapter_time, chapter_val, dropped_frames,
 * extended_frames) are declared on lines not shown here. */
10 struct hb_work_private_s
/* Set up in renderInit only when cropping or resizing is needed;
 * renderWork skips the scale step when this is NULL. */
14 struct SwsContext * context;
/* AVPicture wrappers used by the crop (av_picture_crop) and scale
 * (sws_scale) steps in renderWork. */
16 AVPicture pic_tmp_crop;
17 AVPicture pic_tmp_out;
18 hb_buffer_t * buf_scale;
/* Subtitles travel alongside frames; queued so they stay in lock-step
 * with frames that filters delay or drop. */
19 hb_fifo_t * subtitle_queue;
/* Holds the last few output frames so their durations can still be
 * rewritten when VFR drops a later frame. */
20 hb_fifo_t * delay_queue;
/* Start/stop times of the last 4 frames (renumbered for VFR). */
23 uint64_t last_start[4];
24 uint64_t last_stop[4];
/* Duration lost to dropped frames, spread over 4 slots; slot 3 is
 * added back onto the oldest delayed frame in renderWork. */
25 uint64_t lost_time[4];
/* Running totals for the summary logged in renderClose. */
26 uint64_t total_lost_time;
27 uint64_t total_gained_time;
/* Work-object entry points for the render stage (wired into hb_render
 * below). */
32 int renderInit( hb_work_object_t *, hb_job_t * );
33 int renderWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
34 void renderClose( hb_work_object_t * );
/* Render work object definition (initializer elided in this view). */
36 hb_work_object_t hb_render =
/*
 * Locate the U (Cb) sample that covers luma position (x,y) inside a
 * packed YUV420 frame laid out as [Y plane][U plane][V plane].  Each
 * chroma sample covers a 2x2 block of luma pixels, so the chroma plane
 * is half the (rounded-up) width of the luma plane.
 */
static uint8_t *getU(uint8_t *data, int width, int height, int x, int y)
{
    int      chroma_stride = (width + 1) >> 1;
    uint8_t *u_plane       = data + width * height;

    return u_plane + (y >> 1) * chroma_stride + (x >> 1);
}
/*
 * Locate the V (Cr) sample that covers luma position (x,y); same
 * layout as getU(), but offset past both the Y plane and the
 * (rounded-up) half-size U plane.
 */
static uint8_t *getV(uint8_t *data, int width, int height, int x, int y)
{
    int      chroma_w = (width  + 1) >> 1;
    int      chroma_h = (height + 1) >> 1;
    uint8_t *v_plane  = data + width * height + chroma_w * chroma_h;

    return v_plane + (y >> 1) * chroma_w + (x >> 1);
}
/*
 * ApplySub: burn the subtitle bitmap *_sub into a YUV420 frame (buf)
 * with the title's dimensions.  The subtitle is first repositioned so
 * it avoids the cropped borders and keeps a margin, then its luma is
 * alpha-blended into the frame (alpha ranges over 0..16) and its chroma
 * copied wherever the subtitle is opaque.  The subtitle buffer is
 * closed before returning.
 * NOTE(review): elided view -- the initialisation of `lum` (the
 * subtitle's luma plane, presumably sub->data), most braces and any
 * early-out checks are on lines not shown here.
 */
65 static void ApplySub( hb_job_t * job, hb_buffer_t * buf,
68 hb_buffer_t * sub = *_sub;
69 hb_title_t * title = job->title;
70 int i, j, offset_top, offset_left, margin_top, margin_percent;
71 uint8_t * lum, * alpha, * out, * sub_chromaU, * sub_chromaV;
74 * Percent of height of picture that form a margin that subtitles
75 * should not be displayed within.
85 * If necessary, move the subtitle so it is not in a cropped zone.
86 * When it won't fit, we center it so we lose as much on both ends.
87 * Otherwise we try to leave a 20px or 2% margin around it.
89 margin_top = ( ( title->height - job->crop[0] - job->crop[1] ) *
90 margin_percent ) / 100;
95 * A maximum margin of 20px regardless of height of the picture.
/* Vertical placement: center if too tall for the cropped area,
 * otherwise clamp into [top margin, bottom margin]. */
100 if( sub->height > title->height - job->crop[0] - job->crop[1] -
104 * The subtitle won't fit in the cropped zone, so center
105 * it vertically so we fit in as much as we can.
107 offset_top = job->crop[0] + ( title->height - job->crop[0] -
108 job->crop[1] - sub->height ) / 2;
110 else if( sub->y < job->crop[0] + margin_top )
113 * The subtitle fits in the cropped zone, but is currently positioned
114 * within our top margin, so move it outside of our margin.
116 offset_top = job->crop[0] + margin_top;
118 else if( sub->y > title->height - job->crop[1] - margin_top - sub->height )
121 * The subtitle fits in the cropped zone, and is not within the top
122 * margin but is within the bottom margin, so move it to be above
125 offset_top = title->height - job->crop[1] - margin_top - sub->height;
130 * The subtitle is fine where it is.
/* Horizontal placement: same scheme with a fixed 20px margin. */
135 if( sub->width > title->width - job->crop[2] - job->crop[3] - 40 )
136 offset_left = job->crop[2] + ( title->width - job->crop[2] -
137 job->crop[3] - sub->width ) / 2;
138 else if( sub->x < job->crop[2] + 20 )
139 offset_left = job->crop[2] + 20;
140 else if( sub->x > title->width - job->crop[3] - 20 - sub->width )
141 offset_left = title->width - job->crop[3] - 20 - sub->width;
143 offset_left = sub->x;
/* The sub buffer packs four consecutive width*height planes:
 * luma, alpha, U and V (lum points at the first; its assignment is on
 * an elided line). */
146 alpha = lum + sub->width * sub->height;
147 sub_chromaU = alpha + sub->width * sub->height;
148 sub_chromaV = sub_chromaU + sub->width * sub->height;
/* Destination luma address of the subtitle's top-left corner. */
150 out = buf->data + offset_top * title->width + offset_left;
/* Blend row by row, skipping any part that falls outside the frame. */
152 for( i = 0; i < sub->height; i++ )
154 if( offset_top + i >= 0 && offset_top + i < title->height )
156 for( j = 0; j < sub->width; j++ )
158 if( offset_left + j >= 0 && offset_left + j < title->width )
160 uint8_t *chromaU, *chromaV;
163 * Merge the luminance and alpha with the picture
165 out[j] = ( (uint16_t) out[j] * ( 16 - (uint16_t) alpha[j] ) +
166 (uint16_t) lum[j] * (uint16_t) alpha[j] ) >> 4;
168 * Set the chroma (colour) based on whether there is
169 * any alpha at all. Don't try to blend with the picture.
171 chromaU = getU(buf->data, title->width, title->height,
172 offset_left+j, offset_top+i);
174 chromaV = getV(buf->data, title->width, title->height,
175 offset_left+j, offset_top+i);
180 * Add the chroma from the sub-picture, as this is
181 * not a transparent element.
183 *chromaU = sub_chromaU[j];
184 *chromaV = sub_chromaV[j];
/* Advance the per-row chroma source pointers (the matching lum/alpha/
 * out advances are on elided lines). */
192 sub_chromaU += sub->width;
193 sub_chromaV += sub->width;
/* Dispose of the subtitle now that it has been rendered. */
197 hb_buffer_close( _sub );
/*
 * renderWork: per-frame render pass.  Runs the incoming frame through
 * the job's filter chain, burns in the queued subtitle, crops/scales
 * into a fresh render buffer, and -- when job->vfr is set -- routes
 * output through a short delay queue so frame durations can be
 * rewritten to make up time lost to frames dropped by the filters.
 * NOTE(review): this view is elided -- braces, return statements, the
 * EOF and indepth_scan handling bodies, and some argument lines are not
 * visible, so the comments below describe only what is shown.
 */
200 int renderWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
201 hb_buffer_t ** buf_out )
203 hb_work_private_t * pv = w->private_data;
204 hb_job_t * job = pv->job;
205 hb_title_t * title = job->title;
206 hb_buffer_t * in = *buf_in, * buf_tmp_in = *buf_in;
207 hb_buffer_t * ivtc_buffer = NULL;
211 /* If the input buffer is end of stream, send out an empty one
212 * to the next stage as well. Note that this will result in us
213 * losing the current contents of the delay queue.
221 * During the indepth_scan ditch the buffers here before applying filters or attempting to
224 if( job->indepth_scan )
/* Every frame pushes exactly one entry onto the subtitle queue -- a
 * real subtitle or an empty placeholder -- so later pops stay in
 * lock-step with frames. */
230 /* Push subtitles onto queue just in case we need to delay a frame */
233 hb_fifo_push( pv->subtitle_queue, in->sub );
237 hb_fifo_push( pv->subtitle_queue, hb_buffer_init(0) );
240 /* If there's a chapter mark remember it in case we delay or drop its frame */
241 if( in->new_chap && job->vfr )
243 pv->chapter_time = in->start;
244 pv->chapter_val = in->new_chap;
248 /* Setup render buffer */
249 hb_buffer_t * buf_render = hb_video_buffer_init( job->width, job->height );
/* Apply each filter in order; buf_tmp_in tracks the current frame as
 * it moves through the chain. */
254 int filter_count = hb_list_count( job->filters );
257 for( i = 0; i < filter_count; i++ )
259 hb_filter_object_t * filter = hb_list_item( job->filters, i );
266 hb_buffer_t * buf_tmp_out = NULL;
268 int result = filter->work( buf_tmp_in,
273 filter->private_data );
276 * FILTER_OK: set temp buffer to filter buffer, continue
277 * FILTER_DELAY: set temp buffer to NULL, abort
278 * FILTER_DROP: set temp buffer to NULL, pop subtitle, abort
279 * FILTER_FAILED: leave temp buffer alone, continue
281 if( result == FILTER_OK )
283 buf_tmp_in = buf_tmp_out;
285 else if( result == FILTER_DELAY )
290 else if( result == FILTER_DROP )
294 /* We need to compensate for the time lost by dropping this frame.
295 Spread its duration out in quarters, because usually dropped frames
296 maintain a 1-out-of-5 pattern and this spreads it out amongst the remaining ones.
297 Store these in the lost_time array, which has 4 slots in it.
298 Because not every frame duration divides evenly by 4, and we can't lose the
299 remainder, we have to go through an awkward process to preserve it in the 4th array index. */
300 uint64_t temp_duration = buf_tmp_out->stop - buf_tmp_out->start;
301 pv->lost_time[0] += (temp_duration / 4);
302 pv->lost_time[1] += (temp_duration / 4);
303 pv->lost_time[2] += (temp_duration / 4);
304 pv->lost_time[3] += ( temp_duration - (temp_duration / 4) - (temp_duration / 4) - (temp_duration / 4) );
306 pv->total_lost_time += temp_duration;
307 pv->dropped_frames++;
309 /* Pop the frame's subtitle and dispose of it. */
310 hb_buffer_t * subtitles = hb_fifo_get( pv->subtitle_queue );
311 hb_buffer_close( &subtitles );
317 buf_tmp_in = buf_tmp_out;
325 /* Cache frame start and stop times, so we can renumber
326 time stamps if dropping frames for VFR. */
/* Shift the 4-entry history down one slot; slot 0 is the newest. */
328 for( i = 3; i >= 1; i-- )
330 pv->last_start[i] = pv->last_start[i-1];
331 pv->last_stop[i] = pv->last_stop[i-1];
334 /* In order to make sure we have continuous time stamps, store
335 the current frame's duration as starting when the last one stopped. */
336 pv->last_start[0] = pv->last_stop[1];
337 pv->last_stop[0] = pv->last_start[0] + (in->stop - in->start);
340 /* Apply subtitles */
343 hb_buffer_t * subtitles = hb_fifo_get( pv->subtitle_queue );
346 ApplySub( job, buf_tmp_in, &subtitles );
350 /* Apply crop/scale if specified */
351 if( buf_tmp_in && pv->context )
353 avpicture_fill( &pv->pic_tmp_in, buf_tmp_in->data,
355 title->width, title->height );
357 avpicture_fill( &pv->pic_tmp_out, buf_render->data,
359 job->width, job->height );
361 // Crop; this alters the pointer to the data to point to the correct place for cropped frame
362 av_picture_crop( &pv->pic_tmp_crop, &pv->pic_tmp_in, PIX_FMT_YUV420P,
363 job->crop[0], job->crop[2] );
365 // Scale pic_crop into pic_render according to the context set up in renderInit
366 sws_scale(pv->context,
367 pv->pic_tmp_crop.data, pv->pic_tmp_crop.linesize,
368 0, title->height - (job->crop[0] + job->crop[1]),
369 pv->pic_tmp_out.data, pv->pic_tmp_out.linesize);
371 hb_buffer_copy_settings( buf_render, buf_tmp_in );
373 buf_tmp_in = buf_render;
376 /* Set output to render buffer */
377 (*buf_out) = buf_render;
379 if( buf_tmp_in == NULL )
381 /* Teardown and cleanup buffers if we are emitting NULL */
382 if( buf_in && *buf_in )
384 hb_buffer_close( buf_in );
387 if( buf_out && *buf_out )
389 hb_buffer_close( buf_out );
393 else if( buf_tmp_in != buf_render )
395 /* Copy temporary results and settings into render buffer */
396 memcpy( buf_render->data, buf_tmp_in->data, buf_render->size );
397 hb_buffer_copy_settings( buf_render, buf_tmp_in );
/* VFR path: park the frame in the delay queue; only once the queue
 * holds 4 frames does the oldest come back out as this call's output. */
400 if (*buf_out && job->vfr)
402 hb_fifo_push( pv->delay_queue, *buf_out );
407 * Keep the last three frames in our queue, this ensures that we have the last
408 * two always in there should we need to rewrite the durations on them.
413 if( hb_fifo_size( pv->delay_queue ) >= 4 )
415 *buf_out = hb_fifo_get( pv->delay_queue );
419 if( *buf_out && job->vfr)
421 /* The current frame exists. That means it hasn't been dropped by a filter.
422 Make it accessible as ivtc_buffer so we can edit its duration if needed. */
423 ivtc_buffer = *buf_out;
425 if( pv->lost_time[3] > 0 )
428 * A frame's been dropped earlier by VFR detelecine.
429 * Gotta make up the lost time. This will also
430 * slow down the video.
431 * The dropped frame's has to be accounted for, so
432 * divvy it up amongst the 4 frames left behind.
433 * This is what the delay_queue is for;
434 * telecined sequences start 2 frames before
435 * the dropped frame, so to slow down the right
436 * ones you need a 2 frame delay between
437 * reading input and writing output.
440 /* We want to extend the outputted frame's duration by the value
441 stored in the 4th slot of the lost_time array. Because we need
442 to adjust all the values in the array so they're contiguous,
443 extend the duration inside the array first, before applying
444 it to the current frame buffer. */
445 pv->last_stop[3] += pv->lost_time[3];
447 /* Log how much time has been added back in to the video. */
448 pv->total_gained_time += pv->lost_time[3];
450 /* We've pulled the 4th value from the lost_time array
451 and added it to the last_stop array's 4th slot. Now, rotate the
452 lost_time array so the 4th slot now holds the 3rd's value, and
453 so on down the line, and set the 0 index to a value of 0. */
455 for( i=2; i >= 0; i--)
457 pv->lost_time[i+1] = pv->lost_time[i];
459 pv->lost_time[0] = 0;
461 /* Log how many frames have had their durations extended. */
462 pv->extended_frames++;
465 /* We can't use the given time stamps. Previous frames
466 might already have been extended, throwing off the
467 raw values fed to render.c. Instead, their
468 stop and start times are stored in arrays.
469 The 4th cached frame will be the to use.
470 If it needed its duration extended to make up
471 lost time, it will have happened above. */
472 ivtc_buffer->start = pv->last_start[3];
473 ivtc_buffer->stop = pv->last_stop[3];
475 /* Set the 3rd cached frame to start when this one stops,
476 and so on down the line. If any of them need to be
477 extended as well to make up lost time, it'll be handled
478 on the next loop through the renderer. */
/* NOTE(review): temp_duration below is a signed int while the cached
 * times are uint64_t -- fine for ordinary frame durations, but it
 * would truncate unusually large gaps. */
480 for (i = 2; i >= 0; i--)
482 int temp_duration = pv->last_stop[i] - pv->last_start[i];
483 pv->last_start[i] = pv->last_stop[i+1];
484 pv->last_stop[i] = pv->last_start[i] + temp_duration;
487 /* If we have a pending chapter mark and this frame is at
488 or after the time of the mark, mark this frame & clear
490 if( pv->chapter_time && pv->chapter_time <= ivtc_buffer->start )
492 ivtc_buffer->new_chap = pv->chapter_val;
493 pv->chapter_time = 0;
/*
 * renderClose: log the VFR drop/extend statistics accumulated during
 * the encode, close the FIFOs, and detach the private state from the
 * work object.
 * NOTE(review): elided view -- the free() of pv is not visible here.
 */
501 void renderClose( hb_work_object_t * w )
503 hb_work_private_t * pv = w->private_data;
/* NOTE(review): "%lld" paired with uint64_t arguments is a
 * printf-style format/type mismatch on platforms where uint64_t is not
 * long long; PRIu64 from <inttypes.h> would be the portable spelling. */
505 hb_log("render: lost time: %lld (%i frames)", pv->total_lost_time, pv->dropped_frames);
506 hb_log("render: gained time: %lld (%i frames) (%lld not accounted for)", pv->total_gained_time, pv->extended_frames, pv->total_lost_time - pv->total_gained_time);
507 if (pv->dropped_frames)
508 hb_log("render: average dropped frame duration: %lld", (pv->total_lost_time / pv->dropped_frames) );
510 /* Cleanup subtitle queue */
511 if( pv->subtitle_queue )
513 hb_fifo_close( &pv->subtitle_queue );
516 if( pv->delay_queue )
518 hb_fifo_close( &pv->delay_queue );
521 /* Cleanup render work structure */
523 w->private_data = NULL;
/*
 * renderInit: allocate the render stage's private state and set it up:
 * an optional swscale context (only when cropping or resizing is
 * required), the subtitle and delay FIFOs, the VFR bookkeeping
 * counters, and (below) each filter's private data.
 * NOTE(review): truncated view -- the function continues past the last
 * visible line (the filter->init argument list and the return).
 */
526 int renderInit( hb_work_object_t * w, hb_job_t * job )
528 /* Allocate new private work object */
529 hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
/* NOTE(review): the calloc result is used without a NULL check. */
531 w->private_data = pv;
/* Scaler quality flags; the conditional guarding SWS_ACCURATE_RND is
 * on an elided line -- the #endif suggests it depends on __x86_64__. */
534 swsflags = SWS_LANCZOS;
536 swsflags |= SWS_ACCURATE_RND;
537 #endif /* __x86_64__ */
539 /* Get title and title size */
540 hb_title_t * title = job->title;
542 /* If crop or scale is specified, setup rescale context */
543 if( job->crop[0] || job->crop[1] || job->crop[2] || job->crop[3] ||
544 job->width != title->width || job->height != title->height )
/* Source size is the title size minus the crop; the source
 * pixel-format argument to sws_getContext is on an elided line. */
546 pv->context = sws_getContext(title->width - (job->crop[2] + job->crop[3]),
547 title->height - (job->crop[0] + job->crop[1]),
549 job->width, job->height, PIX_FMT_YUV420P,
550 swsflags, NULL, NULL, NULL);
553 /* Setup FIFO queue for subtitle cache */
554 pv->subtitle_queue = hb_fifo_init( 8 );
555 pv->delay_queue = hb_fifo_init( 8 );
557 /* VFR IVTC needs a bunch of time-keeping variables to track
558 how many frames are dropped, how many are extended, what the
559 last 4 start and stop times were (so they can be modified),
560 how much time has been lost and gained overall, how much time
561 the latest 4 frames should be extended by, and where chapter
562 markers are (so they can be saved if their frames are dropped.) */
/* pv came from calloc, so these explicit zero assignments are
 * redundant; they serve to document which fields the VFR logic uses. */
563 pv->dropped_frames = 0;
564 pv->extended_frames = 0;
565 pv->last_start[0] = 0;
566 pv->last_stop[0] = 0;
567 pv->total_lost_time = 0;
568 pv->total_gained_time = 0;
569 pv->lost_time[0] = 0; pv->lost_time[1] = 0; pv->lost_time[2] = 0; pv->lost_time[3] = 0;
570 pv->chapter_time = 0;
574 /* TODO: Move to work.c? */
/* Initialise each filter's private data for the job's geometry. */
577 int filter_count = hb_list_count( job->filters );
580 for( i = 0; i < filter_count; i++ )
582 hb_filter_object_t * filter = hb_list_item( job->filters, i );
584 if( !filter ) continue;
586 filter->private_data = filter->init( PIX_FMT_YUV420P,