mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
32 #include "avcodec.h"
33 #include "dsputil.h"
34 #include "internal.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "faandct.h"
40 #include "xvmc_internal.h"
41 #include "thread.h"
42 #include <limits.h>
43 
44 //#undef NDEBUG
45 //#include <assert.h>
46 
48  DCTELEM *block, int n, int qscale);
50  DCTELEM *block, int n, int qscale);
52  DCTELEM *block, int n, int qscale);
54  DCTELEM *block, int n, int qscale);
56  DCTELEM *block, int n, int qscale);
58  DCTELEM *block, int n, int qscale);
60  DCTELEM *block, int n, int qscale);
61 
62 
63 /* enable all paranoid tests for rounding, overflows, etc... */
64 //#define PARANOID
65 
66 //#define DEBUG
67 
68 
/**
 * Default luma->chroma qscale mapping: the chroma quantizer equals the
 * luma quantizer (identity mapping over all 32 possible qscale values).
 */
static const uint8_t ff_default_chroma_qscale_table[32] = {
     0,  1,  2,  3,  4,  5,  6,  7,
     8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23,
    24, 25, 26, 27, 28, 29, 30, 31,
};
74 
/**
 * MPEG-1 DC coefficient scale: a constant 8 for every of the 128
 * possible quantizer indices (MPEG-1 has no adaptive DC precision).
 */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8,  8, 8, 8, 8, 8, 8, 8, 8,
};
86 
/**
 * MPEG-2 DC scale for intra_dc_precision == 1 (9-bit DC):
 * a constant 4 for every quantizer index.
 */
static const uint8_t mpeg2_dc_scale_table1[128] = {
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4,  4, 4, 4, 4, 4, 4, 4, 4,
};
98 
/**
 * MPEG-2 DC scale for intra_dc_precision == 2 (10-bit DC):
 * a constant 2 for every quantizer index.
 */
static const uint8_t mpeg2_dc_scale_table2[128] = {
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2,  2, 2, 2, 2, 2, 2, 2, 2,
};
110 
/**
 * MPEG-2 DC scale for intra_dc_precision == 3 (11-bit DC):
 * a constant 1 for every quantizer index.
 */
static const uint8_t mpeg2_dc_scale_table3[128] = {
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1,  1, 1, 1, 1, 1, 1, 1, 1,
};
122 
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
128 };
129 
133 };
134 
141 };
142 
/**
 * Scan [p, end) for an MPEG start code (the byte sequence 00 00 01 xx).
 *
 * @param p     start of the buffer to scan
 * @param end   one past the last byte of the buffer
 * @param state in/out: the last four bytes seen, packed big-endian; lets a
 *              start code be detected even when it straddles two calls
 * @return pointer just past the xx byte of the start code, or @p end if
 *         no complete start code was found in this buffer
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t * restrict state)
{
    assert(p <= end);
    if (p >= end)
        return end;

    /* Feed up to three bytes through the shift register so that a start
     * code split across the previous call's buffer is still detected. */
    for (int i = 0; i < 3; i++) {
        uint32_t shifted = *state << 8;
        *state = shifted + *p++;
        if (shifted == 0x100 || p == end)
            return p;
    }

    /* Fast scan: inspect bytes from the tail of a candidate 3-byte window
     * and skip as far ahead as the observed values allow. */
    while (p < end) {
        if (p[-1] > 1) {
            p += 3;                       /* xx > 1 cannot end 00 00 01 */
        } else if (p[-2]) {
            p += 2;
        } else if (p[-3] | (p[-1] - 1)) {
            p++;
        } else {
            p++;                          /* found 00 00 01 at p-3 */
            break;
        }
    }

    /* Clamp any overshoot, then reload state with the last four bytes. */
    if (p > end)
        p = end;
    p -= 4;
    *state = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
             ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];

    return p + 4;
}
175 
176 /* init common dct for both encoder and decoder */
178 {
179  dsputil_init(&s->dsp, s->avctx);
180 
186  if (s->flags & CODEC_FLAG_BITEXACT)
189 
190 #if HAVE_MMX
192 #elif ARCH_ALPHA
194 #elif CONFIG_MLIB
196 #elif HAVE_MMI
198 #elif ARCH_ARM
200 #elif HAVE_ALTIVEC
202 #elif ARCH_BFIN
204 #endif
205 
206  /* load & permutate scantables
207  * note: only wmv uses different ones
208  */
209  if (s->alternate_scan) {
212  } else {
215  }
218 
219  return 0;
220 }
221 
223 {
224  *dst = *src;
225  dst->f.type = FF_BUFFER_TYPE_COPY;
226 }
227 
232 {
233  /* Windows Media Image codecs allocate internal buffers with different
234  * dimensions; ignore user defined callbacks for these
235  */
238  else
241 }
242 
247 {
248  int r;
249 
250  if (s->avctx->hwaccel) {
251  assert(!pic->f.hwaccel_picture_private);
252  if (s->avctx->hwaccel->priv_data_size) {
254  if (!pic->f.hwaccel_picture_private) {
255  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
256  return -1;
257  }
258  }
259  }
260 
262  r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
263  else
264  r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
265 
266  if (r < 0 || !pic->f.type || !pic->f.data[0]) {
267  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
268  r, pic->f.type, pic->f.data[0]);
270  return -1;
271  }
272 
273  if (s->linesize && (s->linesize != pic->f.linesize[0] ||
274  s->uvlinesize != pic->f.linesize[1])) {
276  "get_buffer() failed (stride changed)\n");
277  free_frame_buffer(s, pic);
278  return -1;
279  }
280 
281  if (pic->f.linesize[1] != pic->f.linesize[2]) {
283  "get_buffer() failed (uv stride mismatch)\n");
284  free_frame_buffer(s, pic);
285  return -1;
286  }
287 
288  return 0;
289 }
290 
295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
296 {
297  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
298 
299  // the + 1 is needed so memset(,,stride*height) does not sig11
300 
301  const int mb_array_size = s->mb_stride * s->mb_height;
302  const int b8_array_size = s->b8_stride * s->mb_height * 2;
303  const int b4_array_size = s->b4_stride * s->mb_height * 4;
304  int i;
305  int r = -1;
306 
307  if (shared) {
308  assert(pic->f.data[0]);
309  assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
311  } else {
312  assert(!pic->f.data[0]);
313 
314  if (alloc_frame_buffer(s, pic) < 0)
315  return -1;
316 
317  s->linesize = pic->f.linesize[0];
318  s->uvlinesize = pic->f.linesize[1];
319  }
320 
321  if (pic->f.qscale_table == NULL) {
322  if (s->encoding) {
323  FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
324  mb_array_size * sizeof(int16_t), fail)
326  mb_array_size * sizeof(int16_t), fail)
328  mb_array_size * sizeof(int8_t ), fail)
329  }
330 
332  mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
334  (big_mb_num + s->mb_stride) * sizeof(uint8_t),
335  fail)
337  (big_mb_num + s->mb_stride) * sizeof(uint32_t),
338  fail)
339  pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
340  pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
341  if (s->out_format == FMT_H264) {
342  for (i = 0; i < 2; i++) {
344  2 * (b4_array_size + 4) * sizeof(int16_t),
345  fail)
346  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
347  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
348  4 * mb_array_size * sizeof(uint8_t), fail)
349  }
350  pic->f.motion_subsample_log2 = 2;
351  } else if (s->out_format == FMT_H263 || s->encoding ||
352  (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
353  for (i = 0; i < 2; i++) {
355  2 * (b8_array_size + 4) * sizeof(int16_t),
356  fail)
357  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
358  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
359  4 * mb_array_size * sizeof(uint8_t), fail)
360  }
361  pic->f.motion_subsample_log2 = 3;
362  }
363  if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
365  64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
366  }
367  pic->f.qstride = s->mb_stride;
369  1 * sizeof(AVPanScan), fail)
370  }
371 
372  pic->owner2 = s;
373 
374  return 0;
375 fail: // for the FF_ALLOCZ_OR_GOTO macro
376  if (r >= 0)
377  free_frame_buffer(s, pic);
378  return -1;
379 }
380 
384 static void free_picture(MpegEncContext *s, Picture *pic)
385 {
386  int i;
387 
388  if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
389  free_frame_buffer(s, pic);
390  }
391 
392  av_freep(&pic->mb_var);
393  av_freep(&pic->mc_mb_var);
394  av_freep(&pic->mb_mean);
395  av_freep(&pic->f.mbskip_table);
397  av_freep(&pic->mb_type_base);
398  av_freep(&pic->f.dct_coeff);
399  av_freep(&pic->f.pan_scan);
400  pic->f.mb_type = NULL;
401  for (i = 0; i < 2; i++) {
402  av_freep(&pic->motion_val_base[i]);
403  av_freep(&pic->f.ref_index[i]);
404  }
405 
406  if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
407  for (i = 0; i < 4; i++) {
408  pic->f.base[i] =
409  pic->f.data[i] = NULL;
410  }
411  pic->f.type = 0;
412  }
413 }
414 
416 {
417  int y_size = s->b8_stride * (2 * s->mb_height + 1);
418  int c_size = s->mb_stride * (s->mb_height + 1);
419  int yc_size = y_size + 2 * c_size;
420  int i;
421 
422  // edge emu needs blocksize + filter length - 1
423  // (= 17x17 for halfpel / 21x21 for h264)
425  (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
426 
427  // FIXME should be linesize instead of s->width * 2
428  // but that is not known before get_buffer()
430  (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
431  s->me.temp = s->me.scratchpad;
432  s->rd_scratchpad = s->me.scratchpad;
433  s->b_scratchpad = s->me.scratchpad;
434  s->obmc_scratchpad = s->me.scratchpad + 16;
435  if (s->encoding) {
436  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
437  ME_MAP_SIZE * sizeof(uint32_t), fail)
439  ME_MAP_SIZE * sizeof(uint32_t), fail)
440  if (s->avctx->noise_reduction) {
442  2 * 64 * sizeof(int), fail)
443  }
444  }
445  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
446  s->block = s->blocks[0];
447 
448  for (i = 0; i < 12; i++) {
449  s->pblocks[i] = &s->block[i];
450  }
451 
452  if (s->out_format == FMT_H263) {
453  /* ac values */
455  yc_size * sizeof(int16_t) * 16, fail);
456  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
457  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
458  s->ac_val[2] = s->ac_val[1] + c_size;
459  }
460 
461  return 0;
462 fail:
463  return -1; // free() through MPV_common_end()
464 }
465 
467 {
468  if (s == NULL)
469  return;
470 
472  av_freep(&s->me.scratchpad);
473  s->me.temp =
474  s->rd_scratchpad =
475  s->b_scratchpad =
476  s->obmc_scratchpad = NULL;
477 
478  av_freep(&s->dct_error_sum);
479  av_freep(&s->me.map);
480  av_freep(&s->me.score_map);
481  av_freep(&s->blocks);
482  av_freep(&s->ac_val_base);
483  s->block = NULL;
484 }
485 
487 {
488 #define COPY(a) bak->a = src->a
489  COPY(edge_emu_buffer);
490  COPY(me.scratchpad);
491  COPY(me.temp);
492  COPY(rd_scratchpad);
493  COPY(b_scratchpad);
494  COPY(obmc_scratchpad);
495  COPY(me.map);
496  COPY(me.score_map);
497  COPY(blocks);
498  COPY(block);
499  COPY(start_mb_y);
500  COPY(end_mb_y);
501  COPY(me.map_generation);
502  COPY(pb);
503  COPY(dct_error_sum);
504  COPY(dct_count[0]);
505  COPY(dct_count[1]);
506  COPY(ac_val_base);
507  COPY(ac_val[0]);
508  COPY(ac_val[1]);
509  COPY(ac_val[2]);
510 #undef COPY
511 }
512 
514 {
515  MpegEncContext bak;
516  int i;
517  // FIXME copy only needed parts
518  // START_TIMER
519  backup_duplicate_context(&bak, dst);
520  memcpy(dst, src, sizeof(MpegEncContext));
521  backup_duplicate_context(dst, &bak);
522  for (i = 0; i < 12; i++) {
523  dst->pblocks[i] = &dst->block[i];
524  }
525  // STOP_TIMER("update_duplicate_context")
526  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
527 }
528 
530  const AVCodecContext *src)
531 {
532  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
533 
534  if (dst == src || !s1->context_initialized)
535  return 0;
536 
537  // FIXME can parameters change on I-frames?
538  // in that case dst may need a reinit
539  if (!s->context_initialized) {
540  memcpy(s, s1, sizeof(MpegEncContext));
541 
542  s->avctx = dst;
545  s->bitstream_buffer = NULL;
547 
548  MPV_common_init(s);
549  }
550 
551  s->avctx->coded_height = s1->avctx->coded_height;
552  s->avctx->coded_width = s1->avctx->coded_width;
553  s->avctx->width = s1->avctx->width;
554  s->avctx->height = s1->avctx->height;
555 
556  s->coded_picture_number = s1->coded_picture_number;
557  s->picture_number = s1->picture_number;
558  s->input_picture_number = s1->input_picture_number;
559 
560  memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
561  memcpy(&s->last_picture, &s1->last_picture,
562  (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
563 
564  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
565  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
566  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
567 
568  // Error/bug resilience
569  s->next_p_frame_damaged = s1->next_p_frame_damaged;
570  s->workaround_bugs = s1->workaround_bugs;
571 
572  // MPEG4 timing info
573  memcpy(&s->time_increment_bits, &s1->time_increment_bits,
574  (char *) &s1->shape - (char *) &s1->time_increment_bits);
575 
576  // B-frame info
577  s->max_b_frames = s1->max_b_frames;
578  s->low_delay = s1->low_delay;
579  s->dropable = s1->dropable;
580 
581  // DivX handling (doesn't work)
582  s->divx_packed = s1->divx_packed;
583 
584  if (s1->bitstream_buffer) {
585  if (s1->bitstream_buffer_size +
589  s1->allocated_bitstream_buffer_size);
590  s->bitstream_buffer_size = s1->bitstream_buffer_size;
591  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
592  s1->bitstream_buffer_size);
593  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
595  }
596 
597  // MPEG2/interlacing info
598  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
599  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
600 
601  if (!s1->first_field) {
602  s->last_pict_type = s1->pict_type;
603  if (s1->current_picture_ptr)
604  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
605 
606  if (s1->pict_type != AV_PICTURE_TYPE_B) {
607  s->last_non_b_pict_type = s1->pict_type;
608  }
609  }
610 
611  return 0;
612 }
613 
621 {
622  s->y_dc_scale_table =
625  s->progressive_frame = 1;
626  s->progressive_sequence = 1;
628 
629  s->coded_picture_number = 0;
630  s->picture_number = 0;
631  s->input_picture_number = 0;
632 
633  s->picture_in_gop_number = 0;
634 
635  s->f_code = 1;
636  s->b_code = 1;
637 
638  s->picture_range_start = 0;
640 
641  s->slice_context_count = 1;
642 }
643 
650 {
652 }
653 
659 {
660  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
661  int nb_slices = (HAVE_THREADS &&
663  s->avctx->thread_count : 1;
664 
665  if (s->encoding && s->avctx->slices)
666  nb_slices = s->avctx->slices;
667 
669  s->mb_height = (s->height + 31) / 32 * 2;
670  else if (s->codec_id != CODEC_ID_H264)
671  s->mb_height = (s->height + 15) / 16;
672 
673  if (s->avctx->pix_fmt == PIX_FMT_NONE) {
675  "decoding to PIX_FMT_NONE is not supported.\n");
676  return -1;
677  }
678 
679  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
680  int max_slices;
681  if (s->mb_height)
682  max_slices = FFMIN(MAX_THREADS, s->mb_height);
683  else
684  max_slices = MAX_THREADS;
685  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
686  " reducing to %d\n", nb_slices, max_slices);
687  nb_slices = max_slices;
688  }
689 
690  if ((s->width || s->height) &&
691  av_image_check_size(s->width, s->height, 0, s->avctx))
692  return -1;
693 
695 
696  s->flags = s->avctx->flags;
697  s->flags2 = s->avctx->flags2;
698 
699  if (s->width && s->height) {
700  s->mb_width = (s->width + 15) / 16;
701  s->mb_stride = s->mb_width + 1;
702  s->b8_stride = s->mb_width * 2 + 1;
703  s->b4_stride = s->mb_width * 4 + 1;
704  mb_array_size = s->mb_height * s->mb_stride;
705  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
706 
707  /* set chroma shifts */
709  &s->chroma_y_shift);
710 
711  /* set default edge pos, will be overriden
712  * in decode_header if needed */
713  s->h_edge_pos = s->mb_width * 16;
714  s->v_edge_pos = s->mb_height * 16;
715 
716  s->mb_num = s->mb_width * s->mb_height;
717 
718  s->block_wrap[0] =
719  s->block_wrap[1] =
720  s->block_wrap[2] =
721  s->block_wrap[3] = s->b8_stride;
722  s->block_wrap[4] =
723  s->block_wrap[5] = s->mb_stride;
724 
725  y_size = s->b8_stride * (2 * s->mb_height + 1);
726  c_size = s->mb_stride * (s->mb_height + 1);
727  yc_size = y_size + 2 * c_size;
728 
729  /* convert fourcc to upper case */
731 
733 
735 
736  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
737  fail); // error ressilience code looks cleaner with this
738  for (y = 0; y < s->mb_height; y++)
739  for (x = 0; x < s->mb_width; x++)
740  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
741 
742  s->mb_index2xy[s->mb_height * s->mb_width] =
743  (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
744 
745  if (s->encoding) {
746  /* Allocate MV tables */
748  mv_table_size * 2 * sizeof(int16_t), fail);
750  mv_table_size * 2 * sizeof(int16_t), fail);
752  mv_table_size * 2 * sizeof(int16_t), fail);
754  mv_table_size * 2 * sizeof(int16_t), fail);
756  mv_table_size * 2 * sizeof(int16_t), fail);
758  mv_table_size * 2 * sizeof(int16_t), fail);
759  s->p_mv_table = s->p_mv_table_base +
760  s->mb_stride + 1;
762  s->mb_stride + 1;
764  s->mb_stride + 1;
766  s->mb_stride + 1;
768  s->mb_stride + 1;
770  s->mb_stride + 1;
771 
772  if (s->msmpeg4_version) {
774  2 * 2 * (MAX_LEVEL + 1) *
775  (MAX_RUN + 1) * 2 * sizeof(int), fail);
776  }
777  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
778 
779  /* Allocate MB type table */
780  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
781  sizeof(uint16_t), fail); // needed for encoding
782 
783  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
784  sizeof(int), fail);
785 
787  64 * 32 * sizeof(int), fail);
789  64 * 32 * sizeof(int), fail);
791  64 * 32 * 2 * sizeof(uint16_t), fail);
793  64 * 32 * 2 * sizeof(uint16_t), fail);
795  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
797  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
798 
799  if (s->avctx->noise_reduction) {
801  2 * 64 * sizeof(uint16_t), fail);
802  }
803  }
804  }
805 
808  s->picture_count * sizeof(Picture), fail);
809  for (i = 0; i < s->picture_count; i++) {
811  }
812 
813  if (s->width && s->height) {
815  mb_array_size * sizeof(uint8_t), fail);
816 
817  if (s->codec_id == CODEC_ID_MPEG4 ||
819  /* interlaced direct mode decoding tables */
820  for (i = 0; i < 2; i++) {
821  int j, k;
822  for (j = 0; j < 2; j++) {
823  for (k = 0; k < 2; k++) {
825  s->b_field_mv_table_base[i][j][k],
826  mv_table_size * 2 * sizeof(int16_t),
827  fail);
828  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
829  s->mb_stride + 1;
830  }
832  mb_array_size * 2 * sizeof(uint8_t),
833  fail);
835  mv_table_size * 2 * sizeof(int16_t),
836  fail);
837  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
838  + s->mb_stride + 1;
839  }
841  mb_array_size * 2 * sizeof(uint8_t),
842  fail);
843  }
844  }
845  if (s->out_format == FMT_H263) {
846  /* cbp values */
847  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
848  s->coded_block = s->coded_block_base + s->b8_stride + 1;
849 
850  /* cbp, ac_pred, pred_dir */
852  mb_array_size * sizeof(uint8_t), fail);
854  mb_array_size * sizeof(uint8_t), fail);
855  }
856 
857  if (s->h263_pred || s->h263_plus || !s->encoding) {
858  /* dc values */
859  // MN: we need these for error resilience of intra-frames
861  yc_size * sizeof(int16_t), fail);
862  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
863  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
864  s->dc_val[2] = s->dc_val[1] + c_size;
865  for (i = 0; i < yc_size; i++)
866  s->dc_val_base[i] = 1024;
867  }
868 
869  /* which mb is a intra block */
870  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
871  memset(s->mbintra_table, 1, mb_array_size);
872 
873  /* init macroblock skip table */
874  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
875  // Note the + 1 is for a quicker mpeg4 slice_end detection
876 
877  s->parse_context.state = -1;
879  s->avctx->debug_mv) {
880  s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
881  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
882  s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
883  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
884  s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
885  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
886  }
887  }
888 
889  s->context_initialized = 1;
890  s->thread_context[0] = s;
891 
892  if (s->width && s->height) {
893  if (nb_slices > 1) {
894  for (i = 1; i < nb_slices; i++) {
895  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
896  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
897  }
898 
899  for (i = 0; i < nb_slices; i++) {
900  if (init_duplicate_context(s->thread_context[i], s) < 0)
901  goto fail;
902  s->thread_context[i]->start_mb_y =
903  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
904  s->thread_context[i]->end_mb_y =
905  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
906  }
907  } else {
908  if (init_duplicate_context(s, s) < 0)
909  goto fail;
910  s->start_mb_y = 0;
911  s->end_mb_y = s->mb_height;
912  }
913  s->slice_context_count = nb_slices;
914  }
915 
916  return 0;
917  fail:
918  MPV_common_end(s);
919  return -1;
920 }
921 
922 /* init common structure for both encoder and decoder */
924 {
925  int i, j, k;
926 
927  if (s->slice_context_count > 1) {
928  for (i = 0; i < s->slice_context_count; i++) {
930  }
931  for (i = 1; i < s->slice_context_count; i++) {
932  av_freep(&s->thread_context[i]);
933  }
934  s->slice_context_count = 1;
935  } else free_duplicate_context(s);
936 
938  s->parse_context.buffer_size = 0;
939 
940  av_freep(&s->mb_type);
947  s->p_mv_table = NULL;
948  s->b_forw_mv_table = NULL;
949  s->b_back_mv_table = NULL;
952  s->b_direct_mv_table = NULL;
953  for (i = 0; i < 2; i++) {
954  for (j = 0; j < 2; j++) {
955  for (k = 0; k < 2; k++) {
956  av_freep(&s->b_field_mv_table_base[i][j][k]);
957  s->b_field_mv_table[i][j][k] = NULL;
958  }
959  av_freep(&s->b_field_select_table[i][j]);
960  av_freep(&s->p_field_mv_table_base[i][j]);
961  s->p_field_mv_table[i][j] = NULL;
962  }
964  }
965 
966  av_freep(&s->dc_val_base);
968  av_freep(&s->mbintra_table);
969  av_freep(&s->cbp_table);
971 
972  av_freep(&s->mbskip_table);
975 
976  av_freep(&s->avctx->stats_out);
977  av_freep(&s->ac_stats);
979  av_freep(&s->mb_index2xy);
980  av_freep(&s->lambda_table);
985  av_freep(&s->input_picture);
987  av_freep(&s->dct_offset);
988 
989  if (s->picture && !s->avctx->internal->is_copy) {
990  for (i = 0; i < s->picture_count; i++) {
991  free_picture(s, &s->picture[i]);
992  }
993  }
994  av_freep(&s->picture);
995  s->context_initialized = 0;
996  s->last_picture_ptr =
997  s->next_picture_ptr =
999  s->linesize = s->uvlinesize = 0;
1000 
1001  for (i = 0; i < 3; i++)
1003 
1006 }
1007 
1008 void init_rl(RLTable *rl,
1009  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1010 {
1011  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1012  uint8_t index_run[MAX_RUN + 1];
1013  int last, run, level, start, end, i;
1014 
1015  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1016  if (static_store && rl->max_level[0])
1017  return;
1018 
1019  /* compute max_level[], max_run[] and index_run[] */
1020  for (last = 0; last < 2; last++) {
1021  if (last == 0) {
1022  start = 0;
1023  end = rl->last;
1024  } else {
1025  start = rl->last;
1026  end = rl->n;
1027  }
1028 
1029  memset(max_level, 0, MAX_RUN + 1);
1030  memset(max_run, 0, MAX_LEVEL + 1);
1031  memset(index_run, rl->n, MAX_RUN + 1);
1032  for (i = start; i < end; i++) {
1033  run = rl->table_run[i];
1034  level = rl->table_level[i];
1035  if (index_run[run] == rl->n)
1036  index_run[run] = i;
1037  if (level > max_level[run])
1038  max_level[run] = level;
1039  if (run > max_run[level])
1040  max_run[level] = run;
1041  }
1042  if (static_store)
1043  rl->max_level[last] = static_store[last];
1044  else
1045  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1046  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1047  if (static_store)
1048  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1049  else
1050  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1051  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1052  if (static_store)
1053  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1054  else
1055  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1056  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1057  }
1058 }
1059 
1061 {
1062  int i, q;
1063 
1064  for (q = 0; q < 32; q++) {
1065  int qmul = q * 2;
1066  int qadd = (q - 1) | 1;
1067 
1068  if (q == 0) {
1069  qmul = 1;
1070  qadd = 0;
1071  }
1072  for (i = 0; i < rl->vlc.table_size; i++) {
1073  int code = rl->vlc.table[i][0];
1074  int len = rl->vlc.table[i][1];
1075  int level, run;
1076 
1077  if (len == 0) { // illegal code
1078  run = 66;
1079  level = MAX_LEVEL;
1080  } else if (len < 0) { // more bits needed
1081  run = 0;
1082  level = code;
1083  } else {
1084  if (code == rl->n) { // esc
1085  run = 66;
1086  level = 0;
1087  } else {
1088  run = rl->table_run[code] + 1;
1089  level = rl->table_level[code] * qmul + qadd;
1090  if (code >= rl->last) run += 192;
1091  }
1092  }
1093  rl->rl_vlc[q][i].len = len;
1094  rl->rl_vlc[q][i].level = level;
1095  rl->rl_vlc[q][i].run = run;
1096  }
1097  }
1098 }
1099 
1100 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1101 {
1102  int i;
1103 
1104  /* release non reference frames */
1105  for (i = 0; i < s->picture_count; i++) {
1106  if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1107  (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1108  (remove_current || &s->picture[i] != s->current_picture_ptr)
1109  /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1110  free_frame_buffer(s, &s->picture[i]);
1111  }
1112  }
1113 }
1114 
1116 {
1117  int i;
1118 
1119  if (shared) {
1120  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1121  if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1122  return i;
1123  }
1124  } else {
1125  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1126  if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1127  return i; // FIXME
1128  }
1129  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1130  if (s->picture[i].f.data[0] == NULL)
1131  return i;
1132  }
1133  }
1134 
1135  return AVERROR_INVALIDDATA;
1136 }
1137 
1139 {
1140  int intra, i;
1141 
1142  for (intra = 0; intra < 2; intra++) {
1143  if (s->dct_count[intra] > (1 << 16)) {
1144  for (i = 0; i < 64; i++) {
1145  s->dct_error_sum[intra][i] >>= 1;
1146  }
1147  s->dct_count[intra] >>= 1;
1148  }
1149 
1150  for (i = 0; i < 64; i++) {
1151  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1152  s->dct_count[intra] +
1153  s->dct_error_sum[intra][i] / 2) /
1154  (s->dct_error_sum[intra][i] + 1);
1155  }
1156  }
1157 }
1158 
1164 {
1165  int i;
1166  Picture *pic;
1167  s->mb_skipped = 0;
1168 
1169  assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
1170  s->codec_id == CODEC_ID_SVQ3);
1171 
1172  /* mark & release old frames */
1173  if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
1174  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1176  s->last_picture_ptr->f.data[0]) {
1177  if (s->last_picture_ptr->owner2 == s)
1179  }
1180 
1181  /* release forgotten pictures */
1182  /* if (mpeg124/h263) */
1183  if (!s->encoding) {
1184  for (i = 0; i < s->picture_count; i++) {
1185  if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1186  &s->picture[i] != s->last_picture_ptr &&
1187  &s->picture[i] != s->next_picture_ptr &&
1188  s->picture[i].f.reference) {
1189  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1190  av_log(avctx, AV_LOG_ERROR,
1191  "releasing zombie picture\n");
1192  free_frame_buffer(s, &s->picture[i]);
1193  }
1194  }
1195  }
1196  }
1197 
1198  if (!s->encoding) {
1200 
1201  if (s->current_picture_ptr &&
1202  s->current_picture_ptr->f.data[0] == NULL) {
1203  // we already have a unused image
1204  // (maybe it was set before reading the header)
1205  pic = s->current_picture_ptr;
1206  } else {
1207  i = ff_find_unused_picture(s, 0);
1208  pic = &s->picture[i];
1209  }
1210 
1211  pic->f.reference = 0;
1212  if (!s->dropable) {
1213  if (s->codec_id == CODEC_ID_H264)
1214  pic->f.reference = s->picture_structure;
1215  else if (s->pict_type != AV_PICTURE_TYPE_B)
1216  pic->f.reference = 3;
1217  }
1218 
1220 
1221  if (ff_alloc_picture(s, pic, 0) < 0)
1222  return -1;
1223 
1224  s->current_picture_ptr = pic;
1225  // FIXME use only the vars from current_pic
1227  if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
1228  s->codec_id == CODEC_ID_MPEG2VIDEO) {
1229  if (s->picture_structure != PICT_FRAME)
1232  }
1236  }
1237 
1239  // if (s->flags && CODEC_FLAG_QSCALE)
1240  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1242 
1244 
1245  if (s->pict_type != AV_PICTURE_TYPE_B) {
1247  if (!s->dropable)
1249  }
1250  /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1251  s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1252  s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1253  s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1254  s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1255  s->pict_type, s->dropable); */
1256 
1257  if (s->codec_id != CODEC_ID_H264) {
1258  if ((s->last_picture_ptr == NULL ||
1259  s->last_picture_ptr->f.data[0] == NULL) &&
1260  (s->pict_type != AV_PICTURE_TYPE_I ||
1261  s->picture_structure != PICT_FRAME)) {
1262  if (s->pict_type != AV_PICTURE_TYPE_I)
1263  av_log(avctx, AV_LOG_ERROR,
1264  "warning: first frame is no keyframe\n");
1265  else if (s->picture_structure != PICT_FRAME)
1266  av_log(avctx, AV_LOG_INFO,
1267  "allocate dummy last picture for field based first keyframe\n");
1268 
1269  /* Allocate a dummy frame */
1270  i = ff_find_unused_picture(s, 0);
1271  s->last_picture_ptr = &s->picture[i];
1272  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1273  return -1;
1275  INT_MAX, 0);
1277  INT_MAX, 1);
1278  }
1279  if ((s->next_picture_ptr == NULL ||
1280  s->next_picture_ptr->f.data[0] == NULL) &&
1281  s->pict_type == AV_PICTURE_TYPE_B) {
1282  /* Allocate a dummy frame */
1283  i = ff_find_unused_picture(s, 0);
1284  s->next_picture_ptr = &s->picture[i];
1285  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1286  return -1;
1288  INT_MAX, 0);
1290  INT_MAX, 1);
1291  }
1292  }
1293 
1294  if (s->last_picture_ptr)
1296  if (s->next_picture_ptr)
1298 
1299  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1300  (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
1301  if (s->next_picture_ptr)
1302  s->next_picture_ptr->owner2 = s;
1303  if (s->last_picture_ptr)
1304  s->last_picture_ptr->owner2 = s;
1305  }
1306 
1307  assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1308  s->last_picture_ptr->f.data[0]));
1309 
1310  if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1311  int i;
1312  for (i = 0; i < 4; i++) {
1314  s->current_picture.f.data[i] +=
1315  s->current_picture.f.linesize[i];
1316  }
1317  s->current_picture.f.linesize[i] *= 2;
1318  s->last_picture.f.linesize[i] *= 2;
1319  s->next_picture.f.linesize[i] *= 2;
1320  }
1321  }
1322 
1323  s->err_recognition = avctx->err_recognition;
1324 
1325  /* set dequantizer, we can't do it during init as
1326  * it might change for mpeg4 and we can't do it in the header
1327  * decode as init is not called for mpeg4 there yet */
1328  if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1331  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1334  } else {
1337  }
1338 
1339  if (s->dct_error_sum) {
1340  assert(s->avctx->noise_reduction && s->encoding);
1342  }
1343 
1344  if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1345  return ff_xvmc_field_start(s, avctx);
1346 
1347  return 0;
1348 }
1349 
1350 /* generic function for encode/decode called after a
1351  * frame has been coded/decoded. */
1353 {
1354  int i;
1355  /* redraw edges for the frame if decoding didn't complete */
1356  // just to make sure that all data is rendered.
1357  if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1358  ff_xvmc_field_end(s);
1359  } else if ((s->error_count || s->encoding) &&
1360  !s->avctx->hwaccel &&
1362  s->unrestricted_mv &&
1364  !s->intra_only &&
1365  !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1369  s->h_edge_pos, s->v_edge_pos,
1371  EDGE_TOP | EDGE_BOTTOM);
1373  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1374  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1375  EDGE_TOP | EDGE_BOTTOM);
1377  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1378  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1379  EDGE_TOP | EDGE_BOTTOM);
1380  }
1381 
1382  emms_c();
1383 
1384  s->last_pict_type = s->pict_type;
1386  if (s->pict_type!= AV_PICTURE_TYPE_B) {
1388  }
1389 #if 0
1390  /* copy back current_picture variables */
1391  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1392  if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1393  s->picture[i] = s->current_picture;
1394  break;
1395  }
1396  }
1397  assert(i < MAX_PICTURE_COUNT);
1398 #endif
1399 
1400  if (s->encoding) {
1401  /* release non-reference frames */
1402  for (i = 0; i < s->picture_count; i++) {
1403  if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1404  /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1405  free_frame_buffer(s, &s->picture[i]);
1406  }
1407  }
1408  }
1409  // clear copies, to avoid confusion
1410 #if 0
1411  memset(&s->last_picture, 0, sizeof(Picture));
1412  memset(&s->next_picture, 0, sizeof(Picture));
1413  memset(&s->current_picture, 0, sizeof(Picture));
1414 #endif
1416 
1417  if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1419  }
1420 }
1421 
1429 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1430  int w, int h, int stride, int color)
1431 {
1432  int x, y, fr, f;
1433 
1434  sx = av_clip(sx, 0, w - 1);
1435  sy = av_clip(sy, 0, h - 1);
1436  ex = av_clip(ex, 0, w - 1);
1437  ey = av_clip(ey, 0, h - 1);
1438 
1439  buf[sy * stride + sx] += color;
1440 
1441  if (FFABS(ex - sx) > FFABS(ey - sy)) {
1442  if (sx > ex) {
1443  FFSWAP(int, sx, ex);
1444  FFSWAP(int, sy, ey);
1445  }
1446  buf += sx + sy * stride;
1447  ex -= sx;
1448  f = ((ey - sy) << 16) / ex;
1449  for (x = 0; x = ex; x++) {
1450  y = (x * f) >> 16;
1451  fr = (x * f) & 0xFFFF;
1452  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1453  buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1454  }
1455  } else {
1456  if (sy > ey) {
1457  FFSWAP(int, sx, ex);
1458  FFSWAP(int, sy, ey);
1459  }
1460  buf += sx + sy * stride;
1461  ey -= sy;
1462  if (ey)
1463  f = ((ex - sx) << 16) / ey;
1464  else
1465  f = 0;
1466  for (y = 0; y = ey; y++) {
1467  x = (y * f) >> 16;
1468  fr = (y * f) & 0xFFFF;
1469  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1470  buf[y * stride + x + 1] += (color * fr ) >> 16;
1471  }
1472  }
1473 }
1474 
/**
 * Draw an arrow between (sx, sy) and (ex, ey); the two head strokes are
 * placed at (sx, sy). Drawing is additive, via draw_line().
 *
 * @param w, h   image dimensions
 * @param stride linesize of the 8bpp plane
 * @param color  amount added to the covered pixels
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int vx, vy;

    /* keep the endpoints within a loose margin so draw_line()'s clipping
     * cannot be fed wildly out-of-range coordinates */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    vx = ex - sx;
    vy = ey - sy;

    /* only draw the head when the shaft is longer than 3 pixels */
    if (vx * vx + vy * vy > 3 * 3) {
        int rx     = vx + vy;      /* shaft direction rotated by +/-45 deg */
        int ry     = vy - vx;
        int length = ff_sqrt((rx * rx + ry * ry) << 8);

        // FIXME subpixel accuracy
        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1509 
1514 {
1515  if (s->avctx->hwaccel || !pict || !pict->mb_type)
1516  return;
1517 
1519  int x,y;
1520 
1521  av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1522  switch (pict->pict_type) {
1523  case AV_PICTURE_TYPE_I:
1524  av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1525  break;
1526  case AV_PICTURE_TYPE_P:
1527  av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1528  break;
1529  case AV_PICTURE_TYPE_B:
1530  av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1531  break;
1532  case AV_PICTURE_TYPE_S:
1533  av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1534  break;
1535  case AV_PICTURE_TYPE_SI:
1536  av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1537  break;
1538  case AV_PICTURE_TYPE_SP:
1539  av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1540  break;
1541  }
1542  for (y = 0; y < s->mb_height; y++) {
1543  for (x = 0; x < s->mb_width; x++) {
1544  if (s->avctx->debug & FF_DEBUG_SKIP) {
1545  int count = s->mbskip_table[x + y * s->mb_stride];
1546  if (count > 9)
1547  count = 9;
1548  av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1549  }
1550  if (s->avctx->debug & FF_DEBUG_QP) {
1551  av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1552  pict->qscale_table[x + y * s->mb_stride]);
1553  }
1554  if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1555  int mb_type = pict->mb_type[x + y * s->mb_stride];
1556  // Type & MV direction
1557  if (IS_PCM(mb_type))
1558  av_log(s->avctx, AV_LOG_DEBUG, "P");
1559  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1560  av_log(s->avctx, AV_LOG_DEBUG, "A");
1561  else if (IS_INTRA4x4(mb_type))
1562  av_log(s->avctx, AV_LOG_DEBUG, "i");
1563  else if (IS_INTRA16x16(mb_type))
1564  av_log(s->avctx, AV_LOG_DEBUG, "I");
1565  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1566  av_log(s->avctx, AV_LOG_DEBUG, "d");
1567  else if (IS_DIRECT(mb_type))
1568  av_log(s->avctx, AV_LOG_DEBUG, "D");
1569  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1570  av_log(s->avctx, AV_LOG_DEBUG, "g");
1571  else if (IS_GMC(mb_type))
1572  av_log(s->avctx, AV_LOG_DEBUG, "G");
1573  else if (IS_SKIP(mb_type))
1574  av_log(s->avctx, AV_LOG_DEBUG, "S");
1575  else if (!USES_LIST(mb_type, 1))
1576  av_log(s->avctx, AV_LOG_DEBUG, ">");
1577  else if (!USES_LIST(mb_type, 0))
1578  av_log(s->avctx, AV_LOG_DEBUG, "<");
1579  else {
1580  assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1581  av_log(s->avctx, AV_LOG_DEBUG, "X");
1582  }
1583 
1584  // segmentation
1585  if (IS_8X8(mb_type))
1586  av_log(s->avctx, AV_LOG_DEBUG, "+");
1587  else if (IS_16X8(mb_type))
1588  av_log(s->avctx, AV_LOG_DEBUG, "-");
1589  else if (IS_8X16(mb_type))
1590  av_log(s->avctx, AV_LOG_DEBUG, "|");
1591  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1592  av_log(s->avctx, AV_LOG_DEBUG, " ");
1593  else
1594  av_log(s->avctx, AV_LOG_DEBUG, "?");
1595 
1596 
1597  if (IS_INTERLACED(mb_type))
1598  av_log(s->avctx, AV_LOG_DEBUG, "=");
1599  else
1600  av_log(s->avctx, AV_LOG_DEBUG, " ");
1601  }
1602  // av_log(s->avctx, AV_LOG_DEBUG, " ");
1603  }
1604  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1605  }
1606  }
1607 
1608  if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1609  (s->avctx->debug_mv)) {
1610  const int shift = 1 + s->quarter_sample;
1611  int mb_y;
1612  uint8_t *ptr;
1613  int i;
1614  int h_chroma_shift, v_chroma_shift, block_height;
1615  const int width = s->avctx->width;
1616  const int height = s->avctx->height;
1617  const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1618  const int mv_stride = (s->mb_width << mv_sample_log2) +
1619  (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1620  s->low_delay = 0; // needed to see the vectors without trashing the buffers
1621 
1623  &h_chroma_shift, &v_chroma_shift);
1624  for (i = 0; i < 3; i++) {
1625  memcpy(s->visualization_buffer[i], pict->data[i],
1626  (i == 0) ? pict->linesize[i] * height:
1627  pict->linesize[i] * height >> v_chroma_shift);
1628  pict->data[i] = s->visualization_buffer[i];
1629  }
1630  pict->type = FF_BUFFER_TYPE_COPY;
1631  ptr = pict->data[0];
1632  block_height = 16 >> v_chroma_shift;
1633 
1634  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1635  int mb_x;
1636  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1637  const int mb_index = mb_x + mb_y * s->mb_stride;
1638  if ((s->avctx->debug_mv) && pict->motion_val) {
1639  int type;
1640  for (type = 0; type < 3; type++) {
1641  int direction = 0;
1642  switch (type) {
1643  case 0:
1644  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1645  (pict->pict_type!= AV_PICTURE_TYPE_P))
1646  continue;
1647  direction = 0;
1648  break;
1649  case 1:
1650  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1651  (pict->pict_type!= AV_PICTURE_TYPE_B))
1652  continue;
1653  direction = 0;
1654  break;
1655  case 2:
1656  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1657  (pict->pict_type!= AV_PICTURE_TYPE_B))
1658  continue;
1659  direction = 1;
1660  break;
1661  }
1662  if (!USES_LIST(pict->mb_type[mb_index], direction))
1663  continue;
1664 
1665  if (IS_8X8(pict->mb_type[mb_index])) {
1666  int i;
1667  for (i = 0; i < 4; i++) {
1668  int sx = mb_x * 16 + 4 + 8 * (i & 1);
1669  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1670  int xy = (mb_x * 2 + (i & 1) +
1671  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1672  int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1673  int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1674  draw_arrow(ptr, sx, sy, mx, my, width,
1675  height, s->linesize, 100);
1676  }
1677  } else if (IS_16X8(pict->mb_type[mb_index])) {
1678  int i;
1679  for (i = 0; i < 2; i++) {
1680  int sx = mb_x * 16 + 8;
1681  int sy = mb_y * 16 + 4 + 8 * i;
1682  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1683  int mx = (pict->motion_val[direction][xy][0] >> shift);
1684  int my = (pict->motion_val[direction][xy][1] >> shift);
1685 
1686  if (IS_INTERLACED(pict->mb_type[mb_index]))
1687  my *= 2;
1688 
1689  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1690  height, s->linesize, 100);
1691  }
1692  } else if (IS_8X16(pict->mb_type[mb_index])) {
1693  int i;
1694  for (i = 0; i < 2; i++) {
1695  int sx = mb_x * 16 + 4 + 8 * i;
1696  int sy = mb_y * 16 + 8;
1697  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1698  int mx = pict->motion_val[direction][xy][0] >> shift;
1699  int my = pict->motion_val[direction][xy][1] >> shift;
1700 
1701  if (IS_INTERLACED(pict->mb_type[mb_index]))
1702  my *= 2;
1703 
1704  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1705  height, s->linesize, 100);
1706  }
1707  } else {
1708  int sx = mb_x * 16 + 8;
1709  int sy = mb_y * 16 + 8;
1710  int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1711  int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1712  int my = pict->motion_val[direction][xy][1] >> shift + sy;
1713  draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1714  }
1715  }
1716  }
1717  if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1718  uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1719  0x0101010101010101ULL;
1720  int y;
1721  for (y = 0; y < block_height; y++) {
1722  *(uint64_t *)(pict->data[1] + 8 * mb_x +
1723  (block_height * mb_y + y) *
1724  pict->linesize[1]) = c;
1725  *(uint64_t *)(pict->data[2] + 8 * mb_x +
1726  (block_height * mb_y + y) *
1727  pict->linesize[2]) = c;
1728  }
1729  }
1730  if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1731  pict->motion_val) {
1732  int mb_type = pict->mb_type[mb_index];
1733  uint64_t u,v;
1734  int y;
1735 #define COLOR(theta, r) \
1736  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1737  v = (int)(128 + r * sin(theta * 3.141592 / 180));
1738 
1739 
1740  u = v = 128;
1741  if (IS_PCM(mb_type)) {
1742  COLOR(120, 48)
1743  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1744  IS_INTRA16x16(mb_type)) {
1745  COLOR(30, 48)
1746  } else if (IS_INTRA4x4(mb_type)) {
1747  COLOR(90, 48)
1748  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1749  // COLOR(120, 48)
1750  } else if (IS_DIRECT(mb_type)) {
1751  COLOR(150, 48)
1752  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1753  COLOR(170, 48)
1754  } else if (IS_GMC(mb_type)) {
1755  COLOR(190, 48)
1756  } else if (IS_SKIP(mb_type)) {
1757  // COLOR(180, 48)
1758  } else if (!USES_LIST(mb_type, 1)) {
1759  COLOR(240, 48)
1760  } else if (!USES_LIST(mb_type, 0)) {
1761  COLOR(0, 48)
1762  } else {
1763  assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1764  COLOR(300,48)
1765  }
1766 
1767  u *= 0x0101010101010101ULL;
1768  v *= 0x0101010101010101ULL;
1769  for (y = 0; y < block_height; y++) {
1770  *(uint64_t *)(pict->data[1] + 8 * mb_x +
1771  (block_height * mb_y + y) * pict->linesize[1]) = u;
1772  *(uint64_t *)(pict->data[2] + 8 * mb_x +
1773  (block_height * mb_y + y) * pict->linesize[2]) = v;
1774  }
1775 
1776  // segmentation
1777  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1778  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1779  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1780  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1781  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1782  }
1783  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1784  for (y = 0; y < 16; y++)
1785  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1786  pict->linesize[0]] ^= 0x80;
1787  }
1788  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1789  int dm = 1 << (mv_sample_log2 - 2);
1790  for (i = 0; i < 4; i++) {
1791  int sx = mb_x * 16 + 8 * (i & 1);
1792  int sy = mb_y * 16 + 8 * (i >> 1);
1793  int xy = (mb_x * 2 + (i & 1) +
1794  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1795  // FIXME bidir
1796  int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1797  if (mv[0] != mv[dm] ||
1798  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1799  for (y = 0; y < 8; y++)
1800  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1801  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1802  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1803  pict->linesize[0]) ^= 0x8080808080808080ULL;
1804  }
1805  }
1806 
1807  if (IS_INTERLACED(mb_type) &&
1808  s->codec_id == CODEC_ID_H264) {
1809  // hmm
1810  }
1811  }
1812  s->mbskip_table[mb_index] = 0;
1813  }
1814  }
1815  }
1816 }
1817 
1818 static inline int hpel_motion_lowres(MpegEncContext *s,
1819  uint8_t *dest, uint8_t *src,
1820  int field_based, int field_select,
1821  int src_x, int src_y,
1822  int width, int height, int stride,
1823  int h_edge_pos, int v_edge_pos,
1824  int w, int h, h264_chroma_mc_func *pix_op,
1825  int motion_x, int motion_y)
1826 {
1827  const int lowres = s->avctx->lowres;
1828  const int op_index = FFMIN(lowres, 2);
1829  const int s_mask = (2 << lowres) - 1;
1830  int emu = 0;
1831  int sx, sy;
1832 
1833  if (s->quarter_sample) {
1834  motion_x /= 2;
1835  motion_y /= 2;
1836  }
1837 
1838  sx = motion_x & s_mask;
1839  sy = motion_y & s_mask;
1840  src_x += motion_x >> lowres + 1;
1841  src_y += motion_y >> lowres + 1;
1842 
1843  src += src_y * stride + src_x;
1844 
1845  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
1846  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1847  s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
1848  (h + 1) << field_based, src_x,
1849  src_y << field_based,
1850  h_edge_pos,
1851  v_edge_pos);
1852  src = s->edge_emu_buffer;
1853  emu = 1;
1854  }
1855 
1856  sx = (sx << 2) >> lowres;
1857  sy = (sy << 2) >> lowres;
1858  if (field_select)
1859  src += s->linesize;
1860  pix_op[op_index](dest, src, stride, h, sx, sy);
1861  return emu;
1862 }
1863 
/* apply one mpeg motion vector to the three components */
/*
 * NOTE(review): the doxygen extraction this listing came from dropped the
 * opening line of this definition (original line 1865). Judging by the
 * call sites in MPV_motion_lowres() below it was approximately
 *     static ... void mpeg_motion_lowres(MpegEncContext *s,
 * Recover the exact line from the upstream file before compiling.
 */
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based,
                               int bottom_field,
                               int field_select,
                               uint8_t **ref_picture,
                               h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y,
                               int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
        uvsx, uvsy;
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int block_s = 8>>lowres;
    const int s_mask = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if (field_based) {
        /* NOTE(review): "1 << lowres - 1" parses as "1 << (lowres - 1)",
         * which is undefined behavior for lowres == 0 (negative shift).
         * This looks like an extraction garble of "(1 << lowres) - 1";
         * verify against the upstream file. */
        motion_y += (bottom_field - field_select) * (1 << lowres - 1);
    }

    /* sub-pel fraction and integer source position; ">> lowres + 1"
     * intentionally shifts by (lowres + 1): '+' binds tighter than '>>' */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2 * mx) & s_mask;
        uvsy = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        /* MPEG-style chroma: half-resolution vector */
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
        uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
    }

    ptr_y = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* if the block reaches outside the picture, motion-compensate from the
     * edge-emulation buffer (filled with replicated border pixels) */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                s->linesize, 17, 17 + field_based,
                                src_x, src_y << field_based, h_edge_pos,
                                v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
    if (bottom_field) {
        dest_y += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    /* NOTE(review): pix_op[lowres - 1] indexes below op_index for the
     * double-size luma block; valid only for lowres >= 1 — presumably
     * callers never reach this path with lowres == 0; confirm. */
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
                         uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
                         uvsx, uvsy);
    }
    // FIXME h261 lowres loop filter
}
1979 
/*
 * Chroma motion compensation for 4MV (four luma vectors) macroblocks at
 * lowres scale: a single chroma vector is derived from the summed luma
 * vectors with the special H.263 rounding.
 *
 * NOTE(review): the doxygen extraction dropped original line 1980, the
 * opening "static ... chroma_4mv_motion_lowres(MpegEncContext *s," of this
 * definition (the call in MPV_motion_lowres() shows the argument list).
 * Recover it from the upstream file before compiling.
 */
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func * pix_op,
                                     int mx, int my)
{
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int block_s = 8 >> lowres;
    const int s_mask = (2 << lowres) - 1;
    /* ">> lowres + 1" shifts by (lowres + 1): chroma is half resolution */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
            /* NOTE(review): extraction dropped original line 2015 — the
             * opening of this call; by the matching Cr-plane code below it
             * was "s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
             * s->uvlinesize,". Restore before compiling. */
                                    9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* Cr uses the same offset and (if needed) the same edge emulation */
    ptr = ref_picture[2] + offset;
    if (emu) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                                src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
2033 
/**
 * Lowres motion compensation of a single macroblock: dispatch on s->mv_type
 * and apply the vector(s) in s->mv[dir][] to the three component planes
 * through mpeg_motion_lowres()/hpel_motion_lowres().
 *
 * @param dest_y,dest_cb,dest_cr destination planes for this macroblock
 * @param dir 0 for forward prediction, 1 for backward (the call sites in
 *            MPV_decode_mb_internal pass 0 with last_picture and 1 with
 *            next_picture)
 * @param ref_picture data[] pointers of the reference picture
 * @param pix_op chroma MC function table (put variant on entry; switched
 *               to the avg table internally where predictions are averaged)
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors, one per 8x8 block; chroma is compensated once
         * from their sum (special rounding in chroma_4mv_motion_lowres) */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: one vector per field */
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: a reference field of the same frame lives in
             * the current picture, not the supplied reference */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* field picture, two vectors, each covering a 16x8 half */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                /* same-frame field: take it from the current picture */
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            /* advance to the lower 16x8 half */
            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual prime: put the first prediction, then average in the
         * opposite-parity prediction (pix_op switched to the avg table) */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        assert(0);
    }
}
2175 
/*
 * NOTE(review): the doxygen extraction dropped this function's comment and
 * signature (original lines 2176-2179); presumably it is upstream's
 *     static int lowest_referenced_row(MpegEncContext *s, int dir)
 * — confirm against the upstream file before compiling.
 *
 * Computes the lowest macroblock row of the reference picture that the
 * current macroblock's prediction can touch, from the largest vertical
 * motion vector component; falls back to the last row when the layout is
 * not handled (field pictures, exotic mv types).
 */
{
    /* qpel_shift normalizes half-pel vectors to quarter-pel units */
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    if (s->picture_structure != PICT_FRAME) goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    /* track the extreme vertical displacements over all vectors */
    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1]<<qpel_shift;
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* quarter-pel -> MB rows, rounding up (64 quarter-pel per 16-px row) */
    off = (FFMAX(-my_min, my_max) + 63) >> 6;

    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
unhandled:
    /* conservative answer: the whole reference may be needed */
    return s->mb_height-1;
}
2212 
2213 /* put block[] to dest[] */
2214 static inline void put_dct(MpegEncContext *s,
2215  DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2216 {
2217  s->dct_unquantize_intra(s, block, i, qscale);
2218  s->dsp.idct_put (dest, line_size, block);
2219 }
2220 
2221 /* add block[] to dest[] */
2222 static inline void add_dct(MpegEncContext *s,
2223  DCTELEM *block, int i, uint8_t *dest, int line_size)
2224 {
2225  if (s->block_last_index[i] >= 0) {
2226  s->dsp.idct_add (dest, line_size, block);
2227  }
2228 }
2229 
2230 static inline void add_dequant_dct(MpegEncContext *s,
2231  DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2232 {
2233  if (s->block_last_index[i] >= 0) {
2234  s->dct_unquantize_inter(s, block, i, qscale);
2235 
2236  s->dsp.idct_add (dest, line_size, block);
2237  }
2238 }
2239 
/*
 * NOTE(review): the doxygen extraction dropped this function's comment and
 * signature (original lines 2240-2243); presumably it is upstream's
 *     void ff_clean_intra_table_entries(MpegEncContext *s)
 * — confirm against the upstream file before compiling.
 *
 * Resets the intra prediction state (DC and AC predictors, and for
 * MSMPEG4 v3+ the coded-block flags) for the current macroblock, and
 * clears its mbintra_table entry.
 */
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* reset the four luma DC predictors of this MB to the neutral 1024 */
    s->dc_val[0][xy ] =
    s->dc_val[0][xy + 1 ] =
    s->dc_val[0][xy + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy ] =
        s->coded_block[xy + 1 ] =
        s->coded_block[xy + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy]= 0;
}
2272 
2273 /* generic function called after a macroblock has been parsed by the
2274  decoder or after it has been encoded by the encoder.
2275 
2276  Important variables used:
2277  s->mb_intra : true if intra macroblock
2278  s->mv_dir : motion vector direction
2279  s->mv_type : motion vector type
2280  s->mv : motion vector
2281  s->interlaced_dct : true if interlaced dct used (mpeg2)
2282  */
/* NOTE(review): this chunk is a Doxygen-HTML extraction; lines that carried
 * hyperlinked identifiers were dropped — including the signature line 2284
 * (presumably `void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM
 * block[12][64], ...` — TODO confirm against pristine mpegvideo.c) and the
 * call lines 2313, 2369, 2372, 2377, 2406-2407, 2413 and 2466.  Diff against
 * the real source before changing anything here. */
/* Generic macroblock reconstruction: performs motion compensation (normal or
 * lowres path) and adds the dequantized/IDCT'd residual for inter MBs, or
 * idct_put's the blocks directly for intra MBs, into the current picture.
 * lowres_flag / is_mpeg12 are constant at each call site so the compiler can
 * specialize away the dead branches. */
2283 static av_always_inline
 2285  int lowres_flag, int is_mpeg12)
 2286 {
 2287  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
 /* XvMC acceleration reconstructs the MB itself; nothing more to do here. */
 2288  if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
 2289  ff_xvmc_decode_mb(s);//xvmc uses pblocks
 2290  return;
 2291  }
 2292 
 2293  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
 2294  /* save DCT coefficients */
 2295  int i,j;
 2296  DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
 2297  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
 2298  for(i=0; i<6; i++){
 2299  for(j=0; j<64; j++){
 2300  *dct++ = block[i][s->dsp.idct_permutation[j]];
 2301  av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
 2302  }
 2303  av_log(s->avctx, AV_LOG_DEBUG, "\n");
 2304  }
 2305  }
 2306 
 2307  s->current_picture.f.qscale_table[mb_xy] = s->qscale;
 2308 
 2309  /* update DC predictors for P macroblocks */
 2310  if (!s->mb_intra) {
 2311  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
 2312  if(s->mbintra_table[mb_xy])
 /* NOTE(review): the statement body of this `if` was on dropped line 2313
  * (presumably ff_clean_intra_table_entries(s) — TODO confirm). */
 2314  } else {
 2315  s->last_dc[0] =
 2316  s->last_dc[1] =
 2317  s->last_dc[2] = 128 << s->intra_dc_precision;
 2318  }
 2319  }
 2320  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
 2321  s->mbintra_table[mb_xy]=1;
 2322 
 2323  if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
 2324  uint8_t *dest_y, *dest_cb, *dest_cr;
 2325  int dct_linesize, dct_offset;
 2326  op_pixels_func (*op_pix)[4];
 2327  qpel_mc_func (*op_qpix)[16];
 2328  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
 2329  const int uvlinesize = s->current_picture.f.linesize[1];
 2330  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
 2331  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
 2332 
 2333  /* avoid copy if macroblock skipped in last frame too */
 2334  /* skip only during decoding as we might trash the buffers during encoding a bit */
 2335  if(!s->encoding){
 2336  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
 2337 
 2338  if (s->mb_skipped) {
 2339  s->mb_skipped= 0;
 2340  assert(s->pict_type!=AV_PICTURE_TYPE_I);
 2341  *mbskip_ptr = 1;
 2342  } else if(!s->current_picture.f.reference) {
 2343  *mbskip_ptr = 1;
 2344  } else{
 2345  *mbskip_ptr = 0; /* not skipped */
 2346  }
 2347  }
 2348 
 /* Interlaced DCT doubles the stride and offsets by one line so the two
  * fields are interleaved correctly. */
 2349  dct_linesize = linesize << s->interlaced_dct;
 2350  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
 2351 
 2352  if(readable){
 2353  dest_y= s->dest[0];
 2354  dest_cb= s->dest[1];
 2355  dest_cr= s->dest[2];
 2356  }else{
 /* Not safe to write into the reference frame directly; reconstruct into
  * the scratchpad and copy at the end (see skip_idct tail). */
 2357  dest_y = s->b_scratchpad;
 2358  dest_cb= s->b_scratchpad+16*linesize;
 2359  dest_cr= s->b_scratchpad+32*linesize;
 2360  }
 2361 
 2362  if (!s->mb_intra) {
 2363  /* motion handling */
 2364  /* decoding or more than one mb_type (MC was already done otherwise) */
 2365  if(!s->encoding){
 2366 
 /* Frame-threaded decoding: wait until the reference frames have decoded
  * far enough before reading from them. */
 2367  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
 2368  if (s->mv_dir & MV_DIR_FORWARD) {
 /* NOTE(review): dropped line 2369 (presumably a ff_thread_await_progress
  * call on last_picture — TODO confirm). */
 2370  }
 2371  if (s->mv_dir & MV_DIR_BACKWARD) {
 /* NOTE(review): dropped line 2372 (presumably the matching await on
  * next_picture — TODO confirm). */
 2373  }
 2374  }
 2375 
 2376  if(lowres_flag){
 /* NOTE(review): dropped line 2377 — presumably the initial op_pix
  * assignment (put_h264_chroma_pixels_tab?) — TODO confirm. */
 2378 
 2379  if (s->mv_dir & MV_DIR_FORWARD) {
 2380  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
 /* Switch to averaging for the backward pass so B-frames blend. */
 2381  op_pix = s->dsp.avg_h264_chroma_pixels_tab;
 2382  }
 2383  if (s->mv_dir & MV_DIR_BACKWARD) {
 2384  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
 2385  }
 2386  }else{
 2387  op_qpix= s->me.qpel_put;
 2388  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
 2389  op_pix = s->dsp.put_pixels_tab;
 2390  }else{
 2391  op_pix = s->dsp.put_no_rnd_pixels_tab;
 2392  }
 2393  if (s->mv_dir & MV_DIR_FORWARD) {
 2394  MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
 2395  op_pix = s->dsp.avg_pixels_tab;
 2396  op_qpix= s->me.qpel_avg;
 2397  }
 2398  if (s->mv_dir & MV_DIR_BACKWARD) {
 2399  MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
 2400  }
 2401  }
 2402  }
 2403 
 2404  /* skip dequant / idct if we are really late ;) */
 2405  if(s->avctx->skip_idct){
 /* NOTE(review): dropped lines 2406-2407 held the other skip_idct
  * conditions (per picture type) — TODO confirm. */
 2408  || s->avctx->skip_idct >= AVDISCARD_ALL)
 2409  goto skip_idct;
 2410  }
 2411 
 2412  /* add dct residue */
 /* NOTE(review): dropped line 2413 held the condition selecting the
  * combined dequant+idct path (s->encoding || codecs without their own
  * dequant) — TODO confirm. */
 2414  || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
 2415  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
 2416  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
 2417  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
 2418  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
 2419 
 2420  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
 2421  if (s->chroma_y_shift){
 2422  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
 2423  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
 2424  }else{
 /* 4:2:2 — chroma has 8 blocks; halve the luma-derived stride/offset. */
 2425  dct_linesize >>= 1;
 2426  dct_offset >>=1;
 2427  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
 2428  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
 2429  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
 2430  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
 2431  }
 2432  }
 2433  } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
 2434  add_dct(s, block[0], 0, dest_y , dct_linesize);
 2435  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
 2436  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
 2437  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
 2438 
 2439  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
 2440  if(s->chroma_y_shift){//Chroma420
 2441  add_dct(s, block[4], 4, dest_cb, uvlinesize);
 2442  add_dct(s, block[5], 5, dest_cr, uvlinesize);
 2443  }else{
 2444  //chroma422
 2445  dct_linesize = uvlinesize << s->interlaced_dct;
 2446  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
 2447 
 2448  add_dct(s, block[4], 4, dest_cb, dct_linesize);
 2449  add_dct(s, block[5], 5, dest_cr, dct_linesize);
 2450  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
 2451  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
 2452  if(!s->chroma_x_shift){//Chroma444
 2453  add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
 2454  add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
 2455  add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
 2456  add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
 2457  }
 2458  }
 2459  }//fi gray
 2460  }
 2461  else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
 2462  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
 2463  }
 2464  } else {
 2465  /* dct only in intra block */
 /* NOTE(review): dropped line 2466 held the condition selecting
  * put_dct vs idct_put for intra MBs (s->encoding || ...) — TODO confirm. */
 2467  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
 2468  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
 2469  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
 2470  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
 2471 
 2472  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
 2473  if(s->chroma_y_shift){
 2474  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
 2475  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
 2476  }else{
 2477  dct_offset >>=1;
 2478  dct_linesize >>=1;
 2479  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
 2480  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
 2481  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
 2482  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
 2483  }
 2484  }
 2485  }else{
 /* Residual already dequantized by the decoder: IDCT straight into the
  * destination. */
 2486  s->dsp.idct_put(dest_y , dct_linesize, block[0]);
 2487  s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
 2488  s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
 2489  s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
 2490 
 2491  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
 2492  if(s->chroma_y_shift){
 2493  s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
 2494  s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
 2495  }else{
 2496 
 2497  dct_linesize = uvlinesize << s->interlaced_dct;
 2498  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
 2499 
 2500  s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
 2501  s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
 2502  s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
 2503  s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
 2504  if(!s->chroma_x_shift){//Chroma444
 2505  s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
 2506  s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
 2507  s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
 2508  s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
 2509  }
 2510  }
 2511  }//gray
 2512  }
 2513  }
 2514 skip_idct:
 /* If we reconstructed into the scratchpad, copy the finished MB out now. */
 2515  if(!readable){
 2516  s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
 2517  s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
 2518  s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
 2519  }
 2520  }
 2521 }
2522 
/* Public MB-decode entry point.  NOTE(review): the signature line 2523 was
 * dropped by the HTML extraction (presumably `void MPV_decode_mb(
 * MpegEncContext *s, DCTELEM block[12][64]){` — TODO confirm).
 * Dispatches to MPV_decode_mb_internal with compile-time-constant
 * lowres/is_mpeg12 flags so each variant is specialized; with CONFIG_SMALL
 * the mpeg12 specialization is omitted to save code size. */
2524 #if !CONFIG_SMALL
 2525  if(s->out_format == FMT_MPEG1) {
 2526  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
 2527  else MPV_decode_mb_internal(s, block, 0, 1);
 2528  } else
 2529 #endif
 2530  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
 2531  else MPV_decode_mb_internal(s, block, 0, 0);
 2532 }
2533 
/* Draws edge padding for the just-decoded band and invokes the user's
 * draw_horiz_band callback.  y/h are in frame lines; for field pictures they
 * are doubled to address the interleaved frame buffer.
 * NOTE(review): HTML extraction dropped lines 2545, 2547 (extra conditions of
 * the edge-drawing guard), 2551-2552 (presumably the hshift/vshift
 * chroma-subsampling declarations used below — TODO confirm), 2578 and 2585
 * (conditions selecting the source picture and the offset mode). */
2537 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
 2538  const int field_pic= s->picture_structure != PICT_FRAME;
 2539  if(field_pic){
 2540  h <<= 1;
 2541  y <<= 1;
 2542  }
 2543 
 2544  if (!s->avctx->hwaccel
 2546  && s->unrestricted_mv
 2548  && !s->intra_only
 2549  && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
 2550  int sides = 0, edge_h;
 2553  if (y==0) sides |= EDGE_TOP;
 2554  if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
 2555 
 /* Clamp the band height so we never pad past the bottom edge. */
 2556  edge_h= FFMIN(h, s->v_edge_pos - y);
 2557 
 2558  s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
 2559  s->linesize, s->h_edge_pos, edge_h,
 2560  EDGE_WIDTH, EDGE_WIDTH, sides);
 2561  s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
 2562  s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
 2563  EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
 2564  s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
 2565  s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
 2566  EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
 2567  }
 2568 
 2569  h= FFMIN(h, s->avctx->height - y);
 2570 
 /* For field pictures the callback only fires once per frame unless the
  * caller explicitly allows per-field delivery. */
 2571  if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
 2572 
 2573  if (s->avctx->draw_horiz_band) {
 2574  AVFrame *src;
 2575  int offset[AV_NUM_DATA_POINTERS];
 2576  int i;
 2577 
 /* NOTE(review): the condition on dropped line 2578 chooses between the
  * current and last picture as callback source — TODO confirm. */
 2579  src= (AVFrame*)s->current_picture_ptr;
 2580  else if(s->last_picture_ptr)
 2581  src= (AVFrame*)s->last_picture_ptr;
 2582  else
 2583  return;
 2584 
 2586  for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
 2587  offset[i] = 0;
 2588  }else{
 2589  offset[0]= y * s->linesize;
 2590  offset[1]=
 2591  offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
 2592  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
 2593  offset[i] = 0;
 2594  }
 2595 
 /* Clear MMX state before handing control to user code. */
 2596  emms_c();
 2597 
 2598  s->avctx->draw_horiz_band(s->avctx, src, offset,
 2599  y, s->picture_structure, h);
 2600  }
 2601 }
2602 
/* Computes the per-MB block_index[] entries (luma 8x8 positions then the two
 * chroma positions) and the dest[] pixel pointers for the current MB
 * (s->mb_x, s->mb_y), accounting for lowres scaling and field pictures.
 * NOTE(review): the guard on dropped line 2620 (which wraps the dest[]
 * row-advance below) is missing from this extraction — TODO confirm. */
2603 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
 2604  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
 2605  const int uvlinesize = s->current_picture.f.linesize[1];
 /* 16-pixel MB shrinks by one power of two per lowres level. */
 2606  const int mb_size= 4 - s->avctx->lowres;
 2607 
 2608  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
 2609  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
 2610  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
 2611  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
 2612  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
 2613  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
 2614  //block_index is not used by mpeg2, so it is not affected by chroma_format
 2615 
 2616  s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
 2617  s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
 2618  s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
 2619 
 2621  {
 2622  if(s->picture_structure==PICT_FRAME){
 2623  s->dest[0] += s->mb_y * linesize << mb_size;
 2624  s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
 2625  s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
 2626  }else{
 /* Field picture: mb_y counts both fields, so halve it for the row. */
 2627  s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
 2628  s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
 2629  s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
 2630  assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
 2631  }
 2632  }
 2633 }
2634 
/* Flush/reset decoder state: releases every internally- or user-owned
 * picture buffer and clears the parser/bitstream bookkeeping so decoding can
 * restart cleanly (e.g. after a seek).
 * NOTE(review): HTML extraction dropped the signature line 2635 (presumably
 * `void ff_mpeg_flush(AVCodecContext *avctx){` — TODO confirm) and lines
 * 2648, 2653 and 2655, which presumably reset the picture pointers and the
 * remaining parse_context fields — TODO confirm. */
 2636  int i;
 2637  MpegEncContext *s = avctx->priv_data;
 2638 
 2639  if(s==NULL || s->picture==NULL)
 2640  return;
 2641 
 2642  for(i=0; i<s->picture_count; i++){
 2643  if (s->picture[i].f.data[0] &&
 2644  (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
 2645  s->picture[i].f.type == FF_BUFFER_TYPE_USER))
 2646  free_frame_buffer(s, &s->picture[i]);
 2647  }
 2649 
 2650  s->mb_x= s->mb_y= 0;
 2651 
 2652  s->parse_context.state= -1;
 2654  s->parse_context.overread= 0;
 2656  s->parse_context.index= 0;
 2657  s->parse_context.last_index= 0;
 2658  s->bitstream_buffer_size=0;
 2659  s->pp_time=0;
 2660 }
2661 
/* MPEG-1 intra dequantizer (presumably dct_unquantize_mpeg1_intra_c — the
 * signature's first line, 2662, was dropped by the HTML extraction; TODO
 * confirm).  DC (block[0]) is scaled by the luma/chroma DC scale; each AC
 * coefficient is multiplied by qscale and the intra matrix, >>3, then forced
 * odd via (level-1)|1 as MPEG-1 inverse quantization requires. */
 2663  DCTELEM *block, int n, int qscale)
 2664 {
 2665  int i, level, nCoeffs;
 2666  const uint16_t *quant_matrix;
 2667 
 2668  nCoeffs= s->block_last_index[n];
 2669 
 /* Blocks 0-3 are luma, 4+ chroma: pick the matching DC scale. */
 2670  if (n < 4)
 2671  block[0] = block[0] * s->y_dc_scale;
 2672  else
 2673  block[0] = block[0] * s->c_dc_scale;
 2674  /* XXX: only mpeg1 */
 2675  quant_matrix = s->intra_matrix;
 2676  for(i=1;i<=nCoeffs;i++) {
 2677  int j= s->intra_scantable.permutated[i];
 2678  level = block[j];
 2679  if (level) {
 /* Work on the magnitude so the odd-forcing rounds toward zero. */
 2680  if (level < 0) {
 2681  level = -level;
 2682  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 2683  level = (level - 1) | 1;
 2684  level = -level;
 2685  } else {
 2686  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 2687  level = (level - 1) | 1;
 2688  }
 2689  block[j] = level;
 2690  }
 2691  }
 2692 }
2693 
/* MPEG-1 inter dequantizer (presumably dct_unquantize_mpeg1_inter_c — the
 * signature's first line, 2694, was dropped; TODO confirm).  Reconstruction
 * is ((2*|level|+1) * qscale * matrix) >> 4 with the result forced odd, sign
 * restored afterwards.  Unlike the intra path, the loop starts at i=0 (DC is
 * dequantized like any other coefficient). */
 2695  DCTELEM *block, int n, int qscale)
 2696 {
 2697  int i, level, nCoeffs;
 2698  const uint16_t *quant_matrix;
 2699 
 2700  nCoeffs= s->block_last_index[n];
 2701 
 2702  quant_matrix = s->inter_matrix;
 2703  for(i=0; i<=nCoeffs; i++) {
 2704  int j= s->intra_scantable.permutated[i];
 2705  level = block[j];
 2706  if (level) {
 2707  if (level < 0) {
 2708  level = -level;
 2709  level = (((level << 1) + 1) * qscale *
 2710  ((int) (quant_matrix[j]))) >> 4;
 2711  level = (level - 1) | 1;
 2712  level = -level;
 2713  } else {
 2714  level = (((level << 1) + 1) * qscale *
 2715  ((int) (quant_matrix[j]))) >> 4;
 2716  level = (level - 1) | 1;
 2717  }
 2718  block[j] = level;
 2719  }
 2720  }
 2721 }
2722 
/* MPEG-2 intra dequantizer (presumably dct_unquantize_mpeg2_intra_c — the
 * signature's first line, 2723, was dropped; TODO confirm).  Same scaling as
 * the MPEG-1 intra path but WITHOUT the odd-forcing step (MPEG-2 uses
 * mismatch control on block[63] instead — see the _bitexact variant below).
 * With alternate scan the full 64 coefficients are always processed. */
 2724  DCTELEM *block, int n, int qscale)
 2725 {
 2726  int i, level, nCoeffs;
 2727  const uint16_t *quant_matrix;
 2728 
 2729  if(s->alternate_scan) nCoeffs= 63;
 2730  else nCoeffs= s->block_last_index[n];
 2731 
 2732  if (n < 4)
 2733  block[0] = block[0] * s->y_dc_scale;
 2734  else
 2735  block[0] = block[0] * s->c_dc_scale;
 2736  quant_matrix = s->intra_matrix;
 2737  for(i=1;i<=nCoeffs;i++) {
 2738  int j= s->intra_scantable.permutated[i];
 2739  level = block[j];
 2740  if (level) {
 2741  if (level < 0) {
 2742  level = -level;
 2743  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 2744  level = -level;
 2745  } else {
 2746  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 2747  }
 2748  block[j] = level;
 2749  }
 2750  }
 2751 }
2752 
/* Bit-exact MPEG-2 intra dequantizer (presumably
 * dct_unquantize_mpeg2_intra_bitexact — signature line 2753 dropped; TODO
 * confirm).  Identical scaling to the plain MPEG-2 intra path, plus the
 * standard's mismatch control: the parity of the sum of all coefficients
 * (seeded at -1, i.e. including the implicit DC contribution) is folded into
 * the LSB of block[63]. */
 2754  DCTELEM *block, int n, int qscale)
 2755 {
 2756  int i, level, nCoeffs;
 2757  const uint16_t *quant_matrix;
 2758  int sum=-1;
 2759 
 2760  if(s->alternate_scan) nCoeffs= 63;
 2761  else nCoeffs= s->block_last_index[n];
 2762 
 2763  if (n < 4)
 2764  block[0] = block[0] * s->y_dc_scale;
 2765  else
 2766  block[0] = block[0] * s->c_dc_scale;
 2767  quant_matrix = s->intra_matrix;
 2768  for(i=1;i<=nCoeffs;i++) {
 2769  int j= s->intra_scantable.permutated[i];
 2770  level = block[j];
 2771  if (level) {
 2772  if (level < 0) {
 2773  level = -level;
 2774  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 2775  level = -level;
 2776  } else {
 2777  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 2778  }
 2779  block[j] = level;
 2780  sum+=level;
 2781  }
 2782  }
 /* MPEG-2 mismatch control: toggle the LSB of the last coefficient. */
 2783  block[63]^=sum&1;
 2784 }
2785 
/* MPEG-2 inter dequantizer (presumably dct_unquantize_mpeg2_inter_c —
 * signature line 2786 dropped; TODO confirm).  ((2*|level|+1)*qscale*matrix)
 * >> 4 with sign restored, no odd-forcing; mismatch control folds the
 * coefficient-sum parity into the LSB of block[63]. */
 2787  DCTELEM *block, int n, int qscale)
 2788 {
 2789  int i, level, nCoeffs;
 2790  const uint16_t *quant_matrix;
 2791  int sum=-1;
 2792 
 2793  if(s->alternate_scan) nCoeffs= 63;
 2794  else nCoeffs= s->block_last_index[n];
 2795 
 2796  quant_matrix = s->inter_matrix;
 2797  for(i=0; i<=nCoeffs; i++) {
 2798  int j= s->intra_scantable.permutated[i];
 2799  level = block[j];
 2800  if (level) {
 2801  if (level < 0) {
 2802  level = -level;
 2803  level = (((level << 1) + 1) * qscale *
 2804  ((int) (quant_matrix[j]))) >> 4;
 2805  level = -level;
 2806  } else {
 2807  level = (((level << 1) + 1) * qscale *
 2808  ((int) (quant_matrix[j]))) >> 4;
 2809  }
 2810  block[j] = level;
 2811  sum+=level;
 2812  }
 2813  }
 /* MPEG-2 mismatch control on the last coefficient. */
 2814  block[63]^=sum&1;
 2815 }
2816 
/* H.263 intra dequantizer (presumably dct_unquantize_h263_intra_c —
 * signature line 2817 dropped; TODO confirm).  Matrix-free reconstruction:
 * level*2*qscale +/- qadd where qadd = (qscale-1)|1.  With advanced intra
 * coding (h263_aic) the DC is left untouched and qadd is zero; with AC
 * prediction all 63 AC coefficients must be processed regardless of the
 * last-index. */
 2818  DCTELEM *block, int n, int qscale)
 2819 {
 2820  int i, level, qmul, qadd;
 2821  int nCoeffs;
 2822 
 2823  assert(s->block_last_index[n]>=0);
 2824 
 2825  qmul = qscale << 1;
 2826 
 2827  if (!s->h263_aic) {
 2828  if (n < 4)
 2829  block[0] = block[0] * s->y_dc_scale;
 2830  else
 2831  block[0] = block[0] * s->c_dc_scale;
 2832  qadd = (qscale - 1) | 1;
 2833  }else{
 2834  qadd = 0;
 2835  }
 2836  if(s->ac_pred)
 2837  nCoeffs=63;
 2838  else
 2839  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
 2840 
 /* Coefficients are in raster order here, so no scantable permutation. */
 2841  for(i=1; i<=nCoeffs; i++) {
 2842  level = block[i];
 2843  if (level) {
 2844  if (level < 0) {
 2845  level = level * qmul - qadd;
 2846  } else {
 2847  level = level * qmul + qadd;
 2848  }
 2849  block[i] = level;
 2850  }
 2851  }
 2852 }
2853 
/* H.263 inter dequantizer (presumably dct_unquantize_h263_inter_c —
 * signature line 2854 dropped; TODO confirm).  Same level*2*qscale +/- qadd
 * rule as the intra path, but the DC coefficient (i=0) is dequantized like
 * any other and qadd is always (qscale-1)|1. */
 2855  DCTELEM *block, int n, int qscale)
 2856 {
 2857  int i, level, qmul, qadd;
 2858  int nCoeffs;
 2859 
 2860  assert(s->block_last_index[n]>=0);
 2861 
 2862  qadd = (qscale - 1) | 1;
 2863  qmul = qscale << 1;
 2864 
 2865  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
 2866 
 2867  for(i=0; i<=nCoeffs; i++) {
 2868  level = block[i];
 2869  if (level) {
 2870  if (level < 0) {
 2871  level = level * qmul - qadd;
 2872  } else {
 2873  level = level * qmul + qadd;
 2874  }
 2875  block[i] = level;
 2876  }
 2877  }
 2878 }
2879 
/* Sets the current quantizer, clamped to the legal MPEG range [1,31], and
 * derives the chroma quantizer and luma DC scale from their lookup tables.
 * NOTE(review): dropped line 2894 presumably updated s->c_dc_scale from
 * s->c_dc_scale_table[s->chroma_qscale] — TODO confirm against pristine
 * source. */
2883 void ff_set_qscale(MpegEncContext * s, int qscale)
 2884 {
 2885  if (qscale < 1)
 2886  qscale = 1;
 2887  else if (qscale > 31)
 2888  qscale = 31;
 2889 
 2890  s->qscale = qscale;
 2891  s->chroma_qscale= s->chroma_qscale_table[qscale];
 2892 
 2893  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
 2895 }
2896 
/* NOTE(review): only the braces of this function survived the HTML
 * extraction — its signature (line 2897) and body (lines 2899-2900) were
 * dropped.  Recover it from the pristine mpegvideo.c; nothing can be
 * documented from what remains. */
2898 {
 2901 }