00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043
00044
00045
00046
/* Forward declarations for the C reference inverse-quantization routines.
 * ff_dct_common_init() installs these into the MpegEncContext function
 * pointers (possibly overridden later by arch-specific versions). */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
/* Bit-exact variant selected when CODEC_FLAG_BITEXACT is set. */
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
00061
00062
00063
00064
00065
00066
00067
00068
/* Default luma-qscale -> chroma-qscale mapping: the identity.
 * Codecs with a nonlinear chroma scale install their own table. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
    /* identity mapping for qscale 0..31 */
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
00074
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value. */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
    /* same scale regardless of qscale */
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
};
00086
/* MPEG-2 DC scale table, constant 4 (higher DC precision than MPEG-1). */
static const uint8_t mpeg2_dc_scale_table1[128] = {
    /* same scale regardless of qscale */
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
};
00098
/* MPEG-2 DC scale table, constant 2. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
    /* same scale regardless of qscale */
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
};
00110
/* MPEG-2 DC scale table, constant 1 (full DC precision, no scaling). */
static const uint8_t mpeg2_dc_scale_table3[128] = {
    /* same scale regardless of qscale */
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
};
00122
/* DC scale tables selected by index 0..3; the per-index scales are
 * 8, 4, 2, 1 — presumably indexed by MPEG-2 intra_dc_precision
 * (NOTE(review): confirm against the code that reads this table). */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
00129
/* PIX_FMT_NONE-terminated list: plain YUV 4:2:0 only. */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00134
/* PIX_FMT_NONE-terminated list of 4:2:0 formats with the hardware
 * acceleration formats listed first, software YUV420P as fallback. */
const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_VDA_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00142
/**
 * Scan [p, end) for an MPEG start code (byte sequence 00 00 01 xx).
 * *state is a 32-bit shift register holding the last four bytes seen;
 * it is carried across calls so a start code straddling two buffers is
 * still found. Returns a pointer just past the start code (whose last
 * four bytes are then in *state), or end if no code was found.
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
                                          const uint8_t *end,
                                          uint32_t * restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* Feed up to three bytes through the shift register first: this
     * completes any start code begun in a previous buffer. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Fast skip scan: examine the trailing bytes of each 3-byte window
     * and advance by as many bytes as the window can rule out. The loop
     * exits (with p just past the 01) when 00 00 01 is matched. */
    while (p < end) {
        if (p[-1] > 1 ) p += 3;
        else if (p[-2] ) p += 2;
        else if (p[-3]|(p[-1]-1)) p++;
        else {
            p++;
            break;
        }
    }

    /* Reload the shift register from the last four bytes consumed so the
     * caller (and the next call) sees the full start code in *state. */
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);

    return p + 4;
}
00175
00176
/**
 * Initialize the DSP context, install the inverse-quantization function
 * pointers (C reference versions, then arch-specific overrides), and set
 * up the scan tables permuted for the selected IDCT. Returns 0.
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    dsputil_init(&s->dsp, s->avctx);

    /* C reference implementations as the baseline. */
    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* Architecture-specific overrides (at most one branch compiles in). */
#if HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* Build scan tables permuted for the IDCT chosen by dsputil_init();
     * must run after the (possible) arch override of idct_permutation. */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
00221
00222 void ff_copy_picture(Picture *dst, Picture *src)
00223 {
00224 *dst = *src;
00225 dst->f.type = FF_BUFFER_TYPE_COPY;
00226 }
00227
00231 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00232 {
00233
00234
00235
00236 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
00237 ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
00238 else
00239 avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
00240 av_freep(&pic->f.hwaccel_picture_private);
00241 }
00242
/**
 * Allocate a frame buffer for a picture, including hwaccel private data
 * when a hardware accelerator is active, and sanity-check the result.
 * Returns 0 on success, -1 on failure (with everything cleaned up).
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    /* Hwaccel private data must exist before get_buffer is called. */
    if (s->avctx->hwaccel) {
        assert(!pic->f.hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->f.hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    /* Same codec split as free_frame_buffer(): WMV3IMAGE/VC1IMAGE use the
     * default allocator, everything else the thread-aware one. */
    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
        r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
    else
        r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);

    if (r < 0 || !pic->f.type || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
               r, pic->f.type, pic->f.data[0]);
        av_freep(&pic->f.hwaccel_picture_private);
        return -1;
    }

    /* Strides must not change once the context has committed to one. */
    if (s->linesize && (s->linesize != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* The code here assumes both chroma planes share one stride. */
    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
00290
00295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
00296 {
00297 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
00298
00299
00300
00301 const int mb_array_size = s->mb_stride * s->mb_height;
00302 const int b8_array_size = s->b8_stride * s->mb_height * 2;
00303 const int b4_array_size = s->b4_stride * s->mb_height * 4;
00304 int i;
00305 int r = -1;
00306
00307 if (shared) {
00308 assert(pic->f.data[0]);
00309 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
00310 pic->f.type = FF_BUFFER_TYPE_SHARED;
00311 } else {
00312 assert(!pic->f.data[0]);
00313
00314 if (alloc_frame_buffer(s, pic) < 0)
00315 return -1;
00316
00317 s->linesize = pic->f.linesize[0];
00318 s->uvlinesize = pic->f.linesize[1];
00319 }
00320
00321 if (pic->f.qscale_table == NULL) {
00322 if (s->encoding) {
00323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
00324 mb_array_size * sizeof(int16_t), fail)
00325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
00326 mb_array_size * sizeof(int16_t), fail)
00327 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
00328 mb_array_size * sizeof(int8_t ), fail)
00329 }
00330
00331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
00332 mb_array_size * sizeof(uint8_t) + 2, fail)
00333 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
00334 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
00335 fail)
00336 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
00337 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
00338 fail)
00339 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
00340 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
00341 if (s->out_format == FMT_H264) {
00342 for (i = 0; i < 2; i++) {
00343 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00344 2 * (b4_array_size + 4) * sizeof(int16_t),
00345 fail)
00346 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00347 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00348 4 * mb_array_size * sizeof(uint8_t), fail)
00349 }
00350 pic->f.motion_subsample_log2 = 2;
00351 } else if (s->out_format == FMT_H263 || s->encoding ||
00352 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
00353 for (i = 0; i < 2; i++) {
00354 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00355 2 * (b8_array_size + 4) * sizeof(int16_t),
00356 fail)
00357 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00358 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00359 4 * mb_array_size * sizeof(uint8_t), fail)
00360 }
00361 pic->f.motion_subsample_log2 = 3;
00362 }
00363 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00364 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
00365 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
00366 }
00367 pic->f.qstride = s->mb_stride;
00368 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
00369 1 * sizeof(AVPanScan), fail)
00370 }
00371
00372 pic->owner2 = s;
00373
00374 return 0;
00375 fail:
00376 if (r >= 0)
00377 free_frame_buffer(s, pic);
00378 return -1;
00379 }
00380
/**
 * Deallocate a Picture: the frame buffer (only if this code owns it —
 * shared buffers belong to the caller) and all side-data tables.
 */
static void free_picture(MpegEncContext *s, Picture *pic)
{
    int i;

    /* Release the buffer only when present and not caller-owned. */
    if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
        free_frame_buffer(s, pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->f.mbskip_table);
    av_freep(&pic->qscale_table_base);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->f.dct_coeff);
    av_freep(&pic->f.pan_scan);
    /* mb_type pointed into mb_type_base (freed above) — clear it. */
    pic->f.mb_type = NULL;
    for (i = 0; i < 2; i++) {
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->f.ref_index[i]);
    }

    /* For shared buffers just forget the pointers; the data is not ours. */
    if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
        for (i = 0; i < 4; i++) {
            pic->f.base[i] =
            pic->f.data[i] = NULL;
        }
        pic->f.type = 0;
    }
}
00414
/**
 * Allocate the per-thread (per-slice-context) scratch buffers of a
 * context: edge emulation buffer, ME scratchpads, block arrays and —
 * for H.263-family formats — the AC prediction values.
 * Returns 0 on success, -1 on allocation failure (partial allocations
 * are left for free_duplicate_context() to release).
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    /* Buffer used when motion compensation reads beyond picture edges. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
                      (s->width + 64) * 2 * 21 * 2, fail);

    /* One scratchpad allocation shared by several users; the aliases
     * below carve it up (obmc_scratchpad starts 16 bytes in). */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
                      (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;
    if (s->encoding) {
        /* Motion-estimation hash map and score map (encoder only). */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* AC values: Y plane first, then the two chroma planes; the +1
         * offsets skip the guard row/column at the top-left. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1;
}
00465
00466 static void free_duplicate_context(MpegEncContext *s)
00467 {
00468 if (s == NULL)
00469 return;
00470
00471 av_freep(&s->edge_emu_buffer);
00472 av_freep(&s->me.scratchpad);
00473 s->me.temp =
00474 s->rd_scratchpad =
00475 s->b_scratchpad =
00476 s->obmc_scratchpad = NULL;
00477
00478 av_freep(&s->dct_error_sum);
00479 av_freep(&s->me.map);
00480 av_freep(&s->me.score_map);
00481 av_freep(&s->blocks);
00482 av_freep(&s->ac_val_base);
00483 s->block = NULL;
00484 }
00485
/**
 * Copy only the per-thread members (scratch buffers, ME state, slice
 * row range, bitstream writer, AC values) from src into bak. Used by
 * ff_update_duplicate_context() to preserve a thread's private state
 * across a wholesale context memcpy.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
00512
00513 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
00514 {
00515 MpegEncContext bak;
00516 int i;
00517
00518
00519 backup_duplicate_context(&bak, dst);
00520 memcpy(dst, src, sizeof(MpegEncContext));
00521 backup_duplicate_context(dst, &bak);
00522 for (i = 0; i < 12; i++) {
00523 dst->pblocks[i] = &dst->block[i];
00524 }
00525
00526
00527 }
00528
/**
 * Frame-threading callback: synchronize a worker's MpegEncContext with
 * the source thread's context. On first use the destination context is
 * bootstrapped from a wholesale copy plus MPV_common_init(). Afterwards
 * the picture lists, pointers and selected parameter ranges are copied.
 * Returns 0.
 */
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src || !s1->context_initialized)
        return 0;

    /* First call for this thread: clone the whole context, then fix up
     * the members that must be thread-private before re-initializing. */
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        /* Each thread gets its own disjoint slot range in the shared
         * picture array. */
        s->picture_range_start  += MAX_PICTURE_COUNT;
        s->picture_range_end    += MAX_PICTURE_COUNT;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        MPV_common_init(s);
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    /* Copy the picture table and the contiguous run of struct members
     * from last_picture up to (not including) last_picture_ptr — the
     * pointer-difference memcpy relies on the member layout in
     * MpegEncContext staying exactly as declared. */
    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    memcpy(&s->last_picture, &s1->last_picture,
           (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);

    /* Translate the picture pointers into this thread's picture array. */
    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;

    /* MPEG-4 state: the member window [time_increment_bits, shape). */
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->dropable     = s1->dropable;

    /* DivX packed-bitstream handling. */
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    /* MPEG-2 state: the member window [progressive_sequence, rtp_mode). */
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
        }
    }

    return 0;
}
00613
00620 void MPV_common_defaults(MpegEncContext *s)
00621 {
00622 s->y_dc_scale_table =
00623 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
00624 s->chroma_qscale_table = ff_default_chroma_qscale_table;
00625 s->progressive_frame = 1;
00626 s->progressive_sequence = 1;
00627 s->picture_structure = PICT_FRAME;
00628
00629 s->coded_picture_number = 0;
00630 s->picture_number = 0;
00631 s->input_picture_number = 0;
00632
00633 s->picture_in_gop_number = 0;
00634
00635 s->f_code = 1;
00636 s->b_code = 1;
00637
00638 s->picture_range_start = 0;
00639 s->picture_range_end = MAX_PICTURE_COUNT;
00640
00641 s->slice_context_count = 1;
00642 }
00643
/**
 * Set decoder defaults; currently identical to the common defaults.
 */
void MPV_decode_defaults(MpegEncContext *s)
{
    MPV_common_defaults(s);
}
00653
/**
 * Initialize the common parts of an MpegEncContext: geometry (mb/block
 * strides and counts), DSP/scan tables, the picture array, the encoder
 * motion-vector tables, H.263/MPEG-4 prediction tables, and the
 * per-slice thread contexts.
 * Returns 0 on success, -1 on failure (MPV_common_end() cleans up).
 */
av_cold int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* Interlaced MPEG-2 needs mb_height rounded to 32-pixel units. */
    if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* Clamp the slice count: never more than MAX_THREADS or mb rows. */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    /* Chroma subsampling shifts for the negotiated pixel format. */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
                                  &s->chroma_y_shift);

    /* Uppercased fourccs, used for bug workarounds elsewhere. */
    s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);

    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);

    if (s->width && s->height) {
        /* Macroblock/block geometry; the +1 strides leave a guard column. */
        s->mb_width   = (s->width + 15) / 16;
        s->mb_stride  = s->mb_width + 1;
        s->b8_stride  = s->mb_width * 2 + 1;
        s->b4_stride  = s->mb_width * 4 + 1;
        mb_array_size = s->mb_height * s->mb_stride;
        mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

        /* Edge positions in pixels. */
        s->h_edge_pos = s->mb_width * 16;
        s->v_edge_pos = s->mb_height * 16;

        s->mb_num     = s->mb_width * s->mb_height;

        s->block_wrap[0] =
        s->block_wrap[1] =
        s->block_wrap[2] =
        s->block_wrap[3] = s->b8_stride;
        s->block_wrap[4] =
        s->block_wrap[5] = s->mb_stride;

        y_size  = s->b8_stride * (2 * s->mb_height + 1);
        c_size  = s->mb_stride * (s->mb_height + 1);
        yc_size = y_size + 2 * c_size;

        s->avctx->coded_frame = (AVFrame *)&s->current_picture;

        /* Map linear mb index -> xy position in the strided mb arrays. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                          fail);
        for (y = 0; y < s->mb_height; y++)
            for (x = 0; x < s->mb_width; x++)
                s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

        s->mb_index2xy[s->mb_height * s->mb_width] =
            (s->mb_height - 1) * s->mb_stride + s->mb_width;

        if (s->encoding) {
            /* Encoder motion-vector tables, one per prediction mode;
             * the *_table pointers skip the guard row/column. */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
                              mv_table_size * 2 * sizeof(int16_t), fail);
            s->p_mv_table            = s->p_mv_table_base +
                                       s->mb_stride + 1;
            s->b_forw_mv_table       = s->b_forw_mv_table_base +
                                       s->mb_stride + 1;
            s->b_back_mv_table       = s->b_back_mv_table_base +
                                       s->mb_stride + 1;
            s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
                                       s->mb_stride + 1;
            s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
                                       s->mb_stride + 1;
            s->b_direct_mv_table     = s->b_direct_mv_table_base +
                                       s->mb_stride + 1;

            if (s->msmpeg4_version) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                                  2 * 2 * (MAX_LEVEL + 1) *
                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

            /* Per-MB type and lambda tables (encoder rate control). */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
                              sizeof(uint16_t), fail);

            FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
                              sizeof(int), fail);

            /* Precomputed quantization matrices for all 32 qscales. */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
                              64 * 32 * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
                              64 * 32 * sizeof(int), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
                              64 * 32 * 2 * sizeof(uint16_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);

            if (s->avctx->noise_reduction) {
                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
                                  2 * 64 * sizeof(uint16_t), fail);
            }
        }
    }

    /* One picture slot range per frame thread. */
    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      s->picture_count * sizeof(Picture), fail);
    for (i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
    }

    if (s->width && s->height) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
                          mb_array_size * sizeof(uint8_t), fail);

        if (s->codec_id == CODEC_ID_MPEG4 ||
            (s->flags & CODEC_FLAG_INTERLACED_ME)) {
            /* Interlaced ME: field MV tables for both directions and
             * both field parities. */
            for (i = 0; i < 2; i++) {
                int j, k;
                for (j = 0; j < 2; j++) {
                    for (k = 0; k < 2; k++) {
                        FF_ALLOCZ_OR_GOTO(s->avctx,
                                          s->b_field_mv_table_base[i][j][k],
                                          mv_table_size * 2 * sizeof(int16_t),
                                          fail);
                        s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                       s->mb_stride + 1;
                    }
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
                                      mb_array_size * 2 * sizeof(uint8_t),
                                      fail);
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
                                                + s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
                                  mb_array_size * 2 * sizeof(uint8_t),
                                  fail);
            }
        }
        if (s->out_format == FMT_H263) {
            /* Coded-block pattern bookkeeping for AC prediction. */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
            s->coded_block = s->coded_block_base + s->b8_stride + 1;

            /* CBP and prediction-direction tables. */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
                              mb_array_size * sizeof(uint8_t), fail);
            FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
                              mb_array_size * sizeof(uint8_t), fail);
        }

        if (s->h263_pred || s->h263_plus || !s->encoding) {
            /* DC prediction values: Y plane followed by both chroma
             * planes; initialized to the reset value 1024. */
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
                              yc_size * sizeof(int16_t), fail);
            s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
            s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
            s->dc_val[2] = s->dc_val[1] + c_size;
            for (i = 0; i < yc_size; i++)
                s->dc_val_base[i] = 1024;
        }

        /* All MBs start marked intra (so prediction resets correctly). */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
        memset(s->mbintra_table, 1, mb_array_size);

        FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);

        s->parse_context.state = -1;
        if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
            s->avctx->debug_mv) {
            /* Visualization buffers for debug overlays (one per plane). */
            s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
            s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
            s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
                2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
        }
    }

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    if (s->width && s->height) {
        if (nb_slices > 1) {
            /* Clone the context once per slice thread and divide the mb
             * rows evenly between them. */
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i], s) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            if (init_duplicate_context(s, s) < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
fail:
    MPV_common_end(s);
    return -1;
}
00921
00922
/**
 * Free everything allocated by MPV_common_init() (and the per-picture
 * data). Safe to call on a partially initialized context — all frees go
 * through av_freep()/NULL-tolerant helpers.
 */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* Per-slice thread contexts: free their scratch buffers, then the
     * cloned contexts themselves (index 0 is s, not freed). */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    /* Encoder MV tables; clear the offset pointers into the freed bases. */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* Copies (frame threading) must not free pictures they don't own. */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;

    for (i = 0; i < 3; i++)
        av_freep(&s->visualization_buffer[i]);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}
01007
/**
 * Build the derived lookup tables of an RLTable: for each of the two
 * "last" halves, the maximum level per run, maximum run per level, and
 * the first table index for each run. Tables are stored either in the
 * caller-provided static_store or in freshly allocated buffers.
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* Already initialized (static tables survive across calls). */
    if (static_store && rl->max_level[0])
        return;

    /* Pass 0 covers codes before rl->last, pass 1 the rest. */
    for (last = 0; last < 2; last++) {
        if (last == 0) {
            start = 0;
            end   = rl->last;
        } else {
            start = rl->last;
            end   = rl->n;
        }

        /* index_run entries default to rl->n ("no code for this run"). */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* Carve the three tables out of static_store, or heap-allocate. */
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
01059
01060 void ff_init_vlc_rl(RLTable *rl)
01061 {
01062 int i, q;
01063
01064 for (q = 0; q < 32; q++) {
01065 int qmul = q * 2;
01066 int qadd = (q - 1) | 1;
01067
01068 if (q == 0) {
01069 qmul = 1;
01070 qadd = 0;
01071 }
01072 for (i = 0; i < rl->vlc.table_size; i++) {
01073 int code = rl->vlc.table[i][0];
01074 int len = rl->vlc.table[i][1];
01075 int level, run;
01076
01077 if (len == 0) {
01078 run = 66;
01079 level = MAX_LEVEL;
01080 } else if (len < 0) {
01081 run = 0;
01082 level = code;
01083 } else {
01084 if (code == rl->n) {
01085 run = 66;
01086 level = 0;
01087 } else {
01088 run = rl->table_run[code] + 1;
01089 level = rl->table_level[code] * qmul + qadd;
01090 if (code >= rl->last) run += 192;
01091 }
01092 }
01093 rl->rl_vlc[q][i].len = len;
01094 rl->rl_vlc[q][i].level = level;
01095 rl->rl_vlc[q][i].run = run;
01096 }
01097 }
01098 }
01099
01100 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
01101 {
01102 int i;
01103
01104
01105 for (i = 0; i < s->picture_count; i++) {
01106 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
01107 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
01108 (remove_current || &s->picture[i] != s->current_picture_ptr)
01109 ) {
01110 free_frame_buffer(s, &s->picture[i]);
01111 }
01112 }
01113 }
01114
01115 int ff_find_unused_picture(MpegEncContext *s, int shared)
01116 {
01117 int i;
01118
01119 if (shared) {
01120 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01121 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
01122 return i;
01123 }
01124 } else {
01125 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01126 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
01127 return i;
01128 }
01129 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01130 if (s->picture[i].f.data[0] == NULL)
01131 return i;
01132 }
01133 }
01134
01135 return AVERROR_INVALIDDATA;
01136 }
01137
01138 static void update_noise_reduction(MpegEncContext *s)
01139 {
01140 int intra, i;
01141
01142 for (intra = 0; intra < 2; intra++) {
01143 if (s->dct_count[intra] > (1 << 16)) {
01144 for (i = 0; i < 64; i++) {
01145 s->dct_error_sum[intra][i] >>= 1;
01146 }
01147 s->dct_count[intra] >>= 1;
01148 }
01149
01150 for (i = 0; i < 64; i++) {
01151 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
01152 s->dct_count[intra] +
01153 s->dct_error_sum[intra][i] / 2) /
01154 (s->dct_error_sum[intra][i] + 1);
01155 }
01156 }
01157 }
01158
01163 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
01164 {
01165 int i;
01166 Picture *pic;
01167 s->mb_skipped = 0;
01168
01169 assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
01170 s->codec_id == CODEC_ID_SVQ3);
01171
01172
01173 if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
01174 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
01175 s->last_picture_ptr != s->next_picture_ptr &&
01176 s->last_picture_ptr->f.data[0]) {
01177 if (s->last_picture_ptr->owner2 == s)
01178 free_frame_buffer(s, s->last_picture_ptr);
01179 }
01180
01181
01182
01183 if (!s->encoding) {
01184 for (i = 0; i < s->picture_count; i++) {
01185 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
01186 &s->picture[i] != s->last_picture_ptr &&
01187 &s->picture[i] != s->next_picture_ptr &&
01188 s->picture[i].f.reference) {
01189 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
01190 av_log(avctx, AV_LOG_ERROR,
01191 "releasing zombie picture\n");
01192 free_frame_buffer(s, &s->picture[i]);
01193 }
01194 }
01195 }
01196 }
01197
01198 if (!s->encoding) {
01199 ff_release_unused_pictures(s, 1);
01200
01201 if (s->current_picture_ptr &&
01202 s->current_picture_ptr->f.data[0] == NULL) {
01203
01204
01205 pic = s->current_picture_ptr;
01206 } else {
01207 i = ff_find_unused_picture(s, 0);
01208 pic = &s->picture[i];
01209 }
01210
01211 pic->f.reference = 0;
01212 if (!s->dropable) {
01213 if (s->codec_id == CODEC_ID_H264)
01214 pic->f.reference = s->picture_structure;
01215 else if (s->pict_type != AV_PICTURE_TYPE_B)
01216 pic->f.reference = 3;
01217 }
01218
01219 pic->f.coded_picture_number = s->coded_picture_number++;
01220
01221 if (ff_alloc_picture(s, pic, 0) < 0)
01222 return -1;
01223
01224 s->current_picture_ptr = pic;
01225
01226 s->current_picture_ptr->f.top_field_first = s->top_field_first;
01227 if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
01228 s->codec_id == CODEC_ID_MPEG2VIDEO) {
01229 if (s->picture_structure != PICT_FRAME)
01230 s->current_picture_ptr->f.top_field_first =
01231 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
01232 }
01233 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
01234 !s->progressive_sequence;
01235 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
01236 }
01237
01238 s->current_picture_ptr->f.pict_type = s->pict_type;
01239
01240
01241 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
01242
01243 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
01244
01245 if (s->pict_type != AV_PICTURE_TYPE_B) {
01246 s->last_picture_ptr = s->next_picture_ptr;
01247 if (!s->dropable)
01248 s->next_picture_ptr = s->current_picture_ptr;
01249 }
01250
01251
01252
01253
01254
01255
01256
01257 if (s->codec_id != CODEC_ID_H264) {
01258 if ((s->last_picture_ptr == NULL ||
01259 s->last_picture_ptr->f.data[0] == NULL) &&
01260 (s->pict_type != AV_PICTURE_TYPE_I ||
01261 s->picture_structure != PICT_FRAME)) {
01262 if (s->pict_type != AV_PICTURE_TYPE_I)
01263 av_log(avctx, AV_LOG_ERROR,
01264 "warning: first frame is no keyframe\n");
01265 else if (s->picture_structure != PICT_FRAME)
01266 av_log(avctx, AV_LOG_INFO,
01267 "allocate dummy last picture for field based first keyframe\n");
01268
01269
01270 i = ff_find_unused_picture(s, 0);
01271 s->last_picture_ptr = &s->picture[i];
01272
01273 s->last_picture_ptr->f.reference = 3;
01274 s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;
01275
01276 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
01277 return -1;
01278 ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
01279 INT_MAX, 0);
01280 ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
01281 INT_MAX, 1);
01282 }
01283 if ((s->next_picture_ptr == NULL ||
01284 s->next_picture_ptr->f.data[0] == NULL) &&
01285 s->pict_type == AV_PICTURE_TYPE_B) {
01286
01287 i = ff_find_unused_picture(s, 0);
01288 s->next_picture_ptr = &s->picture[i];
01289
01290 s->next_picture_ptr->f.reference = 3;
01291 s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;
01292
01293 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
01294 return -1;
01295 ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
01296 INT_MAX, 0);
01297 ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
01298 INT_MAX, 1);
01299 }
01300 }
01301
01302 if (s->last_picture_ptr)
01303 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
01304 if (s->next_picture_ptr)
01305 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
01306
01307 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
01308 (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
01309 if (s->next_picture_ptr)
01310 s->next_picture_ptr->owner2 = s;
01311 if (s->last_picture_ptr)
01312 s->last_picture_ptr->owner2 = s;
01313 }
01314
01315 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
01316 s->last_picture_ptr->f.data[0]));
01317
01318 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
01319 int i;
01320 for (i = 0; i < 4; i++) {
01321 if (s->picture_structure == PICT_BOTTOM_FIELD) {
01322 s->current_picture.f.data[i] +=
01323 s->current_picture.f.linesize[i];
01324 }
01325 s->current_picture.f.linesize[i] *= 2;
01326 s->last_picture.f.linesize[i] *= 2;
01327 s->next_picture.f.linesize[i] *= 2;
01328 }
01329 }
01330
01331 s->err_recognition = avctx->err_recognition;
01332
01333
01334
01335
01336 if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
01337 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
01338 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
01339 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
01340 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
01341 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
01342 } else {
01343 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
01344 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
01345 }
01346
01347 if (s->dct_error_sum) {
01348 assert(s->avctx->noise_reduction && s->encoding);
01349 update_noise_reduction(s);
01350 }
01351
01352 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
01353 return ff_xvmc_field_start(s, avctx);
01354
01355 return 0;
01356 }
01357
01358
01359
/**
 * Finish the current frame: complete hardware acceleration, pad the
 * picture edges for unrestricted motion-vector prediction, recycle
 * encoder-side buffers and export the coded frame.
 * Called after all macroblocks of a frame are decoded/encoded.
 */
void MPV_frame_end(MpegEncContext *s)
{
    int i;

    /* finish field/frame and draw the padded edges */
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        /* hardware path: XvMC finalizes the field itself */
        ff_xvmc_field_end(s);
    } else if ((s->error_count || s->encoding) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
               s->current_picture.f.reference &&
               !s->intra_only &&
               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        /* replicate border pixels so later MC with unrestricted MVs can
         * read outside the visible area; chroma planes are scaled by the
         * pixel format's subsampling shifts */
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();   /* clear MMX state before returning to generic code */

    /* remember per-type state used when coding the next frame */
    s->last_pict_type                = s->pict_type;
    s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
            break;
        }
    }
    assert(i < MAX_PICTURE_COUNT);
#endif

    if (s->encoding) {
        /* release non-reference frames */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
               ) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }

#if 0
    memset(&s->last_picture,    0, sizeof(Picture));
    memset(&s->next_picture,    0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;

    if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
        /* the frame is fully available now; unblock frame-threaded readers */
        ff_thread_report_progress((AVFrame *) s->current_picture_ptr, INT_MAX, 0);
    }
}
01429
01437 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
01438 int w, int h, int stride, int color)
01439 {
01440 int x, y, fr, f;
01441
01442 sx = av_clip(sx, 0, w - 1);
01443 sy = av_clip(sy, 0, h - 1);
01444 ex = av_clip(ex, 0, w - 1);
01445 ey = av_clip(ey, 0, h - 1);
01446
01447 buf[sy * stride + sx] += color;
01448
01449 if (FFABS(ex - sx) > FFABS(ey - sy)) {
01450 if (sx > ex) {
01451 FFSWAP(int, sx, ex);
01452 FFSWAP(int, sy, ey);
01453 }
01454 buf += sx + sy * stride;
01455 ex -= sx;
01456 f = ((ey - sy) << 16) / ex;
01457 for (x = 0; x = ex; x++) {
01458 y = (x * f) >> 16;
01459 fr = (x * f) & 0xFFFF;
01460 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
01461 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
01462 }
01463 } else {
01464 if (sy > ey) {
01465 FFSWAP(int, sx, ex);
01466 FFSWAP(int, sy, ey);
01467 }
01468 buf += sx + sy * stride;
01469 ey -= sy;
01470 if (ey)
01471 f = ((ex - sx) << 16) / ey;
01472 else
01473 f = 0;
01474 for (y = 0; y = ey; y++) {
01475 x = (y * f) >> 16;
01476 fr = (y * f) & 0xFFFF;
01477 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
01478 buf[y * stride + x + 1] += (color * fr ) >> 16;
01479 }
01480 }
01481 }
01482
/**
 * Draw an arrow: a shaft from (sx, sy) to (ex, ey) plus, when the shaft
 * is long enough, two short head strokes at the (sx, sy) end.
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int dx, dy;

    /* limit the endpoints to a band around the visible picture */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    dx = ex - sx;
    dy = ey - sy;

    /* only arrows longer than 3 pixels get a head */
    if (dx * dx + dy * dy > 3 * 3) {
        int head_x = dx + dy;    /* shaft direction rotated by 45 degrees */
        int head_y = -dx + dy;
        int length = ff_sqrt((head_x * head_x + head_y * head_y) << 8);

        /* normalize the head strokes to length 3 (12.4 fixed point) */
        head_x = ROUNDED_DIV(head_x * 3 << 4, length);
        head_y = ROUNDED_DIV(head_y * 3 << 4, length);

        draw_line(buf, sx, sy, sx + head_x, sy + head_y, w, h, stride, color);
        draw_line(buf, sx, sy, sx - head_y, sy + head_x, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
01517
01521 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
01522 {
01523 if (s->avctx->hwaccel || !pict || !pict->mb_type)
01524 return;
01525
01526 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
01527 int x,y;
01528
01529 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
01530 switch (pict->pict_type) {
01531 case AV_PICTURE_TYPE_I:
01532 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
01533 break;
01534 case AV_PICTURE_TYPE_P:
01535 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
01536 break;
01537 case AV_PICTURE_TYPE_B:
01538 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
01539 break;
01540 case AV_PICTURE_TYPE_S:
01541 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
01542 break;
01543 case AV_PICTURE_TYPE_SI:
01544 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
01545 break;
01546 case AV_PICTURE_TYPE_SP:
01547 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
01548 break;
01549 }
01550 for (y = 0; y < s->mb_height; y++) {
01551 for (x = 0; x < s->mb_width; x++) {
01552 if (s->avctx->debug & FF_DEBUG_SKIP) {
01553 int count = s->mbskip_table[x + y * s->mb_stride];
01554 if (count > 9)
01555 count = 9;
01556 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01557 }
01558 if (s->avctx->debug & FF_DEBUG_QP) {
01559 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
01560 pict->qscale_table[x + y * s->mb_stride]);
01561 }
01562 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
01563 int mb_type = pict->mb_type[x + y * s->mb_stride];
01564
01565 if (IS_PCM(mb_type))
01566 av_log(s->avctx, AV_LOG_DEBUG, "P");
01567 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01568 av_log(s->avctx, AV_LOG_DEBUG, "A");
01569 else if (IS_INTRA4x4(mb_type))
01570 av_log(s->avctx, AV_LOG_DEBUG, "i");
01571 else if (IS_INTRA16x16(mb_type))
01572 av_log(s->avctx, AV_LOG_DEBUG, "I");
01573 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01574 av_log(s->avctx, AV_LOG_DEBUG, "d");
01575 else if (IS_DIRECT(mb_type))
01576 av_log(s->avctx, AV_LOG_DEBUG, "D");
01577 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
01578 av_log(s->avctx, AV_LOG_DEBUG, "g");
01579 else if (IS_GMC(mb_type))
01580 av_log(s->avctx, AV_LOG_DEBUG, "G");
01581 else if (IS_SKIP(mb_type))
01582 av_log(s->avctx, AV_LOG_DEBUG, "S");
01583 else if (!USES_LIST(mb_type, 1))
01584 av_log(s->avctx, AV_LOG_DEBUG, ">");
01585 else if (!USES_LIST(mb_type, 0))
01586 av_log(s->avctx, AV_LOG_DEBUG, "<");
01587 else {
01588 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01589 av_log(s->avctx, AV_LOG_DEBUG, "X");
01590 }
01591
01592
01593 if (IS_8X8(mb_type))
01594 av_log(s->avctx, AV_LOG_DEBUG, "+");
01595 else if (IS_16X8(mb_type))
01596 av_log(s->avctx, AV_LOG_DEBUG, "-");
01597 else if (IS_8X16(mb_type))
01598 av_log(s->avctx, AV_LOG_DEBUG, "|");
01599 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
01600 av_log(s->avctx, AV_LOG_DEBUG, " ");
01601 else
01602 av_log(s->avctx, AV_LOG_DEBUG, "?");
01603
01604
01605 if (IS_INTERLACED(mb_type))
01606 av_log(s->avctx, AV_LOG_DEBUG, "=");
01607 else
01608 av_log(s->avctx, AV_LOG_DEBUG, " ");
01609 }
01610
01611 }
01612 av_log(s->avctx, AV_LOG_DEBUG, "\n");
01613 }
01614 }
01615
01616 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
01617 (s->avctx->debug_mv)) {
01618 const int shift = 1 + s->quarter_sample;
01619 int mb_y;
01620 uint8_t *ptr;
01621 int i;
01622 int h_chroma_shift, v_chroma_shift, block_height;
01623 const int width = s->avctx->width;
01624 const int height = s->avctx->height;
01625 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
01626 const int mv_stride = (s->mb_width << mv_sample_log2) +
01627 (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01628 s->low_delay = 0;
01629
01630 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
01631 &h_chroma_shift, &v_chroma_shift);
01632 for (i = 0; i < 3; i++) {
01633 memcpy(s->visualization_buffer[i], pict->data[i],
01634 (i == 0) ? pict->linesize[i] * height:
01635 pict->linesize[i] * height >> v_chroma_shift);
01636 pict->data[i] = s->visualization_buffer[i];
01637 }
01638 pict->type = FF_BUFFER_TYPE_COPY;
01639 ptr = pict->data[0];
01640 block_height = 16 >> v_chroma_shift;
01641
01642 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
01643 int mb_x;
01644 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
01645 const int mb_index = mb_x + mb_y * s->mb_stride;
01646 if ((s->avctx->debug_mv) && pict->motion_val) {
01647 int type;
01648 for (type = 0; type < 3; type++) {
01649 int direction = 0;
01650 switch (type) {
01651 case 0:
01652 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
01653 (pict->pict_type!= AV_PICTURE_TYPE_P))
01654 continue;
01655 direction = 0;
01656 break;
01657 case 1:
01658 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
01659 (pict->pict_type!= AV_PICTURE_TYPE_B))
01660 continue;
01661 direction = 0;
01662 break;
01663 case 2:
01664 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
01665 (pict->pict_type!= AV_PICTURE_TYPE_B))
01666 continue;
01667 direction = 1;
01668 break;
01669 }
01670 if (!USES_LIST(pict->mb_type[mb_index], direction))
01671 continue;
01672
01673 if (IS_8X8(pict->mb_type[mb_index])) {
01674 int i;
01675 for (i = 0; i < 4; i++) {
01676 int sx = mb_x * 16 + 4 + 8 * (i & 1);
01677 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
01678 int xy = (mb_x * 2 + (i & 1) +
01679 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
01680 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
01681 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
01682 draw_arrow(ptr, sx, sy, mx, my, width,
01683 height, s->linesize, 100);
01684 }
01685 } else if (IS_16X8(pict->mb_type[mb_index])) {
01686 int i;
01687 for (i = 0; i < 2; i++) {
01688 int sx = mb_x * 16 + 8;
01689 int sy = mb_y * 16 + 4 + 8 * i;
01690 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
01691 int mx = (pict->motion_val[direction][xy][0] >> shift);
01692 int my = (pict->motion_val[direction][xy][1] >> shift);
01693
01694 if (IS_INTERLACED(pict->mb_type[mb_index]))
01695 my *= 2;
01696
01697 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
01698 height, s->linesize, 100);
01699 }
01700 } else if (IS_8X16(pict->mb_type[mb_index])) {
01701 int i;
01702 for (i = 0; i < 2; i++) {
01703 int sx = mb_x * 16 + 4 + 8 * i;
01704 int sy = mb_y * 16 + 8;
01705 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
01706 int mx = pict->motion_val[direction][xy][0] >> shift;
01707 int my = pict->motion_val[direction][xy][1] >> shift;
01708
01709 if (IS_INTERLACED(pict->mb_type[mb_index]))
01710 my *= 2;
01711
01712 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
01713 height, s->linesize, 100);
01714 }
01715 } else {
01716 int sx = mb_x * 16 + 8;
01717 int sy = mb_y * 16 + 8;
01718 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
01719 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
01720 int my = pict->motion_val[direction][xy][1] >> shift + sy;
01721 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01722 }
01723 }
01724 }
01725 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
01726 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
01727 0x0101010101010101ULL;
01728 int y;
01729 for (y = 0; y < block_height; y++) {
01730 *(uint64_t *)(pict->data[1] + 8 * mb_x +
01731 (block_height * mb_y + y) *
01732 pict->linesize[1]) = c;
01733 *(uint64_t *)(pict->data[2] + 8 * mb_x +
01734 (block_height * mb_y + y) *
01735 pict->linesize[2]) = c;
01736 }
01737 }
01738 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
01739 pict->motion_val) {
01740 int mb_type = pict->mb_type[mb_index];
01741 uint64_t u,v;
01742 int y;
01743 #define COLOR(theta, r) \
01744 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
01745 v = (int)(128 + r * sin(theta * 3.141592 / 180));
01746
01747
01748 u = v = 128;
01749 if (IS_PCM(mb_type)) {
01750 COLOR(120, 48)
01751 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
01752 IS_INTRA16x16(mb_type)) {
01753 COLOR(30, 48)
01754 } else if (IS_INTRA4x4(mb_type)) {
01755 COLOR(90, 48)
01756 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
01757
01758 } else if (IS_DIRECT(mb_type)) {
01759 COLOR(150, 48)
01760 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
01761 COLOR(170, 48)
01762 } else if (IS_GMC(mb_type)) {
01763 COLOR(190, 48)
01764 } else if (IS_SKIP(mb_type)) {
01765
01766 } else if (!USES_LIST(mb_type, 1)) {
01767 COLOR(240, 48)
01768 } else if (!USES_LIST(mb_type, 0)) {
01769 COLOR(0, 48)
01770 } else {
01771 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01772 COLOR(300,48)
01773 }
01774
01775 u *= 0x0101010101010101ULL;
01776 v *= 0x0101010101010101ULL;
01777 for (y = 0; y < block_height; y++) {
01778 *(uint64_t *)(pict->data[1] + 8 * mb_x +
01779 (block_height * mb_y + y) * pict->linesize[1]) = u;
01780 *(uint64_t *)(pict->data[2] + 8 * mb_x +
01781 (block_height * mb_y + y) * pict->linesize[2]) = v;
01782 }
01783
01784
01785 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
01786 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
01787 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
01788 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
01789 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
01790 }
01791 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
01792 for (y = 0; y < 16; y++)
01793 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
01794 pict->linesize[0]] ^= 0x80;
01795 }
01796 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
01797 int dm = 1 << (mv_sample_log2 - 2);
01798 for (i = 0; i < 4; i++) {
01799 int sx = mb_x * 16 + 8 * (i & 1);
01800 int sy = mb_y * 16 + 8 * (i >> 1);
01801 int xy = (mb_x * 2 + (i & 1) +
01802 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
01803
01804 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
01805 if (mv[0] != mv[dm] ||
01806 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
01807 for (y = 0; y < 8; y++)
01808 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
01809 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
01810 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
01811 pict->linesize[0]) ^= 0x8080808080808080ULL;
01812 }
01813 }
01814
01815 if (IS_INTERLACED(mb_type) &&
01816 s->codec_id == CODEC_ID_H264) {
01817
01818 }
01819 }
01820 s->mbskip_table[mb_index] = 0;
01821 }
01822 }
01823 }
01824 }
01825
/**
 * Half-pel motion compensation of a single luma block at reduced
 * ("lowres") resolution.
 *
 * @param field_based  operating on one field of an interlaced frame
 * @param field_select when set, read from the other field of src
 * @param src_x, src_y block position in the source plane (full-pel units)
 * @param width, height unused here (bounds come from h/v_edge_pos)
 * @param w, h         block width/height at lowres scale
 * @param pix_op       MC function table indexed by block size
 * @param motion_x/y   motion vector (halved below when quarter_sample)
 * @return 1 when the edge emulation buffer had to be used, else 0
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int s_mask = (2 << lowres) - 1;   /* sub-pel fraction mask */
    int emu = 0;
    int sx, sy;

    if (s->quarter_sample) {
        /* approximate quarter-pel vectors at half-pel precision */
        motion_x /= 2;
        motion_y /= 2;
    }

    /* split the vector into sub-pel fraction and full-pel offset;
     * note '+' binds tighter than '>>', i.e. shift by (lowres + 1) */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    /* fall back to the emulated-edge buffer when the block would read
     * outside the padded picture */
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
                                (h + 1) << field_based, src_x,
                                src_y << field_based,
                                h_edge_pos,
                                v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }

    /* rescale the sub-pel fraction for the MC function */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
01871
01872
/**
 * Full MPEG-style motion compensation (luma + both chroma planes) of one
 * macroblock at reduced ("lowres") resolution.
 *
 * @param field_based  operate on a single field of an interlaced frame
 * @param bottom_field write into the bottom field of the destination
 * @param field_select read from the other field of the reference
 * @param ref_picture  reference planes (Y, Cb, Cr)
 * @param pix_op       MC function table indexed by block size
 * @param motion_x/y   motion vector (halved below when quarter_sample)
 * @param h            block height
 * @param mb_y         macroblock row
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
        uvsx, uvsy;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 2);
    const int block_s    = 8>>lowres;          /* 8-pel block size at this scale */
    const int s_mask     = (2 << lowres) - 1;  /* sub-pel fraction mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    if (s->quarter_sample) {
        /* approximate quarter-pel vectors at half-pel precision */
        motion_x /= 2;
        motion_y /= 2;
    }

    if (field_based) {
        /* note: '-' binds tighter than '<<', i.e. shift by (lowres - 1) */
        motion_y += (bottom_field - field_select) * (1 << lowres - 1);
    }

    /* split vectors into sub-pel fraction and full-pel position;
     * note '+' binds tighter than '>>': shift by (lowres + 1) */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        /* chroma fraction is forced even (mx/4 then *2) */
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2 * mx) & s_mask;
        uvsy = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
        uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* use the emulated-edge buffer when reading outside the padded frame */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                s->linesize, 17, 17 + field_based,
                                src_x, src_y << field_based, h_edge_pos,
                                v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
                                    9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = uvbuf;
            ptr_cr = uvbuf + 16;
        }
    }

    /* select the requested destination field */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    /* select the requested source field */
    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    /* rescale the sub-pel fractions for the MC functions */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
                         uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
                         uvsx, uvsy);
    }
}
01987
/**
 * Chroma motion compensation for 4MV macroblocks at reduced ("lowres")
 * resolution: a single vector (derived by the caller from the four luma
 * vectors) compensates both chroma planes.
 *
 * @param mx, my chroma motion vector (halved below when quarter_sample)
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 2);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;   /* sub-pel fraction mask */
    /* note '+' binds tighter than '>>': shift by (lowres + 1) */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if (s->quarter_sample) {
        /* approximate quarter-pel vectors at half-pel precision */
        mx /= 2;
        my /= 2;
    }

    /* apply the H.263 chroma rounding rule */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        /* fall back to the emulated-edge buffer near the borders */
        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                    9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    /* rescale the sub-pel fraction for the MC function */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if (emu) {
        /* Cr needs edge emulation exactly when Cb did (same position) */
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                                src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
02041
/**
 * Motion compensation for one macroblock at reduced resolution (lowres).
 * Dispatches on s->mv_type and applies the motion vectors in s->mv[dir]
 * to the reference planes, writing the prediction into dest_y/dest_cb/dest_cr.
 *
 * @param dir         0 = forward reference, 1 = backward reference
 * @param ref_picture data[] pointers of the reference picture
 * @param pix_op      chroma-style halfpel put/avg function table
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres = s->avctx->lowres;
    /* 8x8 block shrinks with each lowres level (8, 4, 2, ...) */
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* single vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors, one per 8x8 block; chroma uses their sum */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: predict top field, then bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                /* NOTE(review): appears to redirect prediction to the
                 * opposite field of the picture being decoded — confirm
                 * against the full-resolution MPV_motion path */
                ref_picture = s->current_picture_ptr->f.data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one per 16x8 half of the macroblock */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            /* advance destinations to the lower 16x8 half */
            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: average both parities, switching to avg after the
         * first prediction so results accumulate into the destination */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                /* after the first field, average the second prediction */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                /* opposite parity is always in the same frame when this
                 * is not the first field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        assert(0);
    }
}
02183
02187 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
02188 {
02189 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
02190 int my, off, i, mvs;
02191
02192 if (s->picture_structure != PICT_FRAME) goto unhandled;
02193
02194 switch (s->mv_type) {
02195 case MV_TYPE_16X16:
02196 mvs = 1;
02197 break;
02198 case MV_TYPE_16X8:
02199 mvs = 2;
02200 break;
02201 case MV_TYPE_8X8:
02202 mvs = 4;
02203 break;
02204 default:
02205 goto unhandled;
02206 }
02207
02208 for (i = 0; i < mvs; i++) {
02209 my = s->mv[dir][i][1]<<qpel_shift;
02210 my_max = FFMAX(my_max, my);
02211 my_min = FFMIN(my_min, my);
02212 }
02213
02214 off = (FFMAX(-my_min, my_max) + 63) >> 6;
02215
02216 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
02217 unhandled:
02218 return s->mb_height-1;
02219 }
02220
02221
/* Dequantize an intra block in place, then overwrite dest with its inverse
 * transform. Unlike add_dct(), no block_last_index check is done here —
 * callers are responsible for only passing blocks that should be decoded. */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
02228
02229
02230 static inline void add_dct(MpegEncContext *s,
02231 DCTELEM *block, int i, uint8_t *dest, int line_size)
02232 {
02233 if (s->block_last_index[i] >= 0) {
02234 s->dsp.idct_add (dest, line_size, block);
02235 }
02236 }
02237
02238 static inline void add_dequant_dct(MpegEncContext *s,
02239 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02240 {
02241 if (s->block_last_index[i] >= 0) {
02242 s->dct_unquantize_inter(s, block, i, qscale);
02243
02244 s->dsp.idct_add (dest, line_size, block);
02245 }
02246 }
02247
02251 void ff_clean_intra_table_entries(MpegEncContext *s)
02252 {
02253 int wrap = s->b8_stride;
02254 int xy = s->block_index[0];
02255
02256 s->dc_val[0][xy ] =
02257 s->dc_val[0][xy + 1 ] =
02258 s->dc_val[0][xy + wrap] =
02259 s->dc_val[0][xy + 1 + wrap] = 1024;
02260
02261 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
02262 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
02263 if (s->msmpeg4_version>=3) {
02264 s->coded_block[xy ] =
02265 s->coded_block[xy + 1 ] =
02266 s->coded_block[xy + wrap] =
02267 s->coded_block[xy + 1 + wrap] = 0;
02268 }
02269
02270 wrap = s->mb_stride;
02271 xy = s->mb_x + s->mb_y * wrap;
02272 s->dc_val[1][xy] =
02273 s->dc_val[2][xy] = 1024;
02274
02275 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02276 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02277
02278 s->mbintra_table[xy]= 0;
02279 }
02280
02281
02282
02283
02284
02285
02286
02287
02288
02289
02290
/**
 * Reconstruct one macroblock: motion compensation (inter) plus inverse
 * quantization / IDCT of the coefficient blocks, writing into s->dest[]
 * (or a scratchpad when the destination is not directly writable).
 *
 * lowres_flag and is_mpeg12 are passed as literal constants from
 * MPV_decode_mb; together with av_always_inline this specializes the
 * function per codec family at compile time.
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    /* hardware path: hand the whole MB to XvMC and do nothing else */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
        /* dump (and store) the permuted DCT coefficients of this MB */
        int i,j;
        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for(i=0; i<6; i++){
            for(j=0; j<64; j++){
                *dct++ = block[i][s->dsp.idct_permutation[j]];
                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* reconstruct unless we are encoding and can skip it (no PSNR needed,
     * intra-only or B frame, non-RD mb decision) */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize = s->current_picture.f.linesize[0];
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* maintain the skipped-MB table for the loop filter / error
         * concealment; non-reference frames are marked skipped too */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.f.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0;
            }
        }

        /* interlaced DCT interleaves field lines: double the stride,
         * and the second field starts one line (not block_size lines) down */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* B-frame with draw_horiz_band constraints: reconstruct into a
             * scratchpad and copy out after skip_idct */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion compensation */
            if(!s->encoding){

                /* frame threading: wait until the reference rows we need
                 * have been decoded by the other thread */
                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                        /* second (backward) prediction averages on top */
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                    }
                }
            }

            /* honor the user's skip_idct setting: the MC prediction above
             * is kept, only the residual is dropped */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add the residual; codecs whose unquantize is folded into
             * coefficient decoding take the plain add_dct path below */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2 — chroma has full vertical resolution */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        /* chroma strides recomputed for interlaced DCT */
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){
                            /* 4:4:4 — four more chroma blocks per plane */
                            add_dct(s, block[8],  8, dest_cb+8,            dct_linesize);
                            add_dct(s, block[9],  9, dest_cr+8,            dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* intra macroblock: write (not add) the IDCT output.
             * MPEG-1/2 blocks arrive already dequantized, hence idct_put. */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }
            }
        }
skip_idct:
        /* scratchpad was used above; copy the finished MB to s->dest[] */
        if(!readable){
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
02530
02531 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02532 #if !CONFIG_SMALL
02533 if(s->out_format == FMT_MPEG1) {
02534 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02535 else MPV_decode_mb_internal(s, block, 0, 1);
02536 } else
02537 #endif
02538 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02539 else MPV_decode_mb_internal(s, block, 0, 0);
02540 }
02541
/**
 * Draw the edges of a finished horizontal band of the current picture and,
 * if the user installed a draw_horiz_band callback, invoke it.
 *
 * @param y first line of the band (field lines for field pictures)
 * @param h height of the band in lines
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;
    if(field_pic){
        /* field lines -> frame lines */
        h <<= 1;
        y <<= 1;
    }

    /* replicate border pixels for unrestricted MV, unless a hardware
     * accelerator owns the surface or edge emulation is requested */
    if (!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.f.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        int sides = 0, edge_h;
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        edge_h= FFMIN(h, s->v_edge_pos - y);

        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
                          s->linesize,           s->h_edge_pos,         edge_h,
                          EDGE_WIDTH,            EDGE_WIDTH,            sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
    }

    h= FFMIN(h, s->avctx->height - y);

    /* second field of a field pair is only reported when allowed */
    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        /* pick the frame to hand out: current for B/low-delay/coded order,
         * otherwise the last (display-order) picture */
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src= (AVFrame*)s->current_picture_ptr;
        else if(s->last_picture_ptr)
            src= (AVFrame*)s->last_picture_ptr;
        else
            return;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * s->linesize;
            offset[1]=
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        /* reset FPU/MMX state before calling back into user code */
        emms_c();

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
    }
}
02610
/**
 * Set up s->block_index[] (positions of the six blocks of the current
 * macroblock in the per-block tables) and s->dest[] (pixel pointers of the
 * macroblock in the current picture) for the current mb_x/mb_y.
 */
void ff_init_block_index(MpegEncContext *s){
    const int linesize   = s->current_picture.f.linesize[0];
    const int uvlinesize = s->current_picture.f.linesize[1];
    /* log2 of the macroblock size in pixels, shrunk by lowres */
    const int mb_size= 4 - s->avctx->lowres;

    /* four luma blocks at 8x8 resolution (b8_stride), offset -2/-1 so the
     * previous macroblock's blocks are addressable for prediction */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* two chroma blocks live past the luma area, at mb_stride resolution */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;

    /* horizontal position only; the vertical part is added below */
    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: two MB rows share one frame row */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
02642
02643 void ff_mpeg_flush(AVCodecContext *avctx){
02644 int i;
02645 MpegEncContext *s = avctx->priv_data;
02646
02647 if(s==NULL || s->picture==NULL)
02648 return;
02649
02650 for(i=0; i<s->picture_count; i++){
02651 if (s->picture[i].f.data[0] &&
02652 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
02653 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
02654 free_frame_buffer(s, &s->picture[i]);
02655 }
02656 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02657
02658 s->mb_x= s->mb_y= 0;
02659
02660 s->parse_context.state= -1;
02661 s->parse_context.frame_start_found= 0;
02662 s->parse_context.overread= 0;
02663 s->parse_context.overread_index= 0;
02664 s->parse_context.index= 0;
02665 s->parse_context.last_index= 0;
02666 s->bitstream_buffer_size=0;
02667 s->pp_time=0;
02668 }
02669
02670 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02671 DCTELEM *block, int n, int qscale)
02672 {
02673 int i, level, nCoeffs;
02674 const uint16_t *quant_matrix;
02675
02676 nCoeffs= s->block_last_index[n];
02677
02678 if (n < 4)
02679 block[0] = block[0] * s->y_dc_scale;
02680 else
02681 block[0] = block[0] * s->c_dc_scale;
02682
02683 quant_matrix = s->intra_matrix;
02684 for(i=1;i<=nCoeffs;i++) {
02685 int j= s->intra_scantable.permutated[i];
02686 level = block[j];
02687 if (level) {
02688 if (level < 0) {
02689 level = -level;
02690 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02691 level = (level - 1) | 1;
02692 level = -level;
02693 } else {
02694 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02695 level = (level - 1) | 1;
02696 }
02697 block[j] = level;
02698 }
02699 }
02700 }
02701
02702 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02703 DCTELEM *block, int n, int qscale)
02704 {
02705 int i, level, nCoeffs;
02706 const uint16_t *quant_matrix;
02707
02708 nCoeffs= s->block_last_index[n];
02709
02710 quant_matrix = s->inter_matrix;
02711 for(i=0; i<=nCoeffs; i++) {
02712 int j= s->intra_scantable.permutated[i];
02713 level = block[j];
02714 if (level) {
02715 if (level < 0) {
02716 level = -level;
02717 level = (((level << 1) + 1) * qscale *
02718 ((int) (quant_matrix[j]))) >> 4;
02719 level = (level - 1) | 1;
02720 level = -level;
02721 } else {
02722 level = (((level << 1) + 1) * qscale *
02723 ((int) (quant_matrix[j]))) >> 4;
02724 level = (level - 1) | 1;
02725 }
02726 block[j] = level;
02727 }
02728 }
02729 }
02730
/* MPEG-2 intra dequantization: DC scaled by the y/c DC scale, AC by
 * qscale * intra matrix >> 3. Unlike the MPEG-1 variant there is no
 * (x - 1) | 1 odd-forcing step; see the _bitexact variant for the
 * parity correction on block[63]. */
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    /* alternate scan: process all 64 coefficients regardless of the
     * last-coded index */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    if (n < 4)
        block[0] = block[0] * s->y_dc_scale;
    else
        block[0] = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
        }
    }
}
02760
/* Bit-exact MPEG-2 intra dequantization: same arithmetic as
 * dct_unquantize_mpeg2_intra_c, plus the mismatch correction — the sum
 * of all output levels is tracked (seeded at -1 so the DC term is
 * implicitly counted) and the LSB of block[63] is toggled to fix the
 * overall parity. */
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    if (n < 4)
        block[0] = block[0] * s->y_dc_scale;
    else
        block[0] = block[0] * s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* parity (mismatch) correction on the last coefficient */
    block[63]^=sum&1;
}
02793
/* MPEG-2 inter dequantization: (2*|level| + 1) * qscale * matrix >> 4
 * with the sign restored; no odd-forcing. The running sum (seeded at -1)
 * feeds the same block[63] parity correction as the intra bitexact
 * variant. */
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    /* alternate scan: all 64 coefficients are processed */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* parity (mismatch) correction on the last coefficient */
    block[63]^=sum&1;
}
02824
02825 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02826 DCTELEM *block, int n, int qscale)
02827 {
02828 int i, level, qmul, qadd;
02829 int nCoeffs;
02830
02831 assert(s->block_last_index[n]>=0);
02832
02833 qmul = qscale << 1;
02834
02835 if (!s->h263_aic) {
02836 if (n < 4)
02837 block[0] = block[0] * s->y_dc_scale;
02838 else
02839 block[0] = block[0] * s->c_dc_scale;
02840 qadd = (qscale - 1) | 1;
02841 }else{
02842 qadd = 0;
02843 }
02844 if(s->ac_pred)
02845 nCoeffs=63;
02846 else
02847 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02848
02849 for(i=1; i<=nCoeffs; i++) {
02850 level = block[i];
02851 if (level) {
02852 if (level < 0) {
02853 level = level * qmul - qadd;
02854 } else {
02855 level = level * qmul + qadd;
02856 }
02857 block[i] = level;
02858 }
02859 }
02860 }
02861
02862 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02863 DCTELEM *block, int n, int qscale)
02864 {
02865 int i, level, qmul, qadd;
02866 int nCoeffs;
02867
02868 assert(s->block_last_index[n]>=0);
02869
02870 qadd = (qscale - 1) | 1;
02871 qmul = qscale << 1;
02872
02873 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02874
02875 for(i=0; i<=nCoeffs; i++) {
02876 level = block[i];
02877 if (level) {
02878 if (level < 0) {
02879 level = level * qmul - qadd;
02880 } else {
02881 level = level * qmul + qadd;
02882 }
02883 block[i] = level;
02884 }
02885 }
02886 }
02887
02891 void ff_set_qscale(MpegEncContext * s, int qscale)
02892 {
02893 if (qscale < 1)
02894 qscale = 1;
02895 else if (qscale > 31)
02896 qscale = 31;
02897
02898 s->qscale = qscale;
02899 s->chroma_qscale= s->chroma_qscale_table[qscale];
02900
02901 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02902 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02903 }
02904
02905 void MPV_report_decode_progress(MpegEncContext *s)
02906 {
02907 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
02908 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02909 }