libavcodec/mpegvideo.c
00001 /*
00002  * The simplest mpeg encoder (well, it was the simplest!)
00003  * Copyright (c) 2000,2001 Fabrice Bellard
00004  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
00005  *
00006  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
00007  *
00008  * This file is part of Libav.
00009  *
00010  * Libav is free software; you can redistribute it and/or
00011  * modify it under the terms of the GNU Lesser General Public
00012  * License as published by the Free Software Foundation; either
00013  * version 2.1 of the License, or (at your option) any later version.
00014  *
00015  * Libav is distributed in the hope that it will be useful,
00016  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00017  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018  * Lesser General Public License for more details.
00019  *
00020  * You should have received a copy of the GNU Lesser General Public
00021  * License along with Libav; if not, write to the Free Software
00022  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00023  */
00024 
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043 
00044 //#undef NDEBUG
00045 //#include <assert.h>
00046 
00047 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
00048                                    DCTELEM *block, int n, int qscale);
00049 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
00050                                    DCTELEM *block, int n, int qscale);
00051 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
00052                                    DCTELEM *block, int n, int qscale);
00053 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
00054                                    DCTELEM *block, int n, int qscale);
00055 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
00056                                    DCTELEM *block, int n, int qscale);
00057 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
00058                                   DCTELEM *block, int n, int qscale);
00059 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
00060                                   DCTELEM *block, int n, int qscale);
00061 
00062 
00063 /* enable all paranoid tests for rounding, overflows, etc... */
00064 //#define PARANOID
00065 
00066 //#define DEBUG
00067 
00068 
00069 static const uint8_t ff_default_chroma_qscale_table[32] = {
00070 //   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
00071      0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
00072     16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
00073 };
00074 
00075 const uint8_t ff_mpeg1_dc_scale_table[128] = {
00076 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00077     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00078     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00079     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00080     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00081     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00082     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00083     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00084     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00085 };
00086 
00087 static const uint8_t mpeg2_dc_scale_table1[128] = {
00088 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00089     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00090     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00091     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00092     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00093     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00094     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00095     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00096     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00097 };
00098 
00099 static const uint8_t mpeg2_dc_scale_table2[128] = {
00100 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00101     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00102     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00103     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00104     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00105     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00106     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00107     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00108     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00109 };
00110 
00111 static const uint8_t mpeg2_dc_scale_table3[128] = {
00112 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
00113     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00114     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00115     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00116     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00117     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00118     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00119     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00120     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00121 };
00122 
00123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
00124     ff_mpeg1_dc_scale_table,
00125     mpeg2_dc_scale_table1,
00126     mpeg2_dc_scale_table2,
00127     mpeg2_dc_scale_table3,
00128 };
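/* A minimal usage sketch (an assumption about how callers pick a row, not
 * shown in this file): the outer index of ff_mpeg2_dc_scale_table is the
 * MPEG-2 intra_dc_precision (0..3 for 8..11 bit DC), the inner index is the
 * qscale; every entry of a row is identical, so a row only encodes the DC
 * quantiser step of 8, 4, 2 or 1. */
#if 0
static int example_dc_step(int intra_dc_precision, int qscale)
{
    /* returns 8, 4, 2 or 1 */
    return ff_mpeg2_dc_scale_table[intra_dc_precision][qscale];
}
#endif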
00129 
00130 const enum PixelFormat ff_pixfmt_list_420[] = {
00131     PIX_FMT_YUV420P,
00132     PIX_FMT_NONE
00133 };
00134 
00135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
00136     PIX_FMT_DXVA2_VLD,
00137     PIX_FMT_VAAPI_VLD,
00138     PIX_FMT_VDA_VLD,
00139     PIX_FMT_YUV420P,
00140     PIX_FMT_NONE
00141 };
00142 
00143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
00144                                           const uint8_t *end,
00145                                           uint32_t * restrict state)
00146 {
00147     int i;
00148 
00149     assert(p <= end);
00150     if (p >= end)
00151         return end;
00152 
00153     for (i = 0; i < 3; i++) {
00154         uint32_t tmp = *state << 8;
00155         *state = tmp + *(p++);
00156         if (tmp == 0x100 || p == end)
00157             return p;
00158     }
00159 
00160     while (p < end) {
00161         if      (p[-1] > 1      ) p += 3;
00162         else if (p[-2]          ) p += 2;
00163         else if (p[-3]|(p[-1]-1)) p++;
00164         else {
00165             p++;
00166             break;
00167         }
00168     }
00169 
00170     p = FFMIN(p, end) - 4;
00171     *state = AV_RB32(p);
00172 
00173     return p + 4;
00174 }
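/* A usage sketch (hypothetical caller, assumes <stdio.h>): walk a buffer and
 * report every 00 00 01 xx start code.  *state acts as a sliding 32-bit
 * window over the last bytes consumed, so the scan can be resumed across
 * split buffers by passing the same state again. */
#if 0
static void example_list_start_codes(const uint8_t *buf, int size)
{
    uint32_t state = -1;   /* "no start code seen yet" */
    const uint8_t *p = buf, *end = buf + size;

    while (p < end) {
        p = avpriv_mpv_find_start_code(p, end, &state);
        if ((state & 0xFFFFFF00) == 0x100)
            printf("start code 0x%02X ends at offset %d\n",
                   (unsigned)(state & 0xFF), (int)(p - buf));
    }
}
#endif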
00175 
00176 /* init common dct for both encoder and decoder */
00177 av_cold int ff_dct_common_init(MpegEncContext *s)
00178 {
00179     dsputil_init(&s->dsp, s->avctx);
00180 
00181     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
00182     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
00183     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
00184     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
00185     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
00186     if (s->flags & CODEC_FLAG_BITEXACT)
00187         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
00188     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
00189 
00190 #if HAVE_MMX
00191     MPV_common_init_mmx(s);
00192 #elif ARCH_ALPHA
00193     MPV_common_init_axp(s);
00194 #elif CONFIG_MLIB
00195     MPV_common_init_mlib(s);
00196 #elif HAVE_MMI
00197     MPV_common_init_mmi(s);
00198 #elif ARCH_ARM
00199     MPV_common_init_arm(s);
00200 #elif HAVE_ALTIVEC
00201     MPV_common_init_altivec(s);
00202 #elif ARCH_BFIN
00203     MPV_common_init_bfin(s);
00204 #endif
00205 
00206     /* load & permute scantables
00207      * note: only wmv uses different ones
00208      */
00209     if (s->alternate_scan) {
00210         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
00211         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
00212     } else {
00213         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
00214         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
00215     }
00216     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
00217     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
00218 
00219     return 0;
00220 }
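/* A sketch of how the permuted scantables initialized above are used
 * (an assumption based on the ScanTable layout in dsputil.h):
 * st->permutated[i] gives the position inside the IDCT-permuted block of the
 * i-th coefficient in scan order, so coefficients can be stored directly in
 * the order the selected IDCT expects. */
#if 0
static void example_put_coeff(DCTELEM *block, const ScanTable *st,
                              int scan_pos, int level)
{
    block[st->permutated[scan_pos]] = level;
}
#endif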
00221 
00222 void ff_copy_picture(Picture *dst, Picture *src)
00223 {
00224     *dst = *src;
00225     dst->f.type = FF_BUFFER_TYPE_COPY;
00226 }
00227 
00231 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00232 {
00233     /* Windows Media Image codecs allocate internal buffers with different
00234      * dimensions; ignore user defined callbacks for these
00235      */
00236     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
00237         ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
00238     else
00239         avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
00240     av_freep(&pic->f.hwaccel_picture_private);
00241 }
00242 
00246 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
00247 {
00248     int r;
00249 
00250     if (s->avctx->hwaccel) {
00251         assert(!pic->f.hwaccel_picture_private);
00252         if (s->avctx->hwaccel->priv_data_size) {
00253             pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
00254             if (!pic->f.hwaccel_picture_private) {
00255                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
00256                 return -1;
00257             }
00258         }
00259     }
00260 
00261     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
00262         r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
00263     else
00264         r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
00265 
00266     if (r < 0 || !pic->f.type || !pic->f.data[0]) {
00267         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
00268                r, pic->f.type, pic->f.data[0]);
00269         av_freep(&pic->f.hwaccel_picture_private);
00270         return -1;
00271     }
00272 
00273     if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
00274                         s->uvlinesize != pic->f.linesize[1])) {
00275         av_log(s->avctx, AV_LOG_ERROR,
00276                "get_buffer() failed (stride changed)\n");
00277         free_frame_buffer(s, pic);
00278         return -1;
00279     }
00280 
00281     if (pic->f.linesize[1] != pic->f.linesize[2]) {
00282         av_log(s->avctx, AV_LOG_ERROR,
00283                "get_buffer() failed (uv stride mismatch)\n");
00284         free_frame_buffer(s, pic);
00285         return -1;
00286     }
00287 
00288     return 0;
00289 }
00290 
00295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
00296 {
00297     const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
00298 
00299     // the + 1 is needed so memset(,,stride*height) does not sig11
00300 
00301     const int mb_array_size = s->mb_stride * s->mb_height;
00302     const int b8_array_size = s->b8_stride * s->mb_height * 2;
00303     const int b4_array_size = s->b4_stride * s->mb_height * 4;
00304     int i;
00305     int r = -1;
00306 
00307     if (shared) {
00308         assert(pic->f.data[0]);
00309         assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
00310         pic->f.type = FF_BUFFER_TYPE_SHARED;
00311     } else {
00312         assert(!pic->f.data[0]);
00313 
00314         if (alloc_frame_buffer(s, pic) < 0)
00315             return -1;
00316 
00317         s->linesize   = pic->f.linesize[0];
00318         s->uvlinesize = pic->f.linesize[1];
00319     }
00320 
00321     if (pic->f.qscale_table == NULL) {
00322         if (s->encoding) {
00323             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
00324                               mb_array_size * sizeof(int16_t), fail)
00325             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
00326                               mb_array_size * sizeof(int16_t), fail)
00327             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
00328                               mb_array_size * sizeof(int8_t ), fail)
00329         }
00330 
00331         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
00332                           mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
00333         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
00334                           (big_mb_num + s->mb_stride) * sizeof(uint8_t),
00335                           fail)
00336         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
00337                           (big_mb_num + s->mb_stride) * sizeof(uint32_t),
00338                           fail)
00339         pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
00340         pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
00341         if (s->out_format == FMT_H264) {
00342             for (i = 0; i < 2; i++) {
00343                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00344                                   2 * (b4_array_size + 4) * sizeof(int16_t),
00345                                   fail)
00346                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00347                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00348                                   4 * mb_array_size * sizeof(uint8_t), fail)
00349             }
00350             pic->f.motion_subsample_log2 = 2;
00351         } else if (s->out_format == FMT_H263 || s->encoding ||
00352                    (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
00353             for (i = 0; i < 2; i++) {
00354                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00355                                   2 * (b8_array_size + 4) * sizeof(int16_t),
00356                                   fail)
00357                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00358                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00359                                   4 * mb_array_size * sizeof(uint8_t), fail)
00360             }
00361             pic->f.motion_subsample_log2 = 3;
00362         }
00363         if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
00364             FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
00365                               64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
00366         }
00367         pic->f.qstride = s->mb_stride;
00368         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
00369                           1 * sizeof(AVPanScan), fail)
00370     }
00371 
00372     pic->owner2 = s;
00373 
00374     return 0;
00375 fail: // for the FF_ALLOCZ_OR_GOTO macro
00376     if (r >= 0)
00377         free_frame_buffer(s, pic);
00378     return -1;
00379 }
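/* Indexing sketch for the per-macroblock tables allocated above (an
 * assumption, consistent with the big_mb_num padding): entries are addressed
 * with mb_x + mb_y * mb_stride, and the 2 * mb_stride + 1 offset applied to
 * mb_type / qscale_table leaves guard rows/columns so that neighbour lookups
 * (left, top, top-left, top-right) never read before the array. */
#if 0
static int example_mb_xy(const MpegEncContext *s, int mb_x, int mb_y)
{
    return mb_x + mb_y * s->mb_stride;
}
#endif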
00380 
00384 static void free_picture(MpegEncContext *s, Picture *pic)
00385 {
00386     int i;
00387 
00388     if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
00389         free_frame_buffer(s, pic);
00390     }
00391 
00392     av_freep(&pic->mb_var);
00393     av_freep(&pic->mc_mb_var);
00394     av_freep(&pic->mb_mean);
00395     av_freep(&pic->f.mbskip_table);
00396     av_freep(&pic->qscale_table_base);
00397     av_freep(&pic->mb_type_base);
00398     av_freep(&pic->f.dct_coeff);
00399     av_freep(&pic->f.pan_scan);
00400     pic->f.mb_type = NULL;
00401     for (i = 0; i < 2; i++) {
00402         av_freep(&pic->motion_val_base[i]);
00403         av_freep(&pic->f.ref_index[i]);
00404     }
00405 
00406     if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
00407         for (i = 0; i < 4; i++) {
00408             pic->f.base[i] =
00409             pic->f.data[i] = NULL;
00410         }
00411         pic->f.type = 0;
00412     }
00413 }
00414 
00415 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
00416 {
00417     int y_size = s->b8_stride * (2 * s->mb_height + 1);
00418     int c_size = s->mb_stride * (s->mb_height + 1);
00419     int yc_size = y_size + 2 * c_size;
00420     int i;
00421 
00422     // edge emu needs blocksize + filter length - 1
00423     // (= 17x17 for halfpel / 21x21 for h264)
00424     FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
00425                       (s->width + 64) * 2 * 21 * 2, fail);    // (width + edge + align)*interlaced*MBsize*tolerance
00426 
00427     // FIXME should be linesize instead of s->width * 2
00428     // but that is not known before get_buffer()
00429     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
00430                       (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
00431     s->me.temp         = s->me.scratchpad;
00432     s->rd_scratchpad   = s->me.scratchpad;
00433     s->b_scratchpad    = s->me.scratchpad;
00434     s->obmc_scratchpad = s->me.scratchpad + 16;
00435     if (s->encoding) {
00436         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
00437                           ME_MAP_SIZE * sizeof(uint32_t), fail)
00438         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
00439                           ME_MAP_SIZE * sizeof(uint32_t), fail)
00440         if (s->avctx->noise_reduction) {
00441             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
00442                               2 * 64 * sizeof(int), fail)
00443         }
00444     }
00445     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
00446     s->block = s->blocks[0];
00447 
00448     for (i = 0; i < 12; i++) {
00449         s->pblocks[i] = &s->block[i];
00450     }
00451 
00452     if (s->out_format == FMT_H263) {
00453         /* ac values */
00454         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
00455                           yc_size * sizeof(int16_t) * 16, fail);
00456         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
00457         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
00458         s->ac_val[2] = s->ac_val[1] + c_size;
00459     }
00460 
00461     return 0;
00462 fail:
00463     return -1; // free() through MPV_common_end()
00464 }
00465 
00466 static void free_duplicate_context(MpegEncContext *s)
00467 {
00468     if (s == NULL)
00469         return;
00470 
00471     av_freep(&s->edge_emu_buffer);
00472     av_freep(&s->me.scratchpad);
00473     s->me.temp =
00474     s->rd_scratchpad =
00475     s->b_scratchpad =
00476     s->obmc_scratchpad = NULL;
00477 
00478     av_freep(&s->dct_error_sum);
00479     av_freep(&s->me.map);
00480     av_freep(&s->me.score_map);
00481     av_freep(&s->blocks);
00482     av_freep(&s->ac_val_base);
00483     s->block = NULL;
00484 }
00485 
00486 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
00487 {
00488 #define COPY(a) bak->a = src->a
00489     COPY(edge_emu_buffer);
00490     COPY(me.scratchpad);
00491     COPY(me.temp);
00492     COPY(rd_scratchpad);
00493     COPY(b_scratchpad);
00494     COPY(obmc_scratchpad);
00495     COPY(me.map);
00496     COPY(me.score_map);
00497     COPY(blocks);
00498     COPY(block);
00499     COPY(start_mb_y);
00500     COPY(end_mb_y);
00501     COPY(me.map_generation);
00502     COPY(pb);
00503     COPY(dct_error_sum);
00504     COPY(dct_count[0]);
00505     COPY(dct_count[1]);
00506     COPY(ac_val_base);
00507     COPY(ac_val[0]);
00508     COPY(ac_val[1]);
00509     COPY(ac_val[2]);
00510 #undef COPY
00511 }
00512 
00513 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
00514 {
00515     MpegEncContext bak;
00516     int i;
00517     // FIXME copy only needed parts
00518     // START_TIMER
00519     backup_duplicate_context(&bak, dst);
00520     memcpy(dst, src, sizeof(MpegEncContext));
00521     backup_duplicate_context(dst, &bak);
00522     for (i = 0; i < 12; i++) {
00523         dst->pblocks[i] = &dst->block[i];
00524     }
00525     // STOP_TIMER("update_duplicate_context")
00526     // about 10k cycles / 0.01 sec for 1000 frames at 1 GHz with 2 threads
00527 }
00528 
00529 int ff_mpeg_update_thread_context(AVCodecContext *dst,
00530                                   const AVCodecContext *src)
00531 {
00532     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
00533 
00534     if (dst == src || !s1->context_initialized)
00535         return 0;
00536 
00537     // FIXME can parameters change on I-frames?
00538     // in that case dst may need a reinit
00539     if (!s->context_initialized) {
00540         memcpy(s, s1, sizeof(MpegEncContext));
00541 
00542         s->avctx                 = dst;
00543         s->picture_range_start  += MAX_PICTURE_COUNT;
00544         s->picture_range_end    += MAX_PICTURE_COUNT;
00545         s->bitstream_buffer      = NULL;
00546         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
00547 
00548         MPV_common_init(s);
00549     }
00550 
00551     s->avctx->coded_height  = s1->avctx->coded_height;
00552     s->avctx->coded_width   = s1->avctx->coded_width;
00553     s->avctx->width         = s1->avctx->width;
00554     s->avctx->height        = s1->avctx->height;
00555 
00556     s->coded_picture_number = s1->coded_picture_number;
00557     s->picture_number       = s1->picture_number;
00558     s->input_picture_number = s1->input_picture_number;
00559 
00560     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
00561     memcpy(&s->last_picture, &s1->last_picture,
00562            (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
00563 
00564     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
00565     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
00566     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
00567 
00568     // Error/bug resilience
00569     s->next_p_frame_damaged = s1->next_p_frame_damaged;
00570     s->workaround_bugs      = s1->workaround_bugs;
00571 
00572     // MPEG4 timing info
00573     memcpy(&s->time_increment_bits, &s1->time_increment_bits,
00574            (char *) &s1->shape - (char *) &s1->time_increment_bits);
00575 
00576     // B-frame info
00577     s->max_b_frames = s1->max_b_frames;
00578     s->low_delay    = s1->low_delay;
00579     s->dropable     = s1->dropable;
00580 
00581     // DivX handling (doesn't work)
00582     s->divx_packed  = s1->divx_packed;
00583 
00584     if (s1->bitstream_buffer) {
00585         if (s1->bitstream_buffer_size +
00586             FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
00587             av_fast_malloc(&s->bitstream_buffer,
00588                            &s->allocated_bitstream_buffer_size,
00589                            s1->allocated_bitstream_buffer_size);
00590         s->bitstream_buffer_size = s1->bitstream_buffer_size;
00591         memcpy(s->bitstream_buffer, s1->bitstream_buffer,
00592                s1->bitstream_buffer_size);
00593         memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
00594                FF_INPUT_BUFFER_PADDING_SIZE);
00595     }
00596 
00597     // MPEG2/interlacing info
00598     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
00599            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
00600 
00601     if (!s1->first_field) {
00602         s->last_pict_type = s1->pict_type;
00603         if (s1->current_picture_ptr)
00604             s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
00605 
00606         if (s1->pict_type != AV_PICTURE_TYPE_B) {
00607             s->last_non_b_pict_type = s1->pict_type;
00608         }
00609     }
00610 
00611     return 0;
00612 }
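/* A simplified sketch of what REBASE_PICTURE does for the pointers above
 * (an assumption; the real macro in mpegvideo.h also handles pointers into
 * the context itself): a Picture pointer into s1->picture[] is translated to
 * the same index inside s->picture[], so each frame-thread context refers to
 * the corresponding frame in its own array. */
#if 0
static Picture *example_rebase_picture(Picture *pic, MpegEncContext *new_ctx,
                                       MpegEncContext *old_ctx)
{
    return pic ? &new_ctx->picture[pic - old_ctx->picture] : NULL;
}
#endif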
00613 
00620 void MPV_common_defaults(MpegEncContext *s)
00621 {
00622     s->y_dc_scale_table      =
00623     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
00624     s->chroma_qscale_table   = ff_default_chroma_qscale_table;
00625     s->progressive_frame     = 1;
00626     s->progressive_sequence  = 1;
00627     s->picture_structure     = PICT_FRAME;
00628 
00629     s->coded_picture_number  = 0;
00630     s->picture_number        = 0;
00631     s->input_picture_number  = 0;
00632 
00633     s->picture_in_gop_number = 0;
00634 
00635     s->f_code                = 1;
00636     s->b_code                = 1;
00637 
00638     s->picture_range_start   = 0;
00639     s->picture_range_end     = MAX_PICTURE_COUNT;
00640 
00641     s->slice_context_count   = 1;
00642 }
00643 
00649 void MPV_decode_defaults(MpegEncContext *s)
00650 {
00651     MPV_common_defaults(s);
00652 }
00653 
00658 av_cold int MPV_common_init(MpegEncContext *s)
00659 {
00660     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
00661     int nb_slices = (HAVE_THREADS &&
00662                      s->avctx->active_thread_type & FF_THREAD_SLICE) ?
00663                     s->avctx->thread_count : 1;
00664 
00665     if (s->encoding && s->avctx->slices)
00666         nb_slices = s->avctx->slices;
00667 
00668     if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
00669         s->mb_height = (s->height + 31) / 32 * 2;
00670     else if (s->codec_id != CODEC_ID_H264)
00671         s->mb_height = (s->height + 15) / 16;
00672 
00673     if (s->avctx->pix_fmt == PIX_FMT_NONE) {
00674         av_log(s->avctx, AV_LOG_ERROR,
00675                "decoding to PIX_FMT_NONE is not supported.\n");
00676         return -1;
00677     }
00678 
00679     if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
00680         int max_slices;
00681         if (s->mb_height)
00682             max_slices = FFMIN(MAX_THREADS, s->mb_height);
00683         else
00684             max_slices = MAX_THREADS;
00685         av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
00686                " reducing to %d\n", nb_slices, max_slices);
00687         nb_slices = max_slices;
00688     }
00689 
00690     if ((s->width || s->height) &&
00691         av_image_check_size(s->width, s->height, 0, s->avctx))
00692         return -1;
00693 
00694     ff_dct_common_init(s);
00695 
00696     s->flags  = s->avctx->flags;
00697     s->flags2 = s->avctx->flags2;
00698 
00699     /* set chroma shifts */
00700     avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
00701                                   &s->chroma_y_shift);
00702 
00703     /* convert fourcc to upper case */
00704     s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
00705 
00706     s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
00707 
00708     if (s->width && s->height) {
00709         s->mb_width   = (s->width + 15) / 16;
00710         s->mb_stride  = s->mb_width + 1;
00711         s->b8_stride  = s->mb_width * 2 + 1;
00712         s->b4_stride  = s->mb_width * 4 + 1;
00713         mb_array_size = s->mb_height * s->mb_stride;
00714         mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
00715 
00716         /* set default edge pos, will be overridden
00717          * in decode_header if needed */
00718         s->h_edge_pos = s->mb_width * 16;
00719         s->v_edge_pos = s->mb_height * 16;
00720 
00721         s->mb_num     = s->mb_width * s->mb_height;
00722 
00723         s->block_wrap[0] =
00724         s->block_wrap[1] =
00725         s->block_wrap[2] =
00726         s->block_wrap[3] = s->b8_stride;
00727         s->block_wrap[4] =
00728         s->block_wrap[5] = s->mb_stride;
00729 
00730         y_size  = s->b8_stride * (2 * s->mb_height + 1);
00731         c_size  = s->mb_stride * (s->mb_height + 1);
00732         yc_size = y_size + 2   * c_size;
00733 
00734         s->avctx->coded_frame = (AVFrame *)&s->current_picture;
00735 
00736         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
00737                           fail); // error resilience code looks cleaner with this
00738         for (y = 0; y < s->mb_height; y++)
00739             for (x = 0; x < s->mb_width; x++)
00740                 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
00741 
00742         s->mb_index2xy[s->mb_height * s->mb_width] =
00743                        (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
00744 
00745         if (s->encoding) {
00746             /* Allocate MV tables */
00747             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
00748                               mv_table_size * 2 * sizeof(int16_t), fail);
00749             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
00750                               mv_table_size * 2 * sizeof(int16_t), fail);
00751             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
00752                               mv_table_size * 2 * sizeof(int16_t), fail);
00753             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
00754                               mv_table_size * 2 * sizeof(int16_t), fail);
00755             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
00756                               mv_table_size * 2 * sizeof(int16_t), fail);
00757             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
00758                               mv_table_size * 2 * sizeof(int16_t), fail);
00759             s->p_mv_table            = s->p_mv_table_base +
00760                                        s->mb_stride + 1;
00761             s->b_forw_mv_table       = s->b_forw_mv_table_base +
00762                                        s->mb_stride + 1;
00763             s->b_back_mv_table       = s->b_back_mv_table_base +
00764                                        s->mb_stride + 1;
00765             s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
00766                                        s->mb_stride + 1;
00767             s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
00768                                        s->mb_stride + 1;
00769             s->b_direct_mv_table     = s->b_direct_mv_table_base +
00770                                        s->mb_stride + 1;
00771 
00772             if (s->msmpeg4_version) {
00773                 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
00774                                   2 * 2 * (MAX_LEVEL + 1) *
00775                                   (MAX_RUN + 1) * 2 * sizeof(int), fail);
00776             }
00777             FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
00778 
00779             /* Allocate MB type table */
00780             FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
00781                               sizeof(uint16_t), fail); // needed for encoding
00782 
00783             FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
00784                               sizeof(int), fail);
00785 
00786             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
00787                               64 * 32   * sizeof(int), fail);
00788             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
00789                               64 * 32   * sizeof(int), fail);
00790             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
00791                               64 * 32 * 2 * sizeof(uint16_t), fail);
00792             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
00793                               64 * 32 * 2 * sizeof(uint16_t), fail);
00794             FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
00795                               MAX_PICTURE_COUNT * sizeof(Picture *), fail);
00796             FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
00797                               MAX_PICTURE_COUNT * sizeof(Picture *), fail);
00798 
00799             if (s->avctx->noise_reduction) {
00800                 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
00801                                   2 * 64 * sizeof(uint16_t), fail);
00802             }
00803         }
00804     }
00805 
00806     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
00807     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
00808                       s->picture_count * sizeof(Picture), fail);
00809     for (i = 0; i < s->picture_count; i++) {
00810         avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
00811     }
00812 
00813     if (s->width && s->height) {
00814         FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
00815                           mb_array_size * sizeof(uint8_t), fail);
00816 
00817         if (s->codec_id == CODEC_ID_MPEG4 ||
00818             (s->flags & CODEC_FLAG_INTERLACED_ME)) {
00819             /* interlaced direct mode decoding tables */
00820             for (i = 0; i < 2; i++) {
00821                 int j, k;
00822                 for (j = 0; j < 2; j++) {
00823                     for (k = 0; k < 2; k++) {
00824                         FF_ALLOCZ_OR_GOTO(s->avctx,
00825                                           s->b_field_mv_table_base[i][j][k],
00826                                           mv_table_size * 2 * sizeof(int16_t),
00827                                           fail);
00828                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
00829                                                        s->mb_stride + 1;
00830                     }
00831                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
00832                                       mb_array_size * 2 * sizeof(uint8_t),
00833                                       fail);
00834                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
00835                                       mv_table_size * 2 * sizeof(int16_t),
00836                                       fail);
00837                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
00838                                                 + s->mb_stride + 1;
00839                 }
00840                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
00841                                   mb_array_size * 2 * sizeof(uint8_t),
00842                                   fail);
00843             }
00844         }
00845         if (s->out_format == FMT_H263) {
00846             /* cbp values */
00847             FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
00848             s->coded_block = s->coded_block_base + s->b8_stride + 1;
00849 
00850             /* cbp, ac_pred, pred_dir */
00851             FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
00852                               mb_array_size * sizeof(uint8_t), fail);
00853             FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
00854                               mb_array_size * sizeof(uint8_t), fail);
00855         }
00856 
00857         if (s->h263_pred || s->h263_plus || !s->encoding) {
00858             /* dc values */
00859             // MN: we need these for error resilience of intra-frames
00860             FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
00861                               yc_size * sizeof(int16_t), fail);
00862             s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
00863             s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
00864             s->dc_val[2] = s->dc_val[1] + c_size;
00865             for (i = 0; i < yc_size; i++)
00866                 s->dc_val_base[i] = 1024;
00867         }
00868 
00869         /* which mb is an intra block */
00870         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
00871         memset(s->mbintra_table, 1, mb_array_size);
00872 
00873         /* init macroblock skip table */
00874         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
00875         // Note the + 1 is for a quicker mpeg4 slice_end detection
00876 
00877         s->parse_context.state = -1;
00878         if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
00879             s->avctx->debug_mv) {
00880             s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
00881                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00882             s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
00883                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00884             s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
00885                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00886         }
00887     }
00888 
00889     s->context_initialized = 1;
00890     s->thread_context[0]   = s;
00891 
00892     if (s->width && s->height) {
00893         if (nb_slices > 1) {
00894             for (i = 1; i < nb_slices; i++) {
00895                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
00896                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
00897             }
00898 
00899             for (i = 0; i < nb_slices; i++) {
00900                 if (init_duplicate_context(s->thread_context[i], s) < 0)
00901                     goto fail;
00902                 s->thread_context[i]->start_mb_y =
00903                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
00904                 s->thread_context[i]->end_mb_y   =
00905                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
00906             }
00907         } else {
00908             if (init_duplicate_context(s, s) < 0)
00909                 goto fail;
00910             s->start_mb_y = 0;
00911             s->end_mb_y   = s->mb_height;
00912         }
00913         s->slice_context_count = nb_slices;
00914     }
00915 
00916     return 0;
00917  fail:
00918     MPV_common_end(s);
00919     return -1;
00920 }
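/* A worked sketch of the slice partitioning done above: macroblock rows are
 * split across the slice contexts with rounding to the nearest row.  For
 * example mb_height = 45 and nb_slices = 4 gives the row ranges
 * [0,11), [11,23), [23,34) and [34,45).  (Hypothetical helper, assumes
 * <stdio.h>.) */
#if 0
static void example_slice_rows(int mb_height, int nb_slices)
{
    int i;
    for (i = 0; i < nb_slices; i++) {
        int start = (mb_height *  i      + nb_slices / 2) / nb_slices;
        int end   = (mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        printf("slice %d: mb rows [%d, %d)\n", i, start, end);
    }
}
#endif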
00921 
00922 /* init common structure for both encoder and decoder */
00923 void MPV_common_end(MpegEncContext *s)
00924 {
00925     int i, j, k;
00926 
00927     if (s->slice_context_count > 1) {
00928         for (i = 0; i < s->slice_context_count; i++) {
00929             free_duplicate_context(s->thread_context[i]);
00930         }
00931         for (i = 1; i < s->slice_context_count; i++) {
00932             av_freep(&s->thread_context[i]);
00933         }
00934         s->slice_context_count = 1;
00935     } else free_duplicate_context(s);
00936 
00937     av_freep(&s->parse_context.buffer);
00938     s->parse_context.buffer_size = 0;
00939 
00940     av_freep(&s->mb_type);
00941     av_freep(&s->p_mv_table_base);
00942     av_freep(&s->b_forw_mv_table_base);
00943     av_freep(&s->b_back_mv_table_base);
00944     av_freep(&s->b_bidir_forw_mv_table_base);
00945     av_freep(&s->b_bidir_back_mv_table_base);
00946     av_freep(&s->b_direct_mv_table_base);
00947     s->p_mv_table            = NULL;
00948     s->b_forw_mv_table       = NULL;
00949     s->b_back_mv_table       = NULL;
00950     s->b_bidir_forw_mv_table = NULL;
00951     s->b_bidir_back_mv_table = NULL;
00952     s->b_direct_mv_table     = NULL;
00953     for (i = 0; i < 2; i++) {
00954         for (j = 0; j < 2; j++) {
00955             for (k = 0; k < 2; k++) {
00956                 av_freep(&s->b_field_mv_table_base[i][j][k]);
00957                 s->b_field_mv_table[i][j][k] = NULL;
00958             }
00959             av_freep(&s->b_field_select_table[i][j]);
00960             av_freep(&s->p_field_mv_table_base[i][j]);
00961             s->p_field_mv_table[i][j] = NULL;
00962         }
00963         av_freep(&s->p_field_select_table[i]);
00964     }
00965 
00966     av_freep(&s->dc_val_base);
00967     av_freep(&s->coded_block_base);
00968     av_freep(&s->mbintra_table);
00969     av_freep(&s->cbp_table);
00970     av_freep(&s->pred_dir_table);
00971 
00972     av_freep(&s->mbskip_table);
00973     av_freep(&s->bitstream_buffer);
00974     s->allocated_bitstream_buffer_size = 0;
00975 
00976     av_freep(&s->avctx->stats_out);
00977     av_freep(&s->ac_stats);
00978     av_freep(&s->error_status_table);
00979     av_freep(&s->mb_index2xy);
00980     av_freep(&s->lambda_table);
00981     av_freep(&s->q_intra_matrix);
00982     av_freep(&s->q_inter_matrix);
00983     av_freep(&s->q_intra_matrix16);
00984     av_freep(&s->q_inter_matrix16);
00985     av_freep(&s->input_picture);
00986     av_freep(&s->reordered_input_picture);
00987     av_freep(&s->dct_offset);
00988 
00989     if (s->picture && !s->avctx->internal->is_copy) {
00990         for (i = 0; i < s->picture_count; i++) {
00991             free_picture(s, &s->picture[i]);
00992         }
00993     }
00994     av_freep(&s->picture);
00995     s->context_initialized      = 0;
00996     s->last_picture_ptr         =
00997     s->next_picture_ptr         =
00998     s->current_picture_ptr      = NULL;
00999     s->linesize = s->uvlinesize = 0;
01000 
01001     for (i = 0; i < 3; i++)
01002         av_freep(&s->visualization_buffer[i]);
01003 
01004     if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
01005         avcodec_default_free_buffers(s->avctx);
01006 }
01007 
01008 void ff_init_rl(RLTable *rl,
01009                 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
01010 {
01011     int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
01012     uint8_t index_run[MAX_RUN + 1];
01013     int last, run, level, start, end, i;
01014 
01015     /* If table is static, we can quit if rl->max_level[0] is not NULL */
01016     if (static_store && rl->max_level[0])
01017         return;
01018 
01019     /* compute max_level[], max_run[] and index_run[] */
01020     for (last = 0; last < 2; last++) {
01021         if (last == 0) {
01022             start = 0;
01023             end = rl->last;
01024         } else {
01025             start = rl->last;
01026             end = rl->n;
01027         }
01028 
01029         memset(max_level, 0, MAX_RUN + 1);
01030         memset(max_run, 0, MAX_LEVEL + 1);
01031         memset(index_run, rl->n, MAX_RUN + 1);
01032         for (i = start; i < end; i++) {
01033             run   = rl->table_run[i];
01034             level = rl->table_level[i];
01035             if (index_run[run] == rl->n)
01036                 index_run[run] = i;
01037             if (level > max_level[run])
01038                 max_level[run] = level;
01039             if (run > max_run[level])
01040                 max_run[level] = run;
01041         }
01042         if (static_store)
01043             rl->max_level[last] = static_store[last];
01044         else
01045             rl->max_level[last] = av_malloc(MAX_RUN + 1);
01046         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
01047         if (static_store)
01048             rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
01049         else
01050             rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
01051         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
01052         if (static_store)
01053             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
01054         else
01055             rl->index_run[last] = av_malloc(MAX_RUN + 1);
01056         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
01057     }
01058 }
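/* A lookup sketch for the tables built above (mirrors get_rl_index() in
 * mpegvideo.h, stated here as an assumption): map a (last, run, level)
 * triple to its VLC symbol, or to the escape symbol rl->n when the pair has
 * no regular code. */
#if 0
static int example_rl_index(const RLTable *rl, int last, int run, int level)
{
    int index = rl->index_run[last][run];
    if (index >= rl->n)                   /* no code at all for this run   */
        return rl->n;
    if (level > rl->max_level[last][run]) /* level too large for this run  */
        return rl->n;
    return index + level - 1;
}
#endif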
01059 
01060 void ff_init_vlc_rl(RLTable *rl)
01061 {
01062     int i, q;
01063 
01064     for (q = 0; q < 32; q++) {
01065         int qmul = q * 2;
01066         int qadd = (q - 1) | 1;
01067 
01068         if (q == 0) {
01069             qmul = 1;
01070             qadd = 0;
01071         }
01072         for (i = 0; i < rl->vlc.table_size; i++) {
01073             int code = rl->vlc.table[i][0];
01074             int len  = rl->vlc.table[i][1];
01075             int level, run;
01076 
01077             if (len == 0) { // illegal code
01078                 run   = 66;
01079                 level = MAX_LEVEL;
01080             } else if (len < 0) { // more bits needed
01081                 run   = 0;
01082                 level = code;
01083             } else {
01084                 if (code == rl->n) { // esc
01085                     run   = 66;
01086                     level =  0;
01087                 } else {
01088                     run   = rl->table_run[code] + 1;
01089                     level = rl->table_level[code] * qmul + qadd;
01090                     if (code >= rl->last) run += 192;
01091                 }
01092             }
01093             rl->rl_vlc[q][i].len   = len;
01094             rl->rl_vlc[q][i].level = level;
01095             rl->rl_vlc[q][i].run   = run;
01096         }
01097     }
01098 }
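/* A worked example of the dequantisation baked into the tables above: for
 * qscale = 5, qmul = 10 and qadd = 5, so a stored level of 3 expands to
 * 3 * 10 + 5 = 35; sign and escape handling are applied by the decoder after
 * the RL_VLC lookup.  (Hypothetical helper for illustration.) */
#if 0
static int example_h263_dequant_level(int level, int qscale)
{
    int qmul = qscale ? 2 * qscale       : 1;
    int qadd = qscale ? (qscale - 1) | 1 : 0;
    return level * qmul + qadd;
}
#endif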
01099 
01100 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
01101 {
01102     int i;
01103 
01104     /* release non reference frames */
01105     for (i = 0; i < s->picture_count; i++) {
01106         if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
01107             (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
01108             (remove_current || &s->picture[i] !=  s->current_picture_ptr)
01109             /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
01110             free_frame_buffer(s, &s->picture[i]);
01111         }
01112     }
01113 }
01114 
01115 int ff_find_unused_picture(MpegEncContext *s, int shared)
01116 {
01117     int i;
01118 
01119     if (shared) {
01120         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01121             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
01122                 return i;
01123         }
01124     } else {
01125         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01126             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
01127                 return i; // FIXME
01128         }
01129         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01130             if (s->picture[i].f.data[0] == NULL)
01131                 return i;
01132         }
01133     }
01134 
01135     return AVERROR_INVALIDDATA;
01136 }
01137 
01138 static void update_noise_reduction(MpegEncContext *s)
01139 {
01140     int intra, i;
01141 
01142     for (intra = 0; intra < 2; intra++) {
01143         if (s->dct_count[intra] > (1 << 16)) {
01144             for (i = 0; i < 64; i++) {
01145                 s->dct_error_sum[intra][i] >>= 1;
01146             }
01147             s->dct_count[intra] >>= 1;
01148         }
01149 
01150         for (i = 0; i < 64; i++) {
01151             s->dct_offset[intra][i] = (s->avctx->noise_reduction *
01152                                        s->dct_count[intra] +
01153                                        s->dct_error_sum[intra][i] / 2) /
01154                                       (s->dct_error_sum[intra][i] + 1);
01155         }
01156     }
01157 }
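/* A worked example of the offset formula above (hypothetical numbers): with
 * avctx->noise_reduction = 256, dct_count = 1000 and dct_error_sum = 64000
 * for one coefficient, the offset is (256 * 1000 + 32000) / 64001 = 4; the
 * offset shrinks as the accumulated DCT error for that coefficient grows. */
#if 0
static int example_nr_offset(int strength, int count, int error_sum)
{
    return (strength * count + error_sum / 2) / (error_sum + 1);
}
#endif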
01158 
01163 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
01164 {
01165     int i;
01166     Picture *pic;
01167     s->mb_skipped = 0;
01168 
01169     /* mark & release old frames */
01170     if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
01171         if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
01172             s->last_picture_ptr != s->next_picture_ptr &&
01173             s->last_picture_ptr->f.data[0]) {
01174             if (s->last_picture_ptr->owner2 == s)
01175                 free_frame_buffer(s, s->last_picture_ptr);
01176         }
01177 
01178         /* release forgotten pictures */
01179         /* if (mpeg124/h263) */
01180         if (!s->encoding) {
01181             for (i = 0; i < s->picture_count; i++) {
01182                 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
01183                     &s->picture[i] != s->last_picture_ptr &&
01184                     &s->picture[i] != s->next_picture_ptr &&
01185                     s->picture[i].f.reference) {
01186                     if (!(avctx->active_thread_type & FF_THREAD_FRAME))
01187                         av_log(avctx, AV_LOG_ERROR,
01188                                "releasing zombie picture\n");
01189                     free_frame_buffer(s, &s->picture[i]);
01190                 }
01191             }
01192         }
01193     }
01194 
01195     if (!s->encoding) {
01196         ff_release_unused_pictures(s, 1);
01197 
01198         if (s->current_picture_ptr &&
01199             s->current_picture_ptr->f.data[0] == NULL) {
01200             // we already have an unused image
01201             // (maybe it was set before reading the header)
01202             pic = s->current_picture_ptr;
01203         } else {
01204             i   = ff_find_unused_picture(s, 0);
01205             pic = &s->picture[i];
01206         }
01207 
01208         pic->f.reference = 0;
01209         if (!s->dropable) {
01210             if (s->codec_id == CODEC_ID_H264)
01211                 pic->f.reference = s->picture_structure;
01212             else if (s->pict_type != AV_PICTURE_TYPE_B)
01213                 pic->f.reference = 3;
01214         }
01215 
01216         pic->f.coded_picture_number = s->coded_picture_number++;
01217 
01218         if (ff_alloc_picture(s, pic, 0) < 0)
01219             return -1;
01220 
01221         s->current_picture_ptr = pic;
01222         // FIXME use only the vars from current_pic
01223         s->current_picture_ptr->f.top_field_first = s->top_field_first;
01224         if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
01225             s->codec_id == CODEC_ID_MPEG2VIDEO) {
01226             if (s->picture_structure != PICT_FRAME)
01227                 s->current_picture_ptr->f.top_field_first =
01228                     (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
01229         }
01230         s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
01231                                                      !s->progressive_sequence;
01232         s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
01233     }
01234 
01235     s->current_picture_ptr->f.pict_type = s->pict_type;
01236     // if (s->flags && CODEC_FLAG_QSCALE)
01237     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
01238     s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
01239 
01240     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
01241 
01242     if (s->pict_type != AV_PICTURE_TYPE_B) {
01243         s->last_picture_ptr = s->next_picture_ptr;
01244         if (!s->dropable)
01245             s->next_picture_ptr = s->current_picture_ptr;
01246     }
01247     /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
01248            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
01249            s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
01250            s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
01251            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
01252            s->pict_type, s->dropable); */
01253 
01254     if (s->codec_id != CODEC_ID_H264) {
01255         if ((s->last_picture_ptr == NULL ||
01256              s->last_picture_ptr->f.data[0] == NULL) &&
01257             (s->pict_type != AV_PICTURE_TYPE_I ||
01258              s->picture_structure != PICT_FRAME)) {
01259             if (s->pict_type != AV_PICTURE_TYPE_I)
01260                 av_log(avctx, AV_LOG_ERROR,
01261                        "warning: first frame is no keyframe\n");
01262             else if (s->picture_structure != PICT_FRAME)
01263                 av_log(avctx, AV_LOG_INFO,
01264                        "allocate dummy last picture for field based first keyframe\n");
01265 
01266             /* Allocate a dummy frame */
01267             i = ff_find_unused_picture(s, 0);
01268             s->last_picture_ptr = &s->picture[i];
01269 
01270             s->last_picture_ptr->f.reference   = 3;
01271             s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;
01272 
01273             if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
01274                 return -1;
01275             ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
01276                                       INT_MAX, 0);
01277             ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
01278                                       INT_MAX, 1);
01279         }
01280         if ((s->next_picture_ptr == NULL ||
01281              s->next_picture_ptr->f.data[0] == NULL) &&
01282             s->pict_type == AV_PICTURE_TYPE_B) {
01283             /* Allocate a dummy frame */
01284             i = ff_find_unused_picture(s, 0);
01285             s->next_picture_ptr = &s->picture[i];
01286 
01287             s->next_picture_ptr->f.reference   = 3;
01288             s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;
01289 
01290             if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
01291                 return -1;
01292             ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
01293                                       INT_MAX, 0);
01294             ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
01295                                       INT_MAX, 1);
01296         }
01297     }
01298 
01299     if (s->last_picture_ptr)
01300         ff_copy_picture(&s->last_picture, s->last_picture_ptr);
01301     if (s->next_picture_ptr)
01302         ff_copy_picture(&s->next_picture, s->next_picture_ptr);
01303 
01304     if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
01305         (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
01306         if (s->next_picture_ptr)
01307             s->next_picture_ptr->owner2 = s;
01308         if (s->last_picture_ptr)
01309             s->last_picture_ptr->owner2 = s;
01310     }
01311 
01312     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
01313                                                  s->last_picture_ptr->f.data[0]));
01314 
01315     if (s->picture_structure != PICT_FRAME && s->out_format != FMT_H264) {
01316         int i;
01317         for (i = 0; i < 4; i++) {
01318             if (s->picture_structure == PICT_BOTTOM_FIELD) {
01319                 s->current_picture.f.data[i] +=
01320                     s->current_picture.f.linesize[i];
01321             }
01322             s->current_picture.f.linesize[i] *= 2;
01323             s->last_picture.f.linesize[i]    *= 2;
01324             s->next_picture.f.linesize[i]    *= 2;
01325         }
01326     }
01327 
01328     s->err_recognition = avctx->err_recognition;
01329 
01330     /* set dequantizer, we can't do it during init as
01331      * it might change for mpeg4 and we can't do it in the header
01332      * decode as init is not called for mpeg4 there yet */
01333     if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
01334         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
01335         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
01336     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
01337         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
01338         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
01339     } else {
01340         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
01341         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
01342     }
01343 
01344     if (s->dct_error_sum) {
01345         assert(s->avctx->noise_reduction && s->encoding);
01346         update_noise_reduction(s);
01347     }
01348 
01349     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
01350         return ff_xvmc_field_start(s, avctx);
01351 
01352     return 0;
01353 }
01354 
01355 /* generic function for encode/decode called after a
01356  * frame has been coded/decoded. */
01357 void MPV_frame_end(MpegEncContext *s)
01358 {
01359     int i;
01360     /* redraw edges for the frame if decoding didn't complete */
01361     // just to make sure that all data is rendered.
01362     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
01363         ff_xvmc_field_end(s);
01364     } else if ((s->error_count || s->encoding) &&
01365               !s->avctx->hwaccel &&
01366               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
01367               s->unrestricted_mv &&
01368               s->current_picture.f.reference &&
01369               !s->intra_only &&
01370               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
01371         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
01372         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
01373         s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
01374                           s->h_edge_pos, s->v_edge_pos,
01375                           EDGE_WIDTH, EDGE_WIDTH,
01376                           EDGE_TOP | EDGE_BOTTOM);
01377         s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
01378                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
01379                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
01380                           EDGE_TOP | EDGE_BOTTOM);
01381         s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
01382                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
01383                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
01384                           EDGE_TOP | EDGE_BOTTOM);
01385     }
01386 
01387     emms_c();
01388 
01389     s->last_pict_type                 = s->pict_type;
01390     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
01391     if (s->pict_type != AV_PICTURE_TYPE_B) {
01392         s->last_non_b_pict_type = s->pict_type;
01393     }
01394 #if 0
01395     /* copy back current_picture variables */
01396     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
01397         if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
01398             s->picture[i] = s->current_picture;
01399             break;
01400         }
01401     }
01402     assert(i < MAX_PICTURE_COUNT);
01403 #endif
01404 
01405     if (s->encoding) {
01406         /* release non-reference frames */
01407         for (i = 0; i < s->picture_count; i++) {
01408             if (s->picture[i].f.data[0] && !s->picture[i].f.reference
01409                 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
01410                 free_frame_buffer(s, &s->picture[i]);
01411             }
01412         }
01413     }
01414     // clear copies, to avoid confusion
01415 #if 0
01416     memset(&s->last_picture,    0, sizeof(Picture));
01417     memset(&s->next_picture,    0, sizeof(Picture));
01418     memset(&s->current_picture, 0, sizeof(Picture));
01419 #endif
01420     s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
01421 
01422     if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
01423         ff_thread_report_progress((AVFrame *) s->current_picture_ptr, INT_MAX, 0);
01424     }
01425 }
01426 
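01427 /**
01428  * Draw a line from (ex, ey) -> (sx, sy).
01429  * @param w width of the image
01430  * @param h height of the image
01431  * @param stride stride/linesize of the image
01432  * @param color color of the line
01433  */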
01434 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
01435                       int w, int h, int stride, int color)
01436 {
01437     int x, y, fr, f;
01438 
01439     sx = av_clip(sx, 0, w - 1);
01440     sy = av_clip(sy, 0, h - 1);
01441     ex = av_clip(ex, 0, w - 1);
01442     ey = av_clip(ey, 0, h - 1);
01443 
01444     buf[sy * stride + sx] += color;
01445 
01446     if (FFABS(ex - sx) > FFABS(ey - sy)) {
01447         if (sx > ex) {
01448             FFSWAP(int, sx, ex);
01449             FFSWAP(int, sy, ey);
01450         }
01451         buf += sx + sy * stride;
01452         ex  -= sx;
01453         f    = ((ey - sy) << 16) / ex;
01454         for (x = 0; x <= ex; x++) {
01455             y  = (x * f) >> 16;
01456             fr = (x * f) & 0xFFFF;
01457             buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
01458             buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
01459         }
01460     } else {
01461         if (sy > ey) {
01462             FFSWAP(int, sx, ex);
01463             FFSWAP(int, sy, ey);
01464         }
01465         buf += sx + sy * stride;
01466         ey  -= sy;
01467         if (ey)
01468             f  = ((ex - sx) << 16) / ey;
01469         else
01470             f = 0;
01471         for (y = 0; y <= ey; y++) {
01472             x  = (y * f) >> 16;
01473             fr = (y * f) & 0xFFFF;
01474             buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
01475             buf[y * stride + x + 1] += (color *            fr ) >> 16;
01476         }
01477     }
01478 }
01479 
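01480 /**
01481  * Draw an arrow from (ex, ey) -> (sx, sy).
01482  * @param w width of the image
01483  * @param h height of the image
01484  * @param stride stride/linesize of the image
01485  * @param color color of the arrow
01486  */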
01487 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
01488                        int ey, int w, int h, int stride, int color)
01489 {
01490     int dx,dy;
01491 
01492     sx = av_clip(sx, -100, w + 100);
01493     sy = av_clip(sy, -100, h + 100);
01494     ex = av_clip(ex, -100, w + 100);
01495     ey = av_clip(ey, -100, h + 100);
01496 
01497     dx = ex - sx;
01498     dy = ey - sy;
01499 
01500     if (dx * dx + dy * dy > 3 * 3) {
01501         int rx =  dx + dy;
01502         int ry = -dx + dy;
01503         int length = ff_sqrt((rx * rx + ry * ry) << 8);
01504 
01505         // FIXME subpixel accuracy
01506         rx = ROUNDED_DIV(rx * 3 << 4, length);
01507         ry = ROUNDED_DIV(ry * 3 << 4, length);
01508 
01509         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01510         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01511     }
01512     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01513 }
01514 
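01515 /**
01516  * Print debugging info for the given picture.
01517  */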
01518 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
01519 {
01520     if (s->avctx->hwaccel || !pict || !pict->mb_type)
01521         return;
01522 
01523     if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
01524         int x,y;
01525 
01526         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
01527         switch (pict->pict_type) {
01528         case AV_PICTURE_TYPE_I:
01529             av_log(s->avctx,AV_LOG_DEBUG,"I\n");
01530             break;
01531         case AV_PICTURE_TYPE_P:
01532             av_log(s->avctx,AV_LOG_DEBUG,"P\n");
01533             break;
01534         case AV_PICTURE_TYPE_B:
01535             av_log(s->avctx,AV_LOG_DEBUG,"B\n");
01536             break;
01537         case AV_PICTURE_TYPE_S:
01538             av_log(s->avctx,AV_LOG_DEBUG,"S\n");
01539             break;
01540         case AV_PICTURE_TYPE_SI:
01541             av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
01542             break;
01543         case AV_PICTURE_TYPE_SP:
01544             av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
01545             break;
01546         }
01547         for (y = 0; y < s->mb_height; y++) {
01548             for (x = 0; x < s->mb_width; x++) {
01549                 if (s->avctx->debug & FF_DEBUG_SKIP) {
01550                     int count = s->mbskip_table[x + y * s->mb_stride];
01551                     if (count > 9)
01552                         count = 9;
01553                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01554                 }
01555                 if (s->avctx->debug & FF_DEBUG_QP) {
01556                     av_log(s->avctx, AV_LOG_DEBUG, "%2d",
01557                            pict->qscale_table[x + y * s->mb_stride]);
01558                 }
01559                 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
01560                     int mb_type = pict->mb_type[x + y * s->mb_stride];
01561                     // Type & MV direction
01562                     if (IS_PCM(mb_type))
01563                         av_log(s->avctx, AV_LOG_DEBUG, "P");
01564                     else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01565                         av_log(s->avctx, AV_LOG_DEBUG, "A");
01566                     else if (IS_INTRA4x4(mb_type))
01567                         av_log(s->avctx, AV_LOG_DEBUG, "i");
01568                     else if (IS_INTRA16x16(mb_type))
01569                         av_log(s->avctx, AV_LOG_DEBUG, "I");
01570                     else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01571                         av_log(s->avctx, AV_LOG_DEBUG, "d");
01572                     else if (IS_DIRECT(mb_type))
01573                         av_log(s->avctx, AV_LOG_DEBUG, "D");
01574                     else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
01575                         av_log(s->avctx, AV_LOG_DEBUG, "g");
01576                     else if (IS_GMC(mb_type))
01577                         av_log(s->avctx, AV_LOG_DEBUG, "G");
01578                     else if (IS_SKIP(mb_type))
01579                         av_log(s->avctx, AV_LOG_DEBUG, "S");
01580                     else if (!USES_LIST(mb_type, 1))
01581                         av_log(s->avctx, AV_LOG_DEBUG, ">");
01582                     else if (!USES_LIST(mb_type, 0))
01583                         av_log(s->avctx, AV_LOG_DEBUG, "<");
01584                     else {
01585                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01586                         av_log(s->avctx, AV_LOG_DEBUG, "X");
01587                     }
01588 
01589                     // segmentation
01590                     if (IS_8X8(mb_type))
01591                         av_log(s->avctx, AV_LOG_DEBUG, "+");
01592                     else if (IS_16X8(mb_type))
01593                         av_log(s->avctx, AV_LOG_DEBUG, "-");
01594                     else if (IS_8X16(mb_type))
01595                         av_log(s->avctx, AV_LOG_DEBUG, "|");
01596                     else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
01597                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01598                     else
01599                         av_log(s->avctx, AV_LOG_DEBUG, "?");
01600 
01601 
01602                     if (IS_INTERLACED(mb_type))
01603                         av_log(s->avctx, AV_LOG_DEBUG, "=");
01604                     else
01605                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01606                 }
01607                 // av_log(s->avctx, AV_LOG_DEBUG, " ");
01608             }
01609             av_log(s->avctx, AV_LOG_DEBUG, "\n");
01610         }
01611     }
01612 
01613     if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
01614         (s->avctx->debug_mv)) {
01615         const int shift = 1 + s->quarter_sample;
01616         int mb_y;
01617         uint8_t *ptr;
01618         int i;
01619         int h_chroma_shift, v_chroma_shift, block_height;
01620         const int width          = s->avctx->width;
01621         const int height         = s->avctx->height;
01622         const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
01623         const int mv_stride      = (s->mb_width << mv_sample_log2) +
01624                                    (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01625         s->low_delay = 0; // needed to see the vectors without trashing the buffers
01626 
01627         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
01628                                       &h_chroma_shift, &v_chroma_shift);
01629         for (i = 0; i < 3; i++) {
01630             memcpy(s->visualization_buffer[i], pict->data[i],
01631                    (i == 0) ? pict->linesize[i] * height:
01632                               pict->linesize[i] * height >> v_chroma_shift);
01633             pict->data[i] = s->visualization_buffer[i];
01634         }
01635         pict->type   = FF_BUFFER_TYPE_COPY;
01636         ptr          = pict->data[0];
01637         block_height = 16 >> v_chroma_shift;
01638 
01639         for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
01640             int mb_x;
01641             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
01642                 const int mb_index = mb_x + mb_y * s->mb_stride;
01643                 if ((s->avctx->debug_mv) && pict->motion_val) {
01644                     int type;
01645                     for (type = 0; type < 3; type++) {
01646                         int direction = 0;
01647                         switch (type) {
01648                         case 0:
01649                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
01650                                 (pict->pict_type != AV_PICTURE_TYPE_P))
01651                                 continue;
01652                             direction = 0;
01653                             break;
01654                         case 1:
01655                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
01656                                 (pict->pict_type != AV_PICTURE_TYPE_B))
01657                                 continue;
01658                             direction = 0;
01659                             break;
01660                         case 2:
01661                             if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
01662                                 (pict->pict_type != AV_PICTURE_TYPE_B))
01663                                 continue;
01664                             direction = 1;
01665                             break;
01666                         }
01667                         if (!USES_LIST(pict->mb_type[mb_index], direction))
01668                             continue;
01669 
01670                         if (IS_8X8(pict->mb_type[mb_index])) {
01671                             int i;
01672                             for (i = 0; i < 4; i++) {
01673                                 int sx = mb_x * 16 + 4 + 8 * (i & 1);
01674                                 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
01675                                 int xy = (mb_x * 2 + (i & 1) +
01676                                           (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
01677                                 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
01678                                 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
01679                                 draw_arrow(ptr, sx, sy, mx, my, width,
01680                                            height, s->linesize, 100);
01681                             }
01682                         } else if (IS_16X8(pict->mb_type[mb_index])) {
01683                             int i;
01684                             for (i = 0; i < 2; i++) {
01685                                 int sx = mb_x * 16 + 8;
01686                                 int sy = mb_y * 16 + 4 + 8 * i;
01687                                 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
01688                                 int mx = (pict->motion_val[direction][xy][0] >> shift);
01689                                 int my = (pict->motion_val[direction][xy][1] >> shift);
01690 
01691                                 if (IS_INTERLACED(pict->mb_type[mb_index]))
01692                                     my *= 2;
01693 
01694                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
01695                                            height, s->linesize, 100);
01696                             }
01697                         } else if (IS_8X16(pict->mb_type[mb_index])) {
01698                             int i;
01699                             for (i = 0; i < 2; i++) {
01700                                 int sx = mb_x * 16 + 4 + 8 * i;
01701                                 int sy = mb_y * 16 + 8;
01702                                 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
01703                                 int mx = pict->motion_val[direction][xy][0] >> shift;
01704                                 int my = pict->motion_val[direction][xy][1] >> shift;
01705 
01706                                 if (IS_INTERLACED(pict->mb_type[mb_index]))
01707                                     my *= 2;
01708 
01709                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
01710                                            height, s->linesize, 100);
01711                             }
01712                         } else {
01713                               int sx = mb_x * 16 + 8;
01714                               int sy = mb_y * 16 + 8;
01715                               int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
01716                               int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
01717                               int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
01718                               draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01719                         }
01720                     }
01721                 }
01722                 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
01723                     uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
01724                                  0x0101010101010101ULL;
01725                     int y;
01726                     for (y = 0; y < block_height; y++) {
01727                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
01728                                       (block_height * mb_y + y) *
01729                                       pict->linesize[1]) = c;
01730                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
01731                                       (block_height * mb_y + y) *
01732                                       pict->linesize[2]) = c;
01733                     }
01734                 }
01735                 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
01736                     pict->motion_val) {
01737                     int mb_type = pict->mb_type[mb_index];
01738                     uint64_t u,v;
01739                     int y;
01740 #define COLOR(theta, r) \
01741     u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
01742     v = (int)(128 + r * sin(theta * 3.141592 / 180));
01743 
01744 
01745                     u = v = 128;
01746                     if (IS_PCM(mb_type)) {
01747                         COLOR(120, 48)
01748                     } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
01749                                IS_INTRA16x16(mb_type)) {
01750                         COLOR(30, 48)
01751                     } else if (IS_INTRA4x4(mb_type)) {
01752                         COLOR(90, 48)
01753                     } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
01754                         // COLOR(120, 48)
01755                     } else if (IS_DIRECT(mb_type)) {
01756                         COLOR(150, 48)
01757                     } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
01758                         COLOR(170, 48)
01759                     } else if (IS_GMC(mb_type)) {
01760                         COLOR(190, 48)
01761                     } else if (IS_SKIP(mb_type)) {
01762                         // COLOR(180, 48)
01763                     } else if (!USES_LIST(mb_type, 1)) {
01764                         COLOR(240, 48)
01765                     } else if (!USES_LIST(mb_type, 0)) {
01766                         COLOR(0, 48)
01767                     } else {
01768                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01769                         COLOR(300,48)
01770                     }
01771 
01772                     u *= 0x0101010101010101ULL;
01773                     v *= 0x0101010101010101ULL;
01774                     for (y = 0; y < block_height; y++) {
01775                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
01776                                       (block_height * mb_y + y) * pict->linesize[1]) = u;
01777                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
01778                                       (block_height * mb_y + y) * pict->linesize[2]) = v;
01779                     }
01780 
01781                     // segmentation
01782                     if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
01783                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
01784                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
01785                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
01786                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
01787                     }
01788                     if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
01789                         for (y = 0; y < 16; y++)
01790                             pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
01791                                           pict->linesize[0]] ^= 0x80;
01792                     }
01793                     if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
01794                         int dm = 1 << (mv_sample_log2 - 2);
01795                         for (i = 0; i < 4; i++) {
01796                             int sx = mb_x * 16 + 8 * (i & 1);
01797                             int sy = mb_y * 16 + 8 * (i >> 1);
01798                             int xy = (mb_x * 2 + (i & 1) +
01799                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
01800                             // FIXME bidir
01801                             int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
01802                             if (mv[0] != mv[dm] ||
01803                                 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
01804                                 for (y = 0; y < 8; y++)
01805                                     pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
01806                             if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
01807                                 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
01808                                               pict->linesize[0]) ^= 0x8080808080808080ULL;
01809                         }
01810                     }
01811 
01812                     if (IS_INTERLACED(mb_type) &&
01813                         s->codec_id == CODEC_ID_H264) {
01814                         // hmm
01815                     }
01816                 }
01817                 s->mbskip_table[mb_index] = 0;
01818             }
01819         }
01820     }
01821 }
01822 
01823 static inline int hpel_motion_lowres(MpegEncContext *s,
01824                                      uint8_t *dest, uint8_t *src,
01825                                      int field_based, int field_select,
01826                                      int src_x, int src_y,
01827                                      int width, int height, int stride,
01828                                      int h_edge_pos, int v_edge_pos,
01829                                      int w, int h, h264_chroma_mc_func *pix_op,
01830                                      int motion_x, int motion_y)
01831 {
01832     const int lowres   = s->avctx->lowres;
01833     const int op_index = FFMIN(lowres, 2);
01834     const int s_mask   = (2 << lowres) - 1;
01835     int emu = 0;
01836     int sx, sy;
01837 
01838     if (s->quarter_sample) {
01839         motion_x /= 2;
01840         motion_y /= 2;
01841     }
01842 
01843     sx = motion_x & s_mask;
01844     sy = motion_y & s_mask;
01845     src_x += motion_x >> lowres + 1;
01846     src_y += motion_y >> lowres + 1;
01847 
01848     src   += src_y * stride + src_x;
01849 
01850     if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
01851         (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
01852         s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
01853                                 (h + 1) << field_based, src_x,
01854                                 src_y   << field_based,
01855                                 h_edge_pos,
01856                                 v_edge_pos);
01857         src = s->edge_emu_buffer;
01858         emu = 1;
01859     }
01860 
01861     sx = (sx << 2) >> lowres;
01862     sy = (sy << 2) >> lowres;
01863     if (field_select)
01864         src += s->linesize;
01865     pix_op[op_index](dest, src, stride, h, sx, sy);
01866     return emu;
01867 }
01868 
01869 /* apply one mpeg motion vector to the three components */
01870 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
01871                                                 uint8_t *dest_y,
01872                                                 uint8_t *dest_cb,
01873                                                 uint8_t *dest_cr,
01874                                                 int field_based,
01875                                                 int bottom_field,
01876                                                 int field_select,
01877                                                 uint8_t **ref_picture,
01878                                                 h264_chroma_mc_func *pix_op,
01879                                                 int motion_x, int motion_y,
01880                                                 int h, int mb_y)
01881 {
01882     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01883     int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
01884         uvsx, uvsy;
01885     const int lowres     = s->avctx->lowres;
01886     const int op_index   = FFMIN(lowres, 2);
01887     const int block_s    = 8>>lowres;
01888     const int s_mask     = (2 << lowres) - 1;
01889     const int h_edge_pos = s->h_edge_pos >> lowres;
01890     const int v_edge_pos = s->v_edge_pos >> lowres;
01891     linesize   = s->current_picture.f.linesize[0] << field_based;
01892     uvlinesize = s->current_picture.f.linesize[1] << field_based;
01893 
01894     // FIXME obviously not perfect but qpel will not work in lowres anyway
01895     if (s->quarter_sample) {
01896         motion_x /= 2;
01897         motion_y /= 2;
01898     }
01899 
01900     if (field_based) {
01901         motion_y += (bottom_field - field_select) * (1 << lowres - 1);
01902     }
01903 
01904     sx = motion_x & s_mask;
01905     sy = motion_y & s_mask;
01906     src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
01907     src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
01908 
01909     if (s->out_format == FMT_H263) {
01910         uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
01911         uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
01912         uvsrc_x = src_x >> 1;
01913         uvsrc_y = src_y >> 1;
01914     } else if (s->out_format == FMT_H261) {
01915         // even chroma mv's are full pel in H261
01916         mx      = motion_x / 4;
01917         my      = motion_y / 4;
01918         uvsx    = (2 * mx) & s_mask;
01919         uvsy    = (2 * my) & s_mask;
01920         uvsrc_x = s->mb_x * block_s + (mx >> lowres);
01921         uvsrc_y =    mb_y * block_s + (my >> lowres);
01922     } else {
01923         mx      = motion_x / 2;
01924         my      = motion_y / 2;
01925         uvsx    = mx & s_mask;
01926         uvsy    = my & s_mask;
01927         uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
01928         uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
01929     }
01930 
01931     ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
01932     ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
01933     ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
01934 
01935     if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) ||
01936         (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
01937         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
01938                                 s->linesize, 17, 17 + field_based,
01939                                 src_x, src_y << field_based, h_edge_pos,
01940                                 v_edge_pos);
01941         ptr_y = s->edge_emu_buffer;
01942         if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
01943             uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
01944             s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
01945                                     9 + field_based,
01946                                     uvsrc_x, uvsrc_y << field_based,
01947                                     h_edge_pos >> 1, v_edge_pos >> 1);
01948             s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
01949                                     9 + field_based,
01950                                     uvsrc_x, uvsrc_y << field_based,
01951                                     h_edge_pos >> 1, v_edge_pos >> 1);
01952             ptr_cb = uvbuf;
01953             ptr_cr = uvbuf + 16;
01954         }
01955     }
01956 
01957     // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
01958     if (bottom_field) {
01959         dest_y  += s->linesize;
01960         dest_cb += s->uvlinesize;
01961         dest_cr += s->uvlinesize;
01962     }
01963 
01964     if (field_select) {
01965         ptr_y   += s->linesize;
01966         ptr_cb  += s->uvlinesize;
01967         ptr_cr  += s->uvlinesize;
01968     }
01969 
01970     sx = (sx << 2) >> lowres;
01971     sy = (sy << 2) >> lowres;
01972     pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
01973 
01974     if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
01975         uvsx = (uvsx << 2) >> lowres;
01976         uvsy = (uvsy << 2) >> lowres;
01977         pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
01978                          uvsx, uvsy);
01979         pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
01980                          uvsx, uvsy);
01981     }
01982     // FIXME h261 lowres loop filter
01983 }
01984 
01985 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01986                                             uint8_t *dest_cb, uint8_t *dest_cr,
01987                                             uint8_t **ref_picture,
01988                                             h264_chroma_mc_func * pix_op,
01989                                             int mx, int my)
01990 {
01991     const int lowres     = s->avctx->lowres;
01992     const int op_index   = FFMIN(lowres, 2);
01993     const int block_s    = 8 >> lowres;
01994     const int s_mask     = (2 << lowres) - 1;
01995     const int h_edge_pos = s->h_edge_pos >> lowres + 1;
01996     const int v_edge_pos = s->v_edge_pos >> lowres + 1;
01997     int emu = 0, src_x, src_y, offset, sx, sy;
01998     uint8_t *ptr;
01999 
02000     if (s->quarter_sample) {
02001         mx /= 2;
02002         my /= 2;
02003     }
02004 
02005     /* In case of 8X8, we construct a single chroma motion vector
02006        with a special rounding */
02007     mx = ff_h263_round_chroma(mx);
02008     my = ff_h263_round_chroma(my);
02009 
02010     sx = mx & s_mask;
02011     sy = my & s_mask;
02012     src_x = s->mb_x * block_s + (mx >> lowres + 1);
02013     src_y = s->mb_y * block_s + (my >> lowres + 1);
02014 
02015     offset = src_y * s->uvlinesize + src_x;
02016     ptr = ref_picture[1] + offset;
02017     if (s->flags & CODEC_FLAG_EMU_EDGE) {
02018         if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
02019             (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
02020             s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
02021                                     9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
02022             ptr = s->edge_emu_buffer;
02023             emu = 1;
02024         }
02025     }
02026     sx = (sx << 2) >> lowres;
02027     sy = (sy << 2) >> lowres;
02028     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
02029 
02030     ptr = ref_picture[2] + offset;
02031     if (emu) {
02032         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
02033                                 src_x, src_y, h_edge_pos, v_edge_pos);
02034         ptr = s->edge_emu_buffer;
02035     }
02036     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
02037 }
02038 
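02039 /**
02040  * motion compensation of a single macroblock
02041  * @param s context
02042  * @param dest_y luma destination pointer
02043  * @param dest_cb chroma cb/u destination pointer
02044  * @param dest_cr chroma cr/v destination pointer
02045  * @param dir direction (0->forward, 1->backward)
02046  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
02047  * @param pix_op halfpel motion compensation function (average or put normally)
02048  * the motion vectors are taken from s->mv and the MV type from s->mv_type
02049  */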
02050 static inline void MPV_motion_lowres(MpegEncContext *s,
02051                                      uint8_t *dest_y, uint8_t *dest_cb,
02052                                      uint8_t *dest_cr,
02053                                      int dir, uint8_t **ref_picture,
02054                                      h264_chroma_mc_func *pix_op)
02055 {
02056     int mx, my;
02057     int mb_x, mb_y, i;
02058     const int lowres  = s->avctx->lowres;
02059     const int block_s = 8 >> lowres;
02060 
02061     mb_x = s->mb_x;
02062     mb_y = s->mb_y;
02063 
02064     switch (s->mv_type) {
02065     case MV_TYPE_16X16:
02066         mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02067                            0, 0, 0,
02068                            ref_picture, pix_op,
02069                            s->mv[dir][0][0], s->mv[dir][0][1],
02070                            2 * block_s, mb_y);
02071         break;
02072     case MV_TYPE_8X8:
02073         mx = 0;
02074         my = 0;
02075         for (i = 0; i < 4; i++) {
02076             hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
02077                                s->linesize) * block_s,
02078                                ref_picture[0], 0, 0,
02079                                (2 * mb_x + (i & 1)) * block_s,
02080                                (2 * mb_y + (i >> 1)) * block_s,
02081                                s->width, s->height, s->linesize,
02082                                s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
02083                                block_s, block_s, pix_op,
02084                                s->mv[dir][i][0], s->mv[dir][i][1]);
02085 
02086             mx += s->mv[dir][i][0];
02087             my += s->mv[dir][i][1];
02088         }
02089 
02090         if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
02091             chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
02092                                      pix_op, mx, my);
02093         break;
02094     case MV_TYPE_FIELD:
02095         if (s->picture_structure == PICT_FRAME) {
02096             /* top field */
02097             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02098                                1, 0, s->field_select[dir][0],
02099                                ref_picture, pix_op,
02100                                s->mv[dir][0][0], s->mv[dir][0][1],
02101                                block_s, mb_y);
02102             /* bottom field */
02103             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02104                                1, 1, s->field_select[dir][1],
02105                                ref_picture, pix_op,
02106                                s->mv[dir][1][0], s->mv[dir][1][1],
02107                                block_s, mb_y);
02108         } else {
02109             if (s->picture_structure != s->field_select[dir][0] + 1 &&
02110                 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
02111                 ref_picture = s->current_picture_ptr->f.data;
02112 
02113             }
02114             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02115                                0, 0, s->field_select[dir][0],
02116                                ref_picture, pix_op,
02117                                s->mv[dir][0][0],
02118                                s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
02119         }
02120         break;
02121     case MV_TYPE_16X8:
02122         for (i = 0; i < 2; i++) {
02123             uint8_t **ref2picture;
02124 
02125             if (s->picture_structure == s->field_select[dir][i] + 1 ||
02126                 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
02127                 ref2picture = ref_picture;
02128             } else {
02129                 ref2picture = s->current_picture_ptr->f.data;
02130             }
02131 
02132             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02133                                0, 0, s->field_select[dir][i],
02134                                ref2picture, pix_op,
02135                                s->mv[dir][i][0], s->mv[dir][i][1] +
02136                                2 * block_s * i, block_s, mb_y >> 1);
02137 
02138             dest_y  +=  2 * block_s *  s->linesize;
02139             dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
02140             dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
02141         }
02142         break;
02143     case MV_TYPE_DMV:
02144         if (s->picture_structure == PICT_FRAME) {
02145             for (i = 0; i < 2; i++) {
02146                 int j;
02147                 for (j = 0; j < 2; j++) {
02148                     mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02149                                        1, j, j ^ i,
02150                                        ref_picture, pix_op,
02151                                        s->mv[dir][2 * i + j][0],
02152                                        s->mv[dir][2 * i + j][1],
02153                                        block_s, mb_y);
02154                 }
02155                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
02156             }
02157         } else {
02158             for (i = 0; i < 2; i++) {
02159                 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02160                                    0, 0, s->picture_structure != i + 1,
02161                                    ref_picture, pix_op,
02162                                    s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
02163                                    2 * block_s, mb_y >> 1);
02164 
02165                 // after put we make avg of the same block
02166                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
02167 
02168                 // opposite parity is always in the same
02169                 // frame if this is second field
02170                 if (!s->first_field) {
02171                     ref_picture = s->current_picture_ptr->f.data;
02172                 }
02173             }
02174         }
02175         break;
02176     default:
02177         assert(0);
02178     }
02179 }
02180 
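02181 /**
02182  * find the lowest MB row referenced in the MVs
02183  */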
02184 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
02185 {
02186     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
02187     int my, off, i, mvs;
02188 
02189     if (s->picture_structure != PICT_FRAME) goto unhandled;
02190 
02191     switch (s->mv_type) {
02192         case MV_TYPE_16X16:
02193             mvs = 1;
02194             break;
02195         case MV_TYPE_16X8:
02196             mvs = 2;
02197             break;
02198         case MV_TYPE_8X8:
02199             mvs = 4;
02200             break;
02201         default:
02202             goto unhandled;
02203     }
02204 
02205     for (i = 0; i < mvs; i++) {
02206         my = s->mv[dir][i][1]<<qpel_shift;
02207         my_max = FFMAX(my_max, my);
02208         my_min = FFMIN(my_min, my);
02209     }
02210 
02211     off = (FFMAX(-my_min, my_max) + 63) >> 6;
02212 
02213     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
02214 unhandled:
02215     return s->mb_height-1;
02216 }
02217 
02218 /* put block[] to dest[] */
02219 static inline void put_dct(MpegEncContext *s,
02220                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02221 {
02222     s->dct_unquantize_intra(s, block, i, qscale);
02223     s->dsp.idct_put (dest, line_size, block);
02224 }
02225 
02226 /* add block[] to dest[] */
02227 static inline void add_dct(MpegEncContext *s,
02228                            DCTELEM *block, int i, uint8_t *dest, int line_size)
02229 {
02230     if (s->block_last_index[i] >= 0) {
02231         s->dsp.idct_add (dest, line_size, block);
02232     }
02233 }
02234 
02235 static inline void add_dequant_dct(MpegEncContext *s,
02236                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02237 {
02238     if (s->block_last_index[i] >= 0) {
02239         s->dct_unquantize_inter(s, block, i, qscale);
02240 
02241         s->dsp.idct_add (dest, line_size, block);
02242     }
02243 }
02244 
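02245 /**
02246  * Clean dc, ac, coded_block for the current non-intra MB.
02247  */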
02248 void ff_clean_intra_table_entries(MpegEncContext *s)
02249 {
02250     int wrap = s->b8_stride;
02251     int xy = s->block_index[0];
02252 
02253     s->dc_val[0][xy           ] =
02254     s->dc_val[0][xy + 1       ] =
02255     s->dc_val[0][xy     + wrap] =
02256     s->dc_val[0][xy + 1 + wrap] = 1024;
02257     /* ac pred */
02258     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
02259     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
02260     if (s->msmpeg4_version>=3) {
02261         s->coded_block[xy           ] =
02262         s->coded_block[xy + 1       ] =
02263         s->coded_block[xy     + wrap] =
02264         s->coded_block[xy + 1 + wrap] = 0;
02265     }
02266     /* chroma */
02267     wrap = s->mb_stride;
02268     xy = s->mb_x + s->mb_y * wrap;
02269     s->dc_val[1][xy] =
02270     s->dc_val[2][xy] = 1024;
02271     /* ac pred */
02272     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02273     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02274 
02275     s->mbintra_table[xy]= 0;
02276 }
02277 
02278 /* generic function called after a macroblock has been parsed by the
02279    decoder or after it has been encoded by the encoder.
02280 
02281    Important variables used:
02282    s->mb_intra : true if intra macroblock
02283    s->mv_dir   : motion vector direction
02284    s->mv_type  : motion vector type
02285    s->mv       : motion vector
02286    s->interlaced_dct : true if interlaced dct used (mpeg2)
02287  */
02288 static av_always_inline
02289 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
02290                             int lowres_flag, int is_mpeg12)
02291 {
02292     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
02293     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
02294         ff_xvmc_decode_mb(s);//xvmc uses pblocks
02295         return;
02296     }
02297 
02298     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
02299        /* save DCT coefficients */
02300        int i,j;
02301        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
02302        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
02303        for(i=0; i<6; i++){
02304            for(j=0; j<64; j++){
02305                *dct++ = block[i][s->dsp.idct_permutation[j]];
02306                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
02307            }
02308            av_log(s->avctx, AV_LOG_DEBUG, "\n");
02309        }
02310     }
02311 
02312     s->current_picture.f.qscale_table[mb_xy] = s->qscale;
02313 
02314     /* update DC predictors for P macroblocks */
02315     if (!s->mb_intra) {
02316         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
02317             if(s->mbintra_table[mb_xy])
02318                 ff_clean_intra_table_entries(s);
02319         } else {
02320             s->last_dc[0] =
02321             s->last_dc[1] =
02322             s->last_dc[2] = 128 << s->intra_dc_precision;
02323         }
02324     }
02325     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
02326         s->mbintra_table[mb_xy]=1;
02327 
02328     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
02329         uint8_t *dest_y, *dest_cb, *dest_cr;
02330         int dct_linesize, dct_offset;
02331         op_pixels_func (*op_pix)[4];
02332         qpel_mc_func (*op_qpix)[16];
02333         const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
02334         const int uvlinesize = s->current_picture.f.linesize[1];
02335         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
02336         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
02337 
02338         /* avoid copy if macroblock skipped in last frame too */
02339         /* skip only during decoding as we might trash the buffers during encoding a bit */
02340         if(!s->encoding){
02341             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
02342 
02343             if (s->mb_skipped) {
02344                 s->mb_skipped= 0;
02345                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
02346                 *mbskip_ptr = 1;
02347             } else if(!s->current_picture.f.reference) {
02348                 *mbskip_ptr = 1;
02349             } else{
02350                 *mbskip_ptr = 0; /* not skipped */
02351             }
02352         }
02353 
02354         dct_linesize = linesize << s->interlaced_dct;
02355         dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
02356 
02357         if(readable){
02358             dest_y=  s->dest[0];
02359             dest_cb= s->dest[1];
02360             dest_cr= s->dest[2];
02361         }else{
02362             dest_y = s->b_scratchpad;
02363             dest_cb= s->b_scratchpad+16*linesize;
02364             dest_cr= s->b_scratchpad+32*linesize;
02365         }
02366 
02367         if (!s->mb_intra) {
02368             /* motion handling */
02369             /* decoding or more than one mb_type (MC was already done otherwise) */
02370             if(!s->encoding){
02371 
02372                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
02373                     if (s->mv_dir & MV_DIR_FORWARD) {
02374                         ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
02375                     }
02376                     if (s->mv_dir & MV_DIR_BACKWARD) {
02377                         ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
02378                     }
02379                 }
02380 
02381                 if(lowres_flag){
02382                     h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
02383 
02384                     if (s->mv_dir & MV_DIR_FORWARD) {
02385                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
02386                         op_pix = s->dsp.avg_h264_chroma_pixels_tab;
02387                     }
02388                     if (s->mv_dir & MV_DIR_BACKWARD) {
02389                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
02390                     }
02391                 }else{
02392                     op_qpix= s->me.qpel_put;
02393                     if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
02394                         op_pix = s->dsp.put_pixels_tab;
02395                     }else{
02396                         op_pix = s->dsp.put_no_rnd_pixels_tab;
02397                     }
02398                     if (s->mv_dir & MV_DIR_FORWARD) {
02399                         MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
02400                         op_pix = s->dsp.avg_pixels_tab;
02401                         op_qpix= s->me.qpel_avg;
02402                     }
02403                     if (s->mv_dir & MV_DIR_BACKWARD) {
02404                         MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
02405                     }
02406                 }
02407             }
02408 
02409             /* skip dequant / idct if we are really late ;) */
02410             if(s->avctx->skip_idct){
02411                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
02412                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
02413                    || s->avctx->skip_idct >= AVDISCARD_ALL)
02414                     goto skip_idct;
02415             }
02416 
02417             /* add dct residue */
02418             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
02419                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
02420                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02421                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02422                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02423                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02424 
02425                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02426                     if (s->chroma_y_shift){
02427                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02428                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02429                     }else{
02430                         dct_linesize >>= 1;
02431                         dct_offset >>=1;
02432                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02433                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02434                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02435                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02436                     }
02437                 }
02438             } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
02439                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
02440                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
02441                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
02442                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
02443 
02444                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02445                     if(s->chroma_y_shift){//Chroma420
02446                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
02447                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
02448                     }else{
02449                         //chroma422
02450                         dct_linesize = uvlinesize << s->interlaced_dct;
02451                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
02452 
02453                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
02454                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
02455                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
02456                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
02457                         if(!s->chroma_x_shift){//Chroma444
02458                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
02459                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
02460                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
02461                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
02462                         }
02463                     }
02464                 }//fi gray
02465             }
02466             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02467                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02468             }
02469         } else {
02470             /* dct only in intra block */
02471             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02472                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02473                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02474                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02475                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02476 
02477                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02478                     if(s->chroma_y_shift){
02479                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02480                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02481                     }else{
02482                         dct_offset >>=1;
02483                         dct_linesize >>=1;
02484                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02485                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02486                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02487                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02488                     }
02489                 }
02490             }else{
02491                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
02492                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
02493                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
02494                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02495 
02496                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02497                     if(s->chroma_y_shift){
02498                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02499                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02500                     }else{
02501 
02502                         dct_linesize = uvlinesize << s->interlaced_dct;
02503                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
02504 
02505                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
02506                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
02507                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02508                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02509                         if(!s->chroma_x_shift){//Chroma444
02510                             s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
02511                             s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
02512                             s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
02513                             s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
02514                         }
02515                     }
02516                 }//gray
02517             }
02518         }
02519 skip_idct:
02520         if(!readable){
02521             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
02522             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02523             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02524         }
02525     }
02526 }
02527 
02528 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02529 #if !CONFIG_SMALL
02530     if(s->out_format == FMT_MPEG1) {
02531         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02532         else                 MPV_decode_mb_internal(s, block, 0, 1);
02533     } else
02534 #endif
02535     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02536     else                  MPV_decode_mb_internal(s, block, 0, 0);
02537 }
02538 
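02539 /**
02540  * @param h is the normal height, this will be reduced automatically if needed for the last row
02541  */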
02542 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02543     const int field_pic= s->picture_structure != PICT_FRAME;
02544     if(field_pic){
02545         h <<= 1;
02546         y <<= 1;
02547     }
02548 
02549     if (!s->avctx->hwaccel
02550        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
02551        && s->unrestricted_mv
02552        && s->current_picture.f.reference
02553        && !s->intra_only
02554        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
02555         int sides = 0, edge_h;
02556         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
02557         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
02558         if (y==0) sides |= EDGE_TOP;
02559         if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
02560 
02561         edge_h= FFMIN(h, s->v_edge_pos - y);
02562 
02563         s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
02564                           s->linesize,           s->h_edge_pos,         edge_h,
02565                           EDGE_WIDTH,            EDGE_WIDTH,            sides);
02566         s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
02567                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
02568                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
02569         s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
02570                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
02571                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
02572     }
02573 
02574     h= FFMIN(h, s->avctx->height - y);
02575 
02576     if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
02577 
02578     if (s->avctx->draw_horiz_band) {
02579         AVFrame *src;
02580         int offset[AV_NUM_DATA_POINTERS];
02581         int i;
02582 
02583         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02584             src= (AVFrame*)s->current_picture_ptr;
02585         else if(s->last_picture_ptr)
02586             src= (AVFrame*)s->last_picture_ptr;
02587         else
02588             return;
02589 
02590         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02591             for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
02592                 offset[i] = 0;
02593         }else{
02594             offset[0]= y * s->linesize;
02595             offset[1]=
02596             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02597             for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
02598                 offset[i] = 0;
02599         }
02600 
02601         emms_c();
02602 
02603         s->avctx->draw_horiz_band(s->avctx, src, offset,
02604                                   y, s->picture_structure, h);
02605     }
02606 }
02607 
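/* Editorial note: compute the 8x8 block indices (block_index[0..5]) for the
 * current macroblock row and point dest[0..2] at the current macroblock in the
 * output picture, taking lowres, chroma subsampling and field/frame coding
 * into account. */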
02608 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
02609     const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
02610     const int uvlinesize = s->current_picture.f.linesize[1];
02611     const int mb_size= 4 - s->avctx->lowres;
02612 
02613     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
02614     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
02615     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02616     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02617     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02618     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02619     //block_index is not used by mpeg2, so it is not affected by chroma_format
02620 
02621     s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
02622     s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02623     s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02624 
02625     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02626     {
02627         if(s->picture_structure==PICT_FRAME){
02628             s->dest[0] += s->mb_y *   linesize << mb_size;
02629             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02630             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02631         }else{
02632             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
02633             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02634             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02635             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02636         }
02637     }
02638 }
02639 
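/* Editorial note: release every internal or user-supplied picture buffer that
 * is still allocated and reset the parse context and bitstream buffer state;
 * used when the decoder is flushed, e.g. around a seek. */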
02640 void ff_mpeg_flush(AVCodecContext *avctx){
02641     int i;
02642     MpegEncContext *s = avctx->priv_data;
02643 
02644     if(s==NULL || s->picture==NULL)
02645         return;
02646 
02647     for(i=0; i<s->picture_count; i++){
02648        if (s->picture[i].f.data[0] &&
02649            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
02650             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
02651         free_frame_buffer(s, &s->picture[i]);
02652     }
02653     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02654 
02655     s->mb_x= s->mb_y= 0;
02656 
02657     s->parse_context.state= -1;
02658     s->parse_context.frame_start_found= 0;
02659     s->parse_context.overread= 0;
02660     s->parse_context.overread_index= 0;
02661     s->parse_context.index= 0;
02662     s->parse_context.last_index= 0;
02663     s->bitstream_buffer_size=0;
02664     s->pp_time=0;
02665 }
02666 
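/* Editorial note: MPEG-1 intra dequantization. The DC coefficient is scaled by
 * the luma/chroma DC scale; each AC coefficient is reconstructed as
 * (|level| * qscale * intra_matrix[j]) >> 3, forced odd via (x - 1) | 1
 * (MPEG-1 mismatch control) and the sign is restored.
 * e.g. level=2, qscale=4, matrix=16: (2*4*16)>>3 = 16 -> 15. */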
02667 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02668                                    DCTELEM *block, int n, int qscale)
02669 {
02670     int i, level, nCoeffs;
02671     const uint16_t *quant_matrix;
02672 
02673     nCoeffs= s->block_last_index[n];
02674 
02675     if (n < 4)
02676         block[0] = block[0] * s->y_dc_scale;
02677     else
02678         block[0] = block[0] * s->c_dc_scale;
02679     /* XXX: only mpeg1 */
02680     quant_matrix = s->intra_matrix;
02681     for(i=1;i<=nCoeffs;i++) {
02682         int j= s->intra_scantable.permutated[i];
02683         level = block[j];
02684         if (level) {
02685             if (level < 0) {
02686                 level = -level;
02687                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02688                 level = (level - 1) | 1;
02689                 level = -level;
02690             } else {
02691                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02692                 level = (level - 1) | 1;
02693             }
02694             block[j] = level;
02695         }
02696     }
02697 }
02698 
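/* Editorial note: MPEG-1 inter dequantization. Each coefficient is
 * reconstructed as ((2*|level| + 1) * qscale * inter_matrix[j]) >> 4, forced
 * odd as in the intra case, and the sign is restored.
 * e.g. level=3, qscale=8, matrix=16: (7*8*16)>>4 = 56 -> 55. */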
02699 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02700                                    DCTELEM *block, int n, int qscale)
02701 {
02702     int i, level, nCoeffs;
02703     const uint16_t *quant_matrix;
02704 
02705     nCoeffs= s->block_last_index[n];
02706 
02707     quant_matrix = s->inter_matrix;
02708     for(i=0; i<=nCoeffs; i++) {
02709         int j= s->intra_scantable.permutated[i];
02710         level = block[j];
02711         if (level) {
02712             if (level < 0) {
02713                 level = -level;
02714                 level = (((level << 1) + 1) * qscale *
02715                          ((int) (quant_matrix[j]))) >> 4;
02716                 level = (level - 1) | 1;
02717                 level = -level;
02718             } else {
02719                 level = (((level << 1) + 1) * qscale *
02720                          ((int) (quant_matrix[j]))) >> 4;
02721                 level = (level - 1) | 1;
02722             }
02723             block[j] = level;
02724         }
02725     }
02726 }
02727 
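/* Editorial note: MPEG-2 intra dequantization. Same (|level| * qscale *
 * intra_matrix[j]) >> 3 scaling as MPEG-1 but without the oddification; with
 * alternate_scan all 63 AC coefficients are processed regardless of
 * block_last_index. This variant applies no mismatch control (see the
 * bitexact version below). */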
02728 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02729                                    DCTELEM *block, int n, int qscale)
02730 {
02731     int i, level, nCoeffs;
02732     const uint16_t *quant_matrix;
02733 
02734     if(s->alternate_scan) nCoeffs= 63;
02735     else nCoeffs= s->block_last_index[n];
02736 
02737     if (n < 4)
02738         block[0] = block[0] * s->y_dc_scale;
02739     else
02740         block[0] = block[0] * s->c_dc_scale;
02741     quant_matrix = s->intra_matrix;
02742     for(i=1;i<=nCoeffs;i++) {
02743         int j= s->intra_scantable.permutated[i];
02744         level = block[j];
02745         if (level) {
02746             if (level < 0) {
02747                 level = -level;
02748                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02749                 level = -level;
02750             } else {
02751                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02752             }
02753             block[j] = level;
02754         }
02755     }
02756 }
02757 
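/* Editorial note: bit-exact variant of the MPEG-2 intra dequantizer. The
 * reconstruction is the same as above, but the reconstructed levels are summed
 * and the LSB of block[63] is flipped depending on that sum's parity
 * (MPEG-2 mismatch control). */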
02758 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02759                                    DCTELEM *block, int n, int qscale)
02760 {
02761     int i, level, nCoeffs;
02762     const uint16_t *quant_matrix;
02763     int sum=-1;
02764 
02765     if(s->alternate_scan) nCoeffs= 63;
02766     else nCoeffs= s->block_last_index[n];
02767 
02768     if (n < 4)
02769         block[0] = block[0] * s->y_dc_scale;
02770     else
02771         block[0] = block[0] * s->c_dc_scale;
02772     quant_matrix = s->intra_matrix;
02773     for(i=1;i<=nCoeffs;i++) {
02774         int j= s->intra_scantable.permutated[i];
02775         level = block[j];
02776         if (level) {
02777             if (level < 0) {
02778                 level = -level;
02779                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02780                 level = -level;
02781             } else {
02782                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02783             }
02784             block[j] = level;
02785             sum+=level;
02786         }
02787     }
02788     block[63]^=sum&1;
02789 }
02790 
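/* Editorial note: MPEG-2 inter dequantization. Each coefficient is
 * reconstructed as ((2*|level| + 1) * qscale * inter_matrix[j]) >> 4 with the
 * sign restored and no oddification; mismatch control is applied by flipping
 * the LSB of block[63] based on the parity of the summed levels.
 * e.g. level=3, qscale=8, matrix=16: (7*8*16)>>4 = 56. */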
02791 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02792                                    DCTELEM *block, int n, int qscale)
02793 {
02794     int i, level, nCoeffs;
02795     const uint16_t *quant_matrix;
02796     int sum=-1;
02797 
02798     if(s->alternate_scan) nCoeffs= 63;
02799     else nCoeffs= s->block_last_index[n];
02800 
02801     quant_matrix = s->inter_matrix;
02802     for(i=0; i<=nCoeffs; i++) {
02803         int j= s->intra_scantable.permutated[i];
02804         level = block[j];
02805         if (level) {
02806             if (level < 0) {
02807                 level = -level;
02808                 level = (((level << 1) + 1) * qscale *
02809                          ((int) (quant_matrix[j]))) >> 4;
02810                 level = -level;
02811             } else {
02812                 level = (((level << 1) + 1) * qscale *
02813                          ((int) (quant_matrix[j]))) >> 4;
02814             }
02815             block[j] = level;
02816             sum+=level;
02817         }
02818     }
02819     block[63]^=sum&1;
02820 }
02821 
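/* Editorial note: H.263 intra dequantization: level * qmul +/- qadd with
 * qmul = 2*qscale and qadd = (qscale - 1) | 1. With advanced intra coding
 * (h263_aic) qadd is 0 and the DC coefficient is not rescaled here; with AC
 * prediction all 63 AC coefficients are processed.
 * e.g. qscale=5, level=3: 3*10 + 5 = 35. */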
02822 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02823                                   DCTELEM *block, int n, int qscale)
02824 {
02825     int i, level, qmul, qadd;
02826     int nCoeffs;
02827 
02828     assert(s->block_last_index[n]>=0);
02829 
02830     qmul = qscale << 1;
02831 
02832     if (!s->h263_aic) {
02833         if (n < 4)
02834             block[0] = block[0] * s->y_dc_scale;
02835         else
02836             block[0] = block[0] * s->c_dc_scale;
02837         qadd = (qscale - 1) | 1;
02838     }else{
02839         qadd = 0;
02840     }
02841     if(s->ac_pred)
02842         nCoeffs=63;
02843     else
02844         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02845 
02846     for(i=1; i<=nCoeffs; i++) {
02847         level = block[i];
02848         if (level) {
02849             if (level < 0) {
02850                 level = level * qmul - qadd;
02851             } else {
02852                 level = level * qmul + qadd;
02853             }
02854             block[i] = level;
02855         }
02856     }
02857 }
02858 
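/* Editorial note: H.263 inter dequantization: same level * qmul +/- qadd
 * reconstruction as the intra case, but starting at coefficient 0 and always
 * using the raster end of the inter scantable as the last coefficient. */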
02859 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02860                                   DCTELEM *block, int n, int qscale)
02861 {
02862     int i, level, qmul, qadd;
02863     int nCoeffs;
02864 
02865     assert(s->block_last_index[n]>=0);
02866 
02867     qadd = (qscale - 1) | 1;
02868     qmul = qscale << 1;
02869 
02870     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02871 
02872     for(i=0; i<=nCoeffs; i++) {
02873         level = block[i];
02874         if (level) {
02875             if (level < 0) {
02876                 level = level * qmul - qadd;
02877             } else {
02878                 level = level * qmul + qadd;
02879             }
02880             block[i] = level;
02881         }
02882     }
02883 }
02884 
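02885 /**
02886  * set qscale and update qscale dependent variables.
02887  */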
02888 void ff_set_qscale(MpegEncContext * s, int qscale)
02889 {
02890     if (qscale < 1)
02891         qscale = 1;
02892     else if (qscale > 31)
02893         qscale = 31;
02894 
02895     s->qscale = qscale;
02896     s->chroma_qscale= s->chroma_qscale_table[qscale];
02897 
02898     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02899     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02900 }
02901 
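/* Editorial note: report s->mb_y as the decoding progress of the current
 * picture to waiting frame threads; not done for B-frames, data-partitioned
 * frames or after an error. */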
02902 void MPV_report_decode_progress(MpegEncContext *s)
02903 {
02904     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
02905         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02906 }