libavcodec/mpegvideo.c

00001 /*
00002  * The simplest mpeg encoder (well, it was the simplest!)
00003  * Copyright (c) 2000,2001 Fabrice Bellard
00004  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
00005  *
00006  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
00007  *
00008  * This file is part of FFmpeg.
00009  *
00010  * FFmpeg is free software; you can redistribute it and/or
00011  * modify it under the terms of the GNU Lesser General Public
00012  * License as published by the Free Software Foundation; either
00013  * version 2.1 of the License, or (at your option) any later version.
00014  *
00015  * FFmpeg is distributed in the hope that it will be useful,
00016  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00017  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018  * Lesser General Public License for more details.
00019  *
00020  * You should have received a copy of the GNU Lesser General Public
00021  * License along with FFmpeg; if not, write to the Free Software
00022  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00023  */
00024 
00030 #include "libavutil/intmath.h"
00031 #include "avcodec.h"
00032 #include "dsputil.h"
00033 #include "mpegvideo.h"
00034 #include "mpegvideo_common.h"
00035 #include "mjpegenc.h"
00036 #include "msmpeg4.h"
00037 #include "faandct.h"
00038 #include "xvmc_internal.h"
00039 #include <limits.h>
00040 
00041 //#undef NDEBUG
00042 //#include <assert.h>
00043 
00044 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
00045                                    DCTELEM *block, int n, int qscale);
00046 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
00047                                    DCTELEM *block, int n, int qscale);
00048 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
00049                                    DCTELEM *block, int n, int qscale);
00050 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
00051                                    DCTELEM *block, int n, int qscale);
00052 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
00053                                    DCTELEM *block, int n, int qscale);
00054 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
00055                                   DCTELEM *block, int n, int qscale);
00056 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
00057                                   DCTELEM *block, int n, int qscale);
00058 
00059 
00060 /* enable all paranoid tests for rounding, overflows, etc... */
00061 //#define PARANOID
00062 
00063 //#define DEBUG
00064 
00065 
00066 static const uint8_t ff_default_chroma_qscale_table[32]={
00067 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
00068     0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
00069 };
00070 
00071 const uint8_t ff_mpeg1_dc_scale_table[128]={
00072 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
00073     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00074     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00075     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00076     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00077 };
00078 
00079 static const uint8_t mpeg2_dc_scale_table1[128]={
00080 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
00081     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00082     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00083     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00084     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00085 };
00086 
00087 static const uint8_t mpeg2_dc_scale_table2[128]={
00088 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
00089     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00090     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00091     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00092     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00093 };
00094 
00095 static const uint8_t mpeg2_dc_scale_table3[128]={
00096 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
00097     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00098     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00099     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00100     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00101 };
00102 
00103 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
00104     ff_mpeg1_dc_scale_table,
00105     mpeg2_dc_scale_table1,
00106     mpeg2_dc_scale_table2,
00107     mpeg2_dc_scale_table3,
00108 };
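/* Sketch of how these tables are selected (the call site is in the MPEG-1/2
 * decoder): the picture coding extension carries intra_dc_precision (0..3),
 * which indexes the array above, e.g.
 *
 *     s->y_dc_scale_table =
 *     s->c_dc_scale_table = ff_mpeg2_dc_scale_table[s->intra_dc_precision];
 *
 * The scale is 8 >> intra_dc_precision, so 8- to 11-bit intra DC values are
 * brought back to a common range when block[0] is reconstructed as
 * (predictor + differential) * dc_scale.
 */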
00109 
00110 const enum PixelFormat ff_pixfmt_list_420[] = {
00111     PIX_FMT_YUV420P,
00112     PIX_FMT_NONE
00113 };
00114 
00115 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
00116     PIX_FMT_DXVA2_VLD,
00117     PIX_FMT_VAAPI_VLD,
00118     PIX_FMT_YUV420P,
00119     PIX_FMT_NONE
00120 };
00121 
00122 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
00123     int i;
00124 
00125     assert(p<=end);
00126     if(p>=end)
00127         return end;
00128 
00129     for(i=0; i<3; i++){
00130         uint32_t tmp= *state << 8;
00131         *state= tmp + *(p++);
00132         if(tmp == 0x100 || p==end)
00133             return p;
00134     }
00135 
00136     while(p<end){
00137         if     (p[-1] > 1      ) p+= 3;
00138         else if(p[-2]          ) p+= 2;
00139         else if(p[-3]|(p[-1]-1)) p++;
00140         else{
00141             p++;
00142             break;
00143         }
00144     }
00145 
00146     p= FFMIN(p, end)-4;
00147     *state= AV_RB32(p);
00148 
00149     return p+4;
00150 }
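/* Usage sketch (handle_start_code() is a hypothetical callback, not part of
 * FFmpeg): the 32-bit state persists across calls, so a start code that
 * straddles two input buffers is still found; callers normally seed it with
 * -1, meaning no 0x000001 prefix has been seen yet.
 *
 *     uint32_t state = -1;
 *     const uint8_t *ptr = buf, *end = buf + buf_size;
 *     while (ptr < end) {
 *         ptr = ff_find_start_code(ptr, end, &state);
 *         if ((state & 0xFFFFFF00) == 0x100)
 *             handle_start_code(state & 0xFF);
 *     }
 */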
00151 
00152 /* init common dct for both encoder and decoder */
00153 av_cold int ff_dct_common_init(MpegEncContext *s)
00154 {
00155     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
00156     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
00157     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
00158     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
00159     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
00160     if(s->flags & CODEC_FLAG_BITEXACT)
00161         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
00162     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
00163 
00164 #if   HAVE_MMX
00165     MPV_common_init_mmx(s);
00166 #elif ARCH_ALPHA
00167     MPV_common_init_axp(s);
00168 #elif CONFIG_MLIB
00169     MPV_common_init_mlib(s);
00170 #elif HAVE_MMI
00171     MPV_common_init_mmi(s);
00172 #elif ARCH_ARM
00173     MPV_common_init_arm(s);
00174 #elif HAVE_ALTIVEC
00175     MPV_common_init_altivec(s);
00176 #elif ARCH_BFIN
00177     MPV_common_init_bfin(s);
00178 #endif
00179 
00180     /* load & permute scantables
00181        note: only wmv uses different ones
00182     */
00183     if(s->alternate_scan){
00184         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
00185         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
00186     }else{
00187         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
00188         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
00189     }
00190     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
00191     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
00192 
00193     return 0;
00194 }
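/* Roughly, ff_init_scantable() does (see dsputil):
 *
 *     for (i = 0; i < 64; i++)
 *         st->permutated[i] = permutation[src_scantable[i]];
 *
 * i.e. each scan order is remapped through the IDCT's coefficient
 * permutation, so coefficients decoded in scan order land where the
 * (possibly SIMD-reordered) IDCT implementation expects them.
 */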
00195 
00196 void ff_copy_picture(Picture *dst, Picture *src){
00197     *dst = *src;
00198     dst->type= FF_BUFFER_TYPE_COPY;
00199 }
00200 
00204 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00205 {
00206     s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
00207     av_freep(&pic->hwaccel_picture_private);
00208 }
00209 
00213 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
00214 {
00215     int r;
00216 
00217     if (s->avctx->hwaccel) {
00218         assert(!pic->hwaccel_picture_private);
00219         if (s->avctx->hwaccel->priv_data_size) {
00220             pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
00221             if (!pic->hwaccel_picture_private) {
00222                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
00223                 return -1;
00224             }
00225         }
00226     }
00227 
00228     r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
00229 
00230     if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
00231         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
00232         av_freep(&pic->hwaccel_picture_private);
00233         return -1;
00234     }
00235 
00236     if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
00237         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
00238         free_frame_buffer(s, pic);
00239         return -1;
00240     }
00241 
00242     if (pic->linesize[1] != pic->linesize[2]) {
00243         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
00244         free_frame_buffer(s, pic);
00245         return -1;
00246     }
00247 
00248     return 0;
00249 }
00250 
00255 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
00256     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
00257     const int mb_array_size= s->mb_stride*s->mb_height;
00258     const int b8_array_size= s->b8_stride*s->mb_height*2;
00259     const int b4_array_size= s->b4_stride*s->mb_height*4;
00260     int i;
00261     int r= -1;
00262 
00263     if(shared){
00264         assert(pic->data[0]);
00265         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
00266         pic->type= FF_BUFFER_TYPE_SHARED;
00267     }else{
00268         assert(!pic->data[0]);
00269 
00270         if (alloc_frame_buffer(s, pic) < 0)
00271             return -1;
00272 
00273         s->linesize  = pic->linesize[0];
00274         s->uvlinesize= pic->linesize[1];
00275     }
00276 
00277     if(pic->qscale_table==NULL){
00278         if (s->encoding) {
00279             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
00280             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
00281             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
00282         }
00283 
00284         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
00285         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t)  , fail)
00286         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
00287         pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
00288         if(s->out_format == FMT_H264){
00289             for(i=0; i<2; i++){
00290                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
00291                 pic->motion_val[i]= pic->motion_val_base[i]+4;
00292                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00293             }
00294             pic->motion_subsample_log2= 2;
00295         }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
00296             for(i=0; i<2; i++){
00297                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
00298                 pic->motion_val[i]= pic->motion_val_base[i]+4;
00299                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00300             }
00301             pic->motion_subsample_log2= 3;
00302         }
00303         if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00304             FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
00305         }
00306         pic->qstride= s->mb_stride;
00307         FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
00308     }
00309 
00310     /* It might be nicer if the application would keep track of these
00311      * but it would require an API change. */
00312     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
00313     s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
00314     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
00315         pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
00316 
00317     return 0;
00318 fail: //for the FF_ALLOCZ_OR_GOTO macro
00319     if(r>=0)
00320         free_frame_buffer(s, pic);
00321     return -1;
00322 }
00323 
00327 static void free_picture(MpegEncContext *s, Picture *pic){
00328     int i;
00329 
00330     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
00331         free_frame_buffer(s, pic);
00332     }
00333 
00334     av_freep(&pic->mb_var);
00335     av_freep(&pic->mc_mb_var);
00336     av_freep(&pic->mb_mean);
00337     av_freep(&pic->mbskip_table);
00338     av_freep(&pic->qscale_table);
00339     av_freep(&pic->mb_type_base);
00340     av_freep(&pic->dct_coeff);
00341     av_freep(&pic->pan_scan);
00342     pic->mb_type= NULL;
00343     for(i=0; i<2; i++){
00344         av_freep(&pic->motion_val_base[i]);
00345         av_freep(&pic->ref_index[i]);
00346     }
00347 
00348     if(pic->type == FF_BUFFER_TYPE_SHARED){
00349         for(i=0; i<4; i++){
00350             pic->base[i]=
00351             pic->data[i]= NULL;
00352         }
00353         pic->type= 0;
00354     }
00355 }
00356 
00357 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
00358     int i;
00359 
00360     // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
00361     FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
00362     s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
00363 
00364      //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
00365     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
00366     s->me.temp=         s->me.scratchpad;
00367     s->rd_scratchpad=   s->me.scratchpad;
00368     s->b_scratchpad=    s->me.scratchpad;
00369     s->obmc_scratchpad= s->me.scratchpad + 16;
00370     if (s->encoding) {
00371         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
00372         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
00373         if(s->avctx->noise_reduction){
00374             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
00375         }
00376     }
00377     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
00378     s->block= s->blocks[0];
00379 
00380     for(i=0;i<12;i++){
00381         s->pblocks[i] = &s->block[i];
00382     }
00383     return 0;
00384 fail:
00385     return -1; //free() through MPV_common_end()
00386 }
00387 
00388 static void free_duplicate_context(MpegEncContext *s){
00389     if(s==NULL) return;
00390 
00391     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
00392     av_freep(&s->me.scratchpad);
00393     s->me.temp=
00394     s->rd_scratchpad=
00395     s->b_scratchpad=
00396     s->obmc_scratchpad= NULL;
00397 
00398     av_freep(&s->dct_error_sum);
00399     av_freep(&s->me.map);
00400     av_freep(&s->me.score_map);
00401     av_freep(&s->blocks);
00402     s->block= NULL;
00403 }
00404 
00405 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
00406 #define COPY(a) bak->a= src->a
00407     COPY(allocated_edge_emu_buffer);
00408     COPY(edge_emu_buffer);
00409     COPY(me.scratchpad);
00410     COPY(me.temp);
00411     COPY(rd_scratchpad);
00412     COPY(b_scratchpad);
00413     COPY(obmc_scratchpad);
00414     COPY(me.map);
00415     COPY(me.score_map);
00416     COPY(blocks);
00417     COPY(block);
00418     COPY(start_mb_y);
00419     COPY(end_mb_y);
00420     COPY(me.map_generation);
00421     COPY(pb);
00422     COPY(dct_error_sum);
00423     COPY(dct_count[0]);
00424     COPY(dct_count[1]);
00425 #undef COPY
00426 }
00427 
00428 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
00429     MpegEncContext bak;
00430     int i;
00431     //FIXME copy only needed parts
00432 //START_TIMER
00433     backup_duplicate_context(&bak, dst);
00434     memcpy(dst, src, sizeof(MpegEncContext));
00435     backup_duplicate_context(dst, &bak);
00436     for(i=0;i<12;i++){
00437         dst->pblocks[i] = &dst->block[i];
00438     }
00439 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
00440 }
00441 
00446 void MPV_common_defaults(MpegEncContext *s){
00447     s->y_dc_scale_table=
00448     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
00449     s->chroma_qscale_table= ff_default_chroma_qscale_table;
00450     s->progressive_frame= 1;
00451     s->progressive_sequence= 1;
00452     s->picture_structure= PICT_FRAME;
00453 
00454     s->coded_picture_number = 0;
00455     s->picture_number = 0;
00456     s->input_picture_number = 0;
00457 
00458     s->picture_in_gop_number = 0;
00459 
00460     s->f_code = 1;
00461     s->b_code = 1;
00462 }
00463 
00468 void MPV_decode_defaults(MpegEncContext *s){
00469     MPV_common_defaults(s);
00470 }
00471 
00476 av_cold int MPV_common_init(MpegEncContext *s)
00477 {
00478     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
00479 
00480     if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
00481         s->mb_height = (s->height + 31) / 32 * 2;
00482     else
00483         s->mb_height = (s->height + 15) / 16;
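    /* For example, with height = 48 the progressive case gives
     * (48+15)/16 = 3 macroblock rows, while the interlaced MPEG-2 case gives
     * (48+31)/32*2 = 4: each field is padded to a whole number of 16-line
     * macroblock rows. */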
00484 
00485     if(s->avctx->pix_fmt == PIX_FMT_NONE){
00486         av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
00487         return -1;
00488     }
00489 
00490     if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
00491         av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
00492         return -1;
00493     }
00494 
00495     if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
00496         return -1;
00497 
00498     dsputil_init(&s->dsp, s->avctx);
00499     ff_dct_common_init(s);
00500 
00501     s->flags= s->avctx->flags;
00502     s->flags2= s->avctx->flags2;
00503 
00504     s->mb_width  = (s->width  + 15) / 16;
00505     s->mb_stride = s->mb_width + 1;
00506     s->b8_stride = s->mb_width*2 + 1;
00507     s->b4_stride = s->mb_width*4 + 1;
00508     mb_array_size= s->mb_height * s->mb_stride;
00509     mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
00510 
00511     /* set chroma shifts */
00512     avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
00513                                                     &(s->chroma_y_shift) );
00514 
00515     /* set default edge pos, will be overridden in decode_header if needed */
00516     s->h_edge_pos= s->mb_width*16;
00517     s->v_edge_pos= s->mb_height*16;
00518 
00519     s->mb_num = s->mb_width * s->mb_height;
00520 
00521     s->block_wrap[0]=
00522     s->block_wrap[1]=
00523     s->block_wrap[2]=
00524     s->block_wrap[3]= s->b8_stride;
00525     s->block_wrap[4]=
00526     s->block_wrap[5]= s->mb_stride;
00527 
00528     y_size = s->b8_stride * (2 * s->mb_height + 1);
00529     c_size = s->mb_stride * (s->mb_height + 1);
00530     yc_size = y_size + 2 * c_size;
00531 
00532     /* convert fourcc to upper case */
00533     s->codec_tag=          toupper( s->avctx->codec_tag     &0xFF)
00534                         + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
00535                         + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
00536                         + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
00537 
00538     s->stream_codec_tag=          toupper( s->avctx->stream_codec_tag     &0xFF)
00539                                + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
00540                                + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
00541                                + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
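    /* For example, a stream tagged 'xvid' (MKTAG value 0x64697678) becomes
     * 'XVID' (0x44495658), so later FOURCC comparisons only need to test the
     * upper-case form. */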
00542 
00543     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
00544 
00545     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error resilience code looks cleaner with this
00546     for(y=0; y<s->mb_height; y++){
00547         for(x=0; x<s->mb_width; x++){
00548             s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
00549         }
00550     }
00551     s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
00552 
00553     if (s->encoding) {
00554         /* Allocate MV tables */
00555         FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
00556         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00557         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00558         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00559         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00560         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
00561         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
00562         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
00563         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
00564         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
00565         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
00566         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
00567 
00568         if(s->msmpeg4_version){
00569             FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
00570         }
00571         FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
00572 
00573         /* Allocate MB type table */
00574         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
00575 
00576         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
00577 
00578         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
00579         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
00580         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00581         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00582         FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00583         FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00584 
00585         if(s->avctx->noise_reduction){
00586             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
00587         }
00588     }
00589     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
00590     for(i = 0; i < MAX_PICTURE_COUNT; i++) {
00591         avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
00592     }
00593 
00594     FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
00595 
00596     if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
00597         /* interlaced direct mode decoding tables */
00598             for(i=0; i<2; i++){
00599                 int j, k;
00600                 for(j=0; j<2; j++){
00601                     for(k=0; k<2; k++){
00602                         FF_ALLOCZ_OR_GOTO(s->avctx,    s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
00603                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
00604                     }
00605                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
00606                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
00607                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
00608                 }
00609                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
00610             }
00611     }
00612     if (s->out_format == FMT_H263) {
00613         /* ac values */
00614         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
00615         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
00616         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
00617         s->ac_val[2] = s->ac_val[1] + c_size;
00618 
00619         /* cbp values */
00620         FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
00621         s->coded_block= s->coded_block_base + s->b8_stride + 1;
00622 
00623         /* cbp, ac_pred, pred_dir */
00624         FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
00625         FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
00626     }
00627 
00628     if (s->h263_pred || s->h263_plus || !s->encoding) {
00629         /* dc values */
00630         //MN: we need these for error resilience of intra-frames
00631         FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
00632         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
00633         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
00634         s->dc_val[2] = s->dc_val[1] + c_size;
00635         for(i=0;i<yc_size;i++)
00636             s->dc_val_base[i] = 1024;
00637     }
00638 
00639     /* which mb is an intra block */
00640     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
00641     memset(s->mbintra_table, 1, mb_array_size);
00642 
00643     /* init macroblock skip table */
00644     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
00645     //Note: the +2 is for a quicker mpeg4 slice_end detection
00646     FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
00647 
00648     s->parse_context.state= -1;
00649     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
00650        s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00651        s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00652        s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00653     }
00654 
00655     s->context_initialized = 1;
00656 
00657     s->thread_context[0]= s;
00658     threads = s->avctx->thread_count;
00659 
00660     for(i=1; i<threads; i++){
00661         s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
00662         memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
00663     }
00664 
00665     for(i=0; i<threads; i++){
00666         if(init_duplicate_context(s->thread_context[i], s) < 0)
00667            goto fail;
00668         s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
00669         s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
00670     }
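    /* For example, with mb_height = 30 and thread_count = 4 the rounded
     * division above yields row ranges 0-7, 8-14, 15-22 and 23-29
     * (8/7/8/7 rows), splitting the work as evenly as integer arithmetic
     * allows. */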
00671 
00672     return 0;
00673  fail:
00674     MPV_common_end(s);
00675     return -1;
00676 }
00677 
00678 /* init common structure for both encoder and decoder */
00679 void MPV_common_end(MpegEncContext *s)
00680 {
00681     int i, j, k;
00682 
00683     for(i=0; i<s->avctx->thread_count; i++){
00684         free_duplicate_context(s->thread_context[i]);
00685     }
00686     for(i=1; i<s->avctx->thread_count; i++){
00687         av_freep(&s->thread_context[i]);
00688     }
00689 
00690     av_freep(&s->parse_context.buffer);
00691     s->parse_context.buffer_size=0;
00692 
00693     av_freep(&s->mb_type);
00694     av_freep(&s->p_mv_table_base);
00695     av_freep(&s->b_forw_mv_table_base);
00696     av_freep(&s->b_back_mv_table_base);
00697     av_freep(&s->b_bidir_forw_mv_table_base);
00698     av_freep(&s->b_bidir_back_mv_table_base);
00699     av_freep(&s->b_direct_mv_table_base);
00700     s->p_mv_table= NULL;
00701     s->b_forw_mv_table= NULL;
00702     s->b_back_mv_table= NULL;
00703     s->b_bidir_forw_mv_table= NULL;
00704     s->b_bidir_back_mv_table= NULL;
00705     s->b_direct_mv_table= NULL;
00706     for(i=0; i<2; i++){
00707         for(j=0; j<2; j++){
00708             for(k=0; k<2; k++){
00709                 av_freep(&s->b_field_mv_table_base[i][j][k]);
00710                 s->b_field_mv_table[i][j][k]=NULL;
00711             }
00712             av_freep(&s->b_field_select_table[i][j]);
00713             av_freep(&s->p_field_mv_table_base[i][j]);
00714             s->p_field_mv_table[i][j]=NULL;
00715         }
00716         av_freep(&s->p_field_select_table[i]);
00717     }
00718 
00719     av_freep(&s->dc_val_base);
00720     av_freep(&s->ac_val_base);
00721     av_freep(&s->coded_block_base);
00722     av_freep(&s->mbintra_table);
00723     av_freep(&s->cbp_table);
00724     av_freep(&s->pred_dir_table);
00725 
00726     av_freep(&s->mbskip_table);
00727     av_freep(&s->prev_pict_types);
00728     av_freep(&s->bitstream_buffer);
00729     s->allocated_bitstream_buffer_size=0;
00730 
00731     av_freep(&s->avctx->stats_out);
00732     av_freep(&s->ac_stats);
00733     av_freep(&s->error_status_table);
00734     av_freep(&s->mb_index2xy);
00735     av_freep(&s->lambda_table);
00736     av_freep(&s->q_intra_matrix);
00737     av_freep(&s->q_inter_matrix);
00738     av_freep(&s->q_intra_matrix16);
00739     av_freep(&s->q_inter_matrix16);
00740     av_freep(&s->input_picture);
00741     av_freep(&s->reordered_input_picture);
00742     av_freep(&s->dct_offset);
00743 
00744     if(s->picture){
00745         for(i=0; i<MAX_PICTURE_COUNT; i++){
00746             free_picture(s, &s->picture[i]);
00747         }
00748     }
00749     av_freep(&s->picture);
00750     s->context_initialized = 0;
00751     s->last_picture_ptr=
00752     s->next_picture_ptr=
00753     s->current_picture_ptr= NULL;
00754     s->linesize= s->uvlinesize= 0;
00755 
00756     for(i=0; i<3; i++)
00757         av_freep(&s->visualization_buffer[i]);
00758 
00759     avcodec_default_free_buffers(s->avctx);
00760 }
00761 
00762 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
00763 {
00764     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
00765     uint8_t index_run[MAX_RUN+1];
00766     int last, run, level, start, end, i;
00767 
00768     /* If table is static, we can quit if rl->max_level[0] is not NULL */
00769     if(static_store && rl->max_level[0])
00770         return;
00771 
00772     /* compute max_level[], max_run[] and index_run[] */
00773     for(last=0;last<2;last++) {
00774         if (last == 0) {
00775             start = 0;
00776             end = rl->last;
00777         } else {
00778             start = rl->last;
00779             end = rl->n;
00780         }
00781 
00782         memset(max_level, 0, MAX_RUN + 1);
00783         memset(max_run, 0, MAX_LEVEL + 1);
00784         memset(index_run, rl->n, MAX_RUN + 1);
00785         for(i=start;i<end;i++) {
00786             run = rl->table_run[i];
00787             level = rl->table_level[i];
00788             if (index_run[run] == rl->n)
00789                 index_run[run] = i;
00790             if (level > max_level[run])
00791                 max_level[run] = level;
00792             if (run > max_run[level])
00793                 max_run[level] = run;
00794         }
00795         if(static_store)
00796             rl->max_level[last] = static_store[last];
00797         else
00798             rl->max_level[last] = av_malloc(MAX_RUN + 1);
00799         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
00800         if(static_store)
00801             rl->max_run[last] = static_store[last] + MAX_RUN + 1;
00802         else
00803             rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
00804         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
00805         if(static_store)
00806             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
00807         else
00808             rl->index_run[last] = av_malloc(MAX_RUN + 1);
00809         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
00810     }
00811 }
00812 
00813 void init_vlc_rl(RLTable *rl)
00814 {
00815     int i, q;
00816 
00817     for(q=0; q<32; q++){
00818         int qmul= q*2;
00819         int qadd= (q-1)|1;
00820 
00821         if(q==0){
00822             qmul=1;
00823             qadd=0;
00824         }
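        /* These factors fold the H.263-style inverse quantisation into the
         * VLC table: a nonzero coefficient dequantises to roughly
         *     |level| * (2*qscale) + ((qscale - 1) | 1)
         * so storing level*qmul + qadd for every qscale lets the decoder
         * emit dequantised levels straight from the VLC lookup; the q == 0
         * table keeps raw levels (qmul = 1, qadd = 0). */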
00825         for(i=0; i<rl->vlc.table_size; i++){
00826             int code= rl->vlc.table[i][0];
00827             int len = rl->vlc.table[i][1];
00828             int level, run;
00829 
00830             if(len==0){ // illegal code
00831                 run= 66;
00832                 level= MAX_LEVEL;
00833             }else if(len<0){ //more bits needed
00834                 run= 0;
00835                 level= code;
00836             }else{
00837                 if(code==rl->n){ //esc
00838                     run= 66;
00839                     level= 0;
00840                 }else{
00841                     run=   rl->table_run  [code] + 1;
00842                     level= rl->table_level[code] * qmul + qadd;
00843                     if(code >= rl->last) run+=192;
00844                 }
00845             }
00846             rl->rl_vlc[q][i].len= len;
00847             rl->rl_vlc[q][i].level= level;
00848             rl->rl_vlc[q][i].run= run;
00849         }
00850     }
00851 }
00852 
00853 int ff_find_unused_picture(MpegEncContext *s, int shared){
00854     int i;
00855 
00856     if(shared){
00857         for(i=0; i<MAX_PICTURE_COUNT; i++){
00858             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
00859         }
00860     }else{
00861         for(i=0; i<MAX_PICTURE_COUNT; i++){
00862             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
00863         }
00864         for(i=0; i<MAX_PICTURE_COUNT; i++){
00865             if(s->picture[i].data[0]==NULL) return i;
00866         }
00867     }
00868 
00869     av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
00870     /* We could return -1, but the codec would crash trying to draw into a
00871      * non-existing frame anyway. This is safer than waiting for a random crash.
00872      * Also the return of this is never useful, an encoder must only allocate
00873      * as much as allowed in the specification. This has no relationship to how
00874      * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
00875      * enough for such valid streams).
00876      * Plus, a decoder has to check stream validity and remove frames if too
00877      * many reference frames are around. Waiting for "OOM" is not correct at
00878      * all. Similarly, missing reference frames have to be replaced by
00879      * interpolated/MC frames, anything else is a bug in the codec ...
00880      */
00881     abort();
00882     return -1;
00883 }
00884 
00885 static void update_noise_reduction(MpegEncContext *s){
00886     int intra, i;
00887 
00888     for(intra=0; intra<2; intra++){
00889         if(s->dct_count[intra] > (1<<16)){
00890             for(i=0; i<64; i++){
00891                 s->dct_error_sum[intra][i] >>=1;
00892             }
00893             s->dct_count[intra] >>= 1;
00894         }
00895 
00896         for(i=0; i<64; i++){
00897             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
00898         }
00899     }
00900 }
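/* Roughly, the offset computed above approximates
 *     noise_reduction * dct_count / dct_error_sum[i]
 * per coefficient: coefficients whose average magnitude is small receive a
 * larger offset and are therefore more likely to be pulled to zero by the
 * encoder's denoising step.  The halving keeps the running sums from
 * overflowing. */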
00901 
00905 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
00906 {
00907     int i;
00908     Picture *pic;
00909     s->mb_skipped = 0;
00910 
00911     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
00912 
00913     /* mark&release old frames */
00914     if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
00915       if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
00916           free_frame_buffer(s, s->last_picture_ptr);
00917 
00918         /* release forgotten pictures */
00919         /* if(mpeg124/h263) */
00920         if(!s->encoding){
00921             for(i=0; i<MAX_PICTURE_COUNT; i++){
00922                 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
00923                     av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
00924                     free_frame_buffer(s, &s->picture[i]);
00925                 }
00926             }
00927         }
00928       }
00929     }
00930 
00931     if(!s->encoding){
00932         /* release non reference frames */
00933         for(i=0; i<MAX_PICTURE_COUNT; i++){
00934             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
00935                 free_frame_buffer(s, &s->picture[i]);
00936             }
00937         }
00938 
00939         if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
00940             pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
00941         else{
00942             i= ff_find_unused_picture(s, 0);
00943             pic= &s->picture[i];
00944         }
00945 
00946         pic->reference= 0;
00947         if (!s->dropable){
00948             if (s->codec_id == CODEC_ID_H264)
00949                 pic->reference = s->picture_structure;
00950             else if (s->pict_type != FF_B_TYPE)
00951                 pic->reference = 3;
00952         }
00953 
00954         pic->coded_picture_number= s->coded_picture_number++;
00955 
00956         if(ff_alloc_picture(s, pic, 0) < 0)
00957             return -1;
00958 
00959         s->current_picture_ptr= pic;
00960         s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
00961         s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
00962     }
00963 
00964     s->current_picture_ptr->pict_type= s->pict_type;
00965 //    if(s->flags && CODEC_FLAG_QSCALE)
00966   //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
00967     s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
00968 
00969     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
00970 
00971     if (s->pict_type != FF_B_TYPE) {
00972         s->last_picture_ptr= s->next_picture_ptr;
00973         if(!s->dropable)
00974             s->next_picture_ptr= s->current_picture_ptr;
00975     }
00976 /*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
00977         s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
00978         s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
00979         s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
00980         s->pict_type, s->dropable);*/
00981 
00982     if(s->codec_id != CODEC_ID_H264){
00983         if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
00984             av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
00985             /* Allocate a dummy frame */
00986             i= ff_find_unused_picture(s, 0);
00987             s->last_picture_ptr= &s->picture[i];
00988             if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
00989                 return -1;
00990         }
00991         if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
00992             /* Allocate a dummy frame */
00993             i= ff_find_unused_picture(s, 0);
00994             s->next_picture_ptr= &s->picture[i];
00995             if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
00996                 return -1;
00997         }
00998     }
00999 
01000     if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
01001     if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
01002 
01003     assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
01004 
01005     if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
01006         int i;
01007         for(i=0; i<4; i++){
01008             if(s->picture_structure == PICT_BOTTOM_FIELD){
01009                  s->current_picture.data[i] += s->current_picture.linesize[i];
01010             }
01011             s->current_picture.linesize[i] *= 2;
01012             s->last_picture.linesize[i] *=2;
01013             s->next_picture.linesize[i] *=2;
01014         }
01015     }
01016 
01017     s->hurry_up= s->avctx->hurry_up;
01018     s->error_recognition= avctx->error_recognition;
01019 
01020     /* set dequantizer, we can't do it during init as it might change for mpeg4
01021        and we can't do it in the header decode as init is not called for mpeg4 there yet */
01022     if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
01023         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
01024         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
01025     }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
01026         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
01027         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
01028     }else{
01029         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
01030         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
01031     }
01032 
01033     if(s->dct_error_sum){
01034         assert(s->avctx->noise_reduction && s->encoding);
01035 
01036         update_noise_reduction(s);
01037     }
01038 
01039     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
01040         return ff_xvmc_field_start(s, avctx);
01041 
01042     return 0;
01043 }
01044 
01045 /* generic function for encode/decode called after a frame has been coded/decoded */
01046 void MPV_frame_end(MpegEncContext *s)
01047 {
01048     int i;
01049     /* draw edge for correct motion prediction if outside */
01050     //just to make sure that all data is rendered.
01051     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
01052         ff_xvmc_field_end(s);
01053     }else if(!s->avctx->hwaccel
01054        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
01055        && s->unrestricted_mv
01056        && s->current_picture.reference
01057        && !s->intra_only
01058        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
01059             s->dsp.draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
01060             s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
01061             s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
01062     }
01063     emms_c();
01064 
01065     s->last_pict_type    = s->pict_type;
01066     s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
01067     if(s->pict_type!=FF_B_TYPE){
01068         s->last_non_b_pict_type= s->pict_type;
01069     }
01070 #if 0
01071         /* copy back current_picture variables */
01072     for(i=0; i<MAX_PICTURE_COUNT; i++){
01073         if(s->picture[i].data[0] == s->current_picture.data[0]){
01074             s->picture[i]= s->current_picture;
01075             break;
01076         }
01077     }
01078     assert(i<MAX_PICTURE_COUNT);
01079 #endif
01080 
01081     if(s->encoding){
01082         /* release non-reference frames */
01083         for(i=0; i<MAX_PICTURE_COUNT; i++){
01084             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
01085                 free_frame_buffer(s, &s->picture[i]);
01086             }
01087         }
01088     }
01089     // clear copies, to avoid confusion
01090 #if 0
01091     memset(&s->last_picture, 0, sizeof(Picture));
01092     memset(&s->next_picture, 0, sizeof(Picture));
01093     memset(&s->current_picture, 0, sizeof(Picture));
01094 #endif
01095     s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
01096 }
01097 
01105 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01106     int x, y, fr, f;
01107 
01108     sx= av_clip(sx, 0, w-1);
01109     sy= av_clip(sy, 0, h-1);
01110     ex= av_clip(ex, 0, w-1);
01111     ey= av_clip(ey, 0, h-1);
01112 
01113     buf[sy*stride + sx]+= color;
01114 
01115     if(FFABS(ex - sx) > FFABS(ey - sy)){
01116         if(sx > ex){
01117             FFSWAP(int, sx, ex);
01118             FFSWAP(int, sy, ey);
01119         }
01120         buf+= sx + sy*stride;
01121         ex-= sx;
01122         f= ((ey-sy)<<16)/ex;
01123         for(x= 0; x <= ex; x++){
01124             y = (x*f)>>16;
01125             fr= (x*f)&0xFFFF;
01126             buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
01127             buf[(y+1)*stride + x]+= (color*         fr )>>16;
01128         }
01129     }else{
01130         if(sy > ey){
01131             FFSWAP(int, sx, ex);
01132             FFSWAP(int, sy, ey);
01133         }
01134         buf+= sx + sy*stride;
01135         ey-= sy;
01136         if(ey) f= ((ex-sx)<<16)/ey;
01137         else   f= 0;
01138         for(y= 0; y <= ey; y++){
01139             x = (y*f)>>16;
01140             fr= (y*f)&0xFFFF;
01141             buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
01142             buf[y*stride + x+1]+= (color*         fr )>>16;
01143         }
01144     }
01145 }
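/* The rasterisation above is a 16.16 fixed-point DDA: f holds the slope,
 * x*f>>16 the integer coordinate, and the low 16 bits the fractional part,
 * which splits 'color' between the two neighbouring pixels as a cheap form
 * of anti-aliasing. */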
01146 
01154 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01155     int dx,dy;
01156 
01157     sx= av_clip(sx, -100, w+100);
01158     sy= av_clip(sy, -100, h+100);
01159     ex= av_clip(ex, -100, w+100);
01160     ey= av_clip(ey, -100, h+100);
01161 
01162     dx= ex - sx;
01163     dy= ey - sy;
01164 
01165     if(dx*dx + dy*dy > 3*3){
01166         int rx=  dx + dy;
01167         int ry= -dx + dy;
01168         int length= ff_sqrt((rx*rx + ry*ry)<<8);
01169 
01170         //FIXME subpixel accuracy
01171         rx= ROUNDED_DIV(rx*3<<4, length);
01172         ry= ROUNDED_DIV(ry*3<<4, length);
01173 
01174         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01175         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01176     }
01177     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01178 }
01179 
01183 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
01184 
01185     if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
01186 
01187     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
01188         int x,y;
01189 
01190         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
01191         switch (pict->pict_type) {
01192             case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
01193             case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
01194             case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
01195             case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
01196             case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
01197             case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
01198         }
01199         for(y=0; y<s->mb_height; y++){
01200             for(x=0; x<s->mb_width; x++){
01201                 if(s->avctx->debug&FF_DEBUG_SKIP){
01202                     int count= s->mbskip_table[x + y*s->mb_stride];
01203                     if(count>9) count=9;
01204                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01205                 }
01206                 if(s->avctx->debug&FF_DEBUG_QP){
01207                     av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
01208                 }
01209                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
01210                     int mb_type= pict->mb_type[x + y*s->mb_stride];
01211                     //Type & MV direction
01212                     if(IS_PCM(mb_type))
01213                         av_log(s->avctx, AV_LOG_DEBUG, "P");
01214                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01215                         av_log(s->avctx, AV_LOG_DEBUG, "A");
01216                     else if(IS_INTRA4x4(mb_type))
01217                         av_log(s->avctx, AV_LOG_DEBUG, "i");
01218                     else if(IS_INTRA16x16(mb_type))
01219                         av_log(s->avctx, AV_LOG_DEBUG, "I");
01220                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01221                         av_log(s->avctx, AV_LOG_DEBUG, "d");
01222                     else if(IS_DIRECT(mb_type))
01223                         av_log(s->avctx, AV_LOG_DEBUG, "D");
01224                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
01225                         av_log(s->avctx, AV_LOG_DEBUG, "g");
01226                     else if(IS_GMC(mb_type))
01227                         av_log(s->avctx, AV_LOG_DEBUG, "G");
01228                     else if(IS_SKIP(mb_type))
01229                         av_log(s->avctx, AV_LOG_DEBUG, "S");
01230                     else if(!USES_LIST(mb_type, 1))
01231                         av_log(s->avctx, AV_LOG_DEBUG, ">");
01232                     else if(!USES_LIST(mb_type, 0))
01233                         av_log(s->avctx, AV_LOG_DEBUG, "<");
01234                     else{
01235                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01236                         av_log(s->avctx, AV_LOG_DEBUG, "X");
01237                     }
01238 
01239                     //segmentation
01240                     if(IS_8X8(mb_type))
01241                         av_log(s->avctx, AV_LOG_DEBUG, "+");
01242                     else if(IS_16X8(mb_type))
01243                         av_log(s->avctx, AV_LOG_DEBUG, "-");
01244                     else if(IS_8X16(mb_type))
01245                         av_log(s->avctx, AV_LOG_DEBUG, "|");
01246                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
01247                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01248                     else
01249                         av_log(s->avctx, AV_LOG_DEBUG, "?");
01250 
01251 
01252                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
01253                         av_log(s->avctx, AV_LOG_DEBUG, "=");
01254                     else
01255                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01256                 }
01257 //                av_log(s->avctx, AV_LOG_DEBUG, " ");
01258             }
01259             av_log(s->avctx, AV_LOG_DEBUG, "\n");
01260         }
01261     }
01262 
01263     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
01264         const int shift= 1 + s->quarter_sample;
01265         int mb_y;
01266         uint8_t *ptr;
01267         int i;
01268         int h_chroma_shift, v_chroma_shift, block_height;
01269         const int width = s->avctx->width;
01270         const int height= s->avctx->height;
01271         const int mv_sample_log2= 4 - pict->motion_subsample_log2;
01272         const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01273         s->low_delay=0; //needed to see the vectors without trashing the buffers
01274 
01275         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
01276         for(i=0; i<3; i++){
01277             memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
01278             pict->data[i]= s->visualization_buffer[i];
01279         }
01280         pict->type= FF_BUFFER_TYPE_COPY;
01281         ptr= pict->data[0];
01282         block_height = 16>>v_chroma_shift;
01283 
01284         for(mb_y=0; mb_y<s->mb_height; mb_y++){
01285             int mb_x;
01286             for(mb_x=0; mb_x<s->mb_width; mb_x++){
01287                 const int mb_index= mb_x + mb_y*s->mb_stride;
01288                 if((s->avctx->debug_mv) && pict->motion_val){
01289                   int type;
01290                   for(type=0; type<3; type++){
01291                     int direction = 0;
01292                     switch (type) {
01293                       case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
01294                                 continue;
01295                               direction = 0;
01296                               break;
01297                       case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
01298                                 continue;
01299                               direction = 0;
01300                               break;
01301                       case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
01302                                 continue;
01303                               direction = 1;
01304                               break;
01305                     }
01306                     if(!USES_LIST(pict->mb_type[mb_index], direction))
01307                         continue;
01308 
01309                     if(IS_8X8(pict->mb_type[mb_index])){
01310                       int i;
01311                       for(i=0; i<4; i++){
01312                         int sx= mb_x*16 + 4 + 8*(i&1);
01313                         int sy= mb_y*16 + 4 + 8*(i>>1);
01314                         int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01315                         int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01316                         int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01317                         draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01318                       }
01319                     }else if(IS_16X8(pict->mb_type[mb_index])){
01320                       int i;
01321                       for(i=0; i<2; i++){
01322                         int sx=mb_x*16 + 8;
01323                         int sy=mb_y*16 + 4 + 8*i;
01324                         int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
01325                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01326                         int my=(pict->motion_val[direction][xy][1]>>shift);
01327 
01328                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01329                             my*=2;
01330 
01331                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01332                       }
01333                     }else if(IS_8X16(pict->mb_type[mb_index])){
01334                       int i;
01335                       for(i=0; i<2; i++){
01336                         int sx=mb_x*16 + 4 + 8*i;
01337                         int sy=mb_y*16 + 8;
01338                         int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
01339                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01340                         int my=(pict->motion_val[direction][xy][1]>>shift);
01341 
01342                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01343                             my*=2;
01344 
01345                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01346                       }
01347                     }else{
01348                       int sx= mb_x*16 + 8;
01349                       int sy= mb_y*16 + 8;
01350                       int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
01351                       int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01352                       int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01353                       draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01354                     }
01355                   }
01356                 }
01357                 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
01358                     uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
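                    // qscale runs 1..31, so this maps it to roughly 4..128 and replicates it
                    // into all 8 bytes: the macroblock's chroma is painted with a flat value
                    // proportional to its QP.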
01359                     int y;
01360                     for(y=0; y<block_height; y++){
01361                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
01362                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
01363                     }
01364                 }
01365                 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
01366                     int mb_type= pict->mb_type[mb_index];
01367                     uint64_t u,v;
01368                     int y;
01369 #define COLOR(theta, r)\
01370 u= (int)(128 + r*cos(theta*3.141592/180));\
01371 v= (int)(128 + r*sin(theta*3.141592/180));
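// COLOR() picks a (U,V) pair on a circle of radius r around neutral grey (128,128),
// i.e. a hue angle in degrees; each macroblock type below gets its own colour in the
// chroma planes.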
01372 
01373 
01374                     u=v=128;
01375                     if(IS_PCM(mb_type)){
01376                         COLOR(120,48)
01377                     }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
01378                         COLOR(30,48)
01379                     }else if(IS_INTRA4x4(mb_type)){
01380                         COLOR(90,48)
01381                     }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
01382 //                        COLOR(120,48)
01383                     }else if(IS_DIRECT(mb_type)){
01384                         COLOR(150,48)
01385                     }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
01386                         COLOR(170,48)
01387                     }else if(IS_GMC(mb_type)){
01388                         COLOR(190,48)
01389                     }else if(IS_SKIP(mb_type)){
01390 //                        COLOR(180,48)
01391                     }else if(!USES_LIST(mb_type, 1)){
01392                         COLOR(240,48)
01393                     }else if(!USES_LIST(mb_type, 0)){
01394                         COLOR(0,48)
01395                     }else{
01396                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01397                         COLOR(300,48)
01398                     }
01399 
01400                     u*= 0x0101010101010101ULL;
01401                     v*= 0x0101010101010101ULL;
01402                     for(y=0; y<block_height; y++){
01403                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
01404                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
01405                     }
01406 
01407                     //segmentation
01408                     if(IS_8X8(mb_type) || IS_16X8(mb_type)){
01409                         *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01410                         *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01411                     }
01412                     if(IS_8X8(mb_type) || IS_8X16(mb_type)){
01413                         for(y=0; y<16; y++)
01414                             pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
01415                     }
01416                     if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
01417                         int dm= 1 << (mv_sample_log2-2);
01418                         for(i=0; i<4; i++){
01419                             int sx= mb_x*16 + 8*(i&1);
01420                             int sy= mb_y*16 + 8*(i>>1);
01421                             int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01422                             //FIXME bidir
01423                             int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
01424                             if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
01425                                 for(y=0; y<8; y++)
01426                                     pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
01427                             if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
01428                                 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
01429                         }
01430                     }
01431 
01432                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
01433                         // hmm
01434                     }
01435                 }
01436                 s->mbskip_table[mb_index]=0;
01437             }
01438         }
01439     }
01440 }
01441 
01442 static inline int hpel_motion_lowres(MpegEncContext *s,
01443                                   uint8_t *dest, uint8_t *src,
01444                                   int field_based, int field_select,
01445                                   int src_x, int src_y,
01446                                   int width, int height, int stride,
01447                                   int h_edge_pos, int v_edge_pos,
01448                                   int w, int h, h264_chroma_mc_func *pix_op,
01449                                   int motion_x, int motion_y)
01450 {
01451     const int lowres= s->avctx->lowres;
01452     const int op_index= FFMIN(lowres, 2);
01453     const int s_mask= (2<<lowres)-1;
01454     int emu=0;
01455     int sx, sy;
01456 
01457     if(s->quarter_sample){
01458         motion_x/=2;
01459         motion_y/=2;
01460     }
01461 
01462     sx= motion_x & s_mask;
01463     sy= motion_y & s_mask;
01464     src_x += motion_x >> (lowres+1);
01465     src_y += motion_y >> (lowres+1);
01466 
01467     src += src_y * stride + src_x;
01468 
01469     if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
01470        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
01471         ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
01472                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
01473         src= s->edge_emu_buffer;
01474         emu=1;
01475     }
01476 
01477     sx= (sx << 2) >> lowres;
01478     sy= (sy << 2) >> lowres;
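    /* sx/sy now hold the sub-pel position rescaled to 1/8-pel units, which is what
       the h264_chroma_mc_func bilinear helpers expect (a note about the dsputil
       helpers used here, not something this function checks). */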
01479     if(field_select)
01480         src += s->linesize;
01481     pix_op[op_index](dest, src, stride, h, sx, sy);
01482     return emu;
01483 }
01484 
01485 /* apply one mpeg motion vector to the three components */
01486 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
01487                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
01488                                int field_based, int bottom_field, int field_select,
01489                                uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
01490                                int motion_x, int motion_y, int h, int mb_y)
01491 {
01492     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01493     int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
01494     const int lowres= s->avctx->lowres;
01495     const int op_index= FFMIN(lowres, 2);
01496     const int block_s= 8>>lowres;
01497     const int s_mask= (2<<lowres)-1;
01498     const int h_edge_pos = s->h_edge_pos >> lowres;
01499     const int v_edge_pos = s->v_edge_pos >> lowres;
01500     linesize   = s->current_picture.linesize[0] << field_based;
01501     uvlinesize = s->current_picture.linesize[1] << field_based;
01502 
01503     if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
01504         motion_x/=2;
01505         motion_y/=2;
01506     }
01507 
01508     if(field_based){
01509         motion_y += (bottom_field - field_select)*((1<<lowres)-1);
01510     }
01511 
01512     sx= motion_x & s_mask;
01513     sy= motion_y & s_mask;
01514     src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
01515     src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
01516 
01517     if (s->out_format == FMT_H263) {
01518         uvsx = ((motion_x>>1) & s_mask) | (sx&1);
01519         uvsy = ((motion_y>>1) & s_mask) | (sy&1);
01520         uvsrc_x = src_x>>1;
01521         uvsrc_y = src_y>>1;
01522     }else if(s->out_format == FMT_H261){ //even chroma MVs are full-pel in H.261
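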
01523         mx = motion_x / 4;
01524         my = motion_y / 4;
01525         uvsx = (2*mx) & s_mask;
01526         uvsy = (2*my) & s_mask;
01527         uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
01528         uvsrc_y =    mb_y*block_s               + (my >> lowres);
01529     } else {
01530         mx = motion_x / 2;
01531         my = motion_y / 2;
01532         uvsx = mx & s_mask;
01533         uvsy = my & s_mask;
01534         uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
01535         uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
01536     }
01537 
01538     ptr_y  = ref_picture[0] + src_y * linesize + src_x;
01539     ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
01540     ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
01541 
01542     if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
01543        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
01544             ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
01545                              src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
01546             ptr_y = s->edge_emu_buffer;
01547             if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01548                 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
01549                 ff_emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
01550                                  uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
01551                 ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
01552                                  uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
01553                 ptr_cb= uvbuf;
01554                 ptr_cr= uvbuf+16;
01555             }
01556     }
01557 
01558     if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
01559         dest_y += s->linesize;
01560         dest_cb+= s->uvlinesize;
01561         dest_cr+= s->uvlinesize;
01562     }
01563 
01564     if(field_select){
01565         ptr_y += s->linesize;
01566         ptr_cb+= s->uvlinesize;
01567         ptr_cr+= s->uvlinesize;
01568     }
01569 
01570     sx= (sx << 2) >> lowres;
01571     sy= (sy << 2) >> lowres;
01572     pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
01573 
01574     if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01575         uvsx= (uvsx << 2) >> lowres;
01576         uvsy= (uvsy << 2) >> lowres;
01577         pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
01578         pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
01579     }
01580     //FIXME h261 lowres loop filter
01581 }
01582 
01583 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01584                                      uint8_t *dest_cb, uint8_t *dest_cr,
01585                                      uint8_t **ref_picture,
01586                                      h264_chroma_mc_func *pix_op,
01587                                      int mx, int my){
01588     const int lowres= s->avctx->lowres;
01589     const int op_index= FFMIN(lowres, 2);
01590     const int block_s= 8>>lowres;
01591     const int s_mask= (2<<lowres)-1;
01592     const int h_edge_pos = s->h_edge_pos >> (lowres+1);
01593     const int v_edge_pos = s->v_edge_pos >> (lowres+1);
01594     int emu=0, src_x, src_y, offset, sx, sy;
01595     uint8_t *ptr;
01596 
01597     if(s->quarter_sample){
01598         mx/=2;
01599         my/=2;
01600     }
01601 
01602     /* In case of 8X8, we construct a single chroma motion vector
01603        with a special rounding */
01604     mx= ff_h263_round_chroma(mx);
01605     my= ff_h263_round_chroma(my);
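    /* Sketch of the "special rounding": ff_h263_round_chroma() (in mpegvideo.h) is,
       from memory, roughly
           static const uint8_t roundtab[16]= {0,0,0,1,1,1,1,1,1,1,1,1,1,2,2,2};
           return roundtab[x & 0xf] + (x >> 3);
       i.e. the sum of the four luma vectors is divided by 8 with the H.263/MPEG-4
       rounding bias that favours half-sample chroma positions; treat the table
       above as an approximation, not a verbatim copy. */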
01606 
01607     sx= mx & s_mask;
01608     sy= my & s_mask;
01609     src_x = s->mb_x*block_s + (mx >> (lowres+1));
01610     src_y = s->mb_y*block_s + (my >> (lowres+1));
01611 
01612     offset = src_y * s->uvlinesize + src_x;
01613     ptr = ref_picture[1] + offset;
01614     if(s->flags&CODEC_FLAG_EMU_EDGE){
01615         if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
01616            || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
01617             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01618             ptr= s->edge_emu_buffer;
01619             emu=1;
01620         }
01621     }
01622     sx= (sx << 2) >> lowres;
01623     sy= (sy << 2) >> lowres;
01624     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
01625 
01626     ptr = ref_picture[2] + offset;
01627     if(emu){
01628         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01629         ptr= s->edge_emu_buffer;
01630     }
01631     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
01632 }
01633 
01645 static inline void MPV_motion_lowres(MpegEncContext *s,
01646                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
01647                               int dir, uint8_t **ref_picture,
01648                               h264_chroma_mc_func *pix_op)
01649 {
01650     int mx, my;
01651     int mb_x, mb_y, i;
01652     const int lowres= s->avctx->lowres;
01653     const int block_s= 8>>lowres;
01654 
01655     mb_x = s->mb_x;
01656     mb_y = s->mb_y;
01657 
01658     switch(s->mv_type) {
01659     case MV_TYPE_16X16:
01660         mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01661                     0, 0, 0,
01662                     ref_picture, pix_op,
01663                     s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
01664         break;
01665     case MV_TYPE_8X8:
01666         mx = 0;
01667         my = 0;
01668             for(i=0;i<4;i++) {
01669                 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
01670                             ref_picture[0], 0, 0,
01671                             (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
01672                             s->width, s->height, s->linesize,
01673                             s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
01674                             block_s, block_s, pix_op,
01675                             s->mv[dir][i][0], s->mv[dir][i][1]);
01676 
01677                 mx += s->mv[dir][i][0];
01678                 my += s->mv[dir][i][1];
01679             }
01680 
01681         if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
01682             chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
01683         break;
01684     case MV_TYPE_FIELD:
01685         if (s->picture_structure == PICT_FRAME) {
01686             /* top field */
01687             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01688                         1, 0, s->field_select[dir][0],
01689                         ref_picture, pix_op,
01690                         s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
01691             /* bottom field */
01692             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01693                         1, 1, s->field_select[dir][1],
01694                         ref_picture, pix_op,
01695                         s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
01696         } else {
01697             if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
01698                 ref_picture= s->current_picture_ptr->data;
01699             }
01700 
01701             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01702                         0, 0, s->field_select[dir][0],
01703                         ref_picture, pix_op,
01704                         s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
01705         }
01706         break;
01707     case MV_TYPE_16X8:
01708         for(i=0; i<2; i++){
01709             uint8_t ** ref2picture;
01710 
01711             if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
01712                 ref2picture= ref_picture;
01713             }else{
01714                 ref2picture= s->current_picture_ptr->data;
01715             }
01716 
01717             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01718                         0, 0, s->field_select[dir][i],
01719                         ref2picture, pix_op,
01720                         s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
01721 
01722             dest_y += 2*block_s*s->linesize;
01723             dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
01724             dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
01725         }
01726         break;
01727     case MV_TYPE_DMV:
01728         if(s->picture_structure == PICT_FRAME){
01729             for(i=0; i<2; i++){
01730                 int j;
01731                 for(j=0; j<2; j++){
01732                     mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01733                                 1, j, j^i,
01734                                 ref_picture, pix_op,
01735                                 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
01736                 }
01737                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
01738             }
01739         }else{
01740             for(i=0; i<2; i++){
01741                 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01742                             0, 0, s->picture_structure != i+1,
01743                             ref_picture, pix_op,
01744                             s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
01745 
01746                 // after the put, the second vector is averaged into the same block
01747                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
01748 
01749                 //the opposite-parity field is always in the same frame if this is the second field
01750                 if(!s->first_field){
01751                     ref_picture = s->current_picture_ptr->data;
01752                 }
01753             }
01754         }
01755     break;
01756     default: assert(0);
01757     }
01758 }
01759 
01760 /* put block[] to dest[] */
01761 static inline void put_dct(MpegEncContext *s,
01762                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01763 {
01764     s->dct_unquantize_intra(s, block, i, qscale);
01765     s->dsp.idct_put (dest, line_size, block);
01766 }
01767 
01768 /* add block[] to dest[] */
01769 static inline void add_dct(MpegEncContext *s,
01770                            DCTELEM *block, int i, uint8_t *dest, int line_size)
01771 {
01772     if (s->block_last_index[i] >= 0) {
01773         s->dsp.idct_add (dest, line_size, block);
01774     }
01775 }
01776 
01777 static inline void add_dequant_dct(MpegEncContext *s,
01778                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01779 {
01780     if (s->block_last_index[i] >= 0) {
01781         s->dct_unquantize_inter(s, block, i, qscale);
01782 
01783         s->dsp.idct_add (dest, line_size, block);
01784     }
01785 }
01786 
01790 void ff_clean_intra_table_entries(MpegEncContext *s)
01791 {
01792     int wrap = s->b8_stride;
01793     int xy = s->block_index[0];
01794 
01795     s->dc_val[0][xy           ] =
01796     s->dc_val[0][xy + 1       ] =
01797     s->dc_val[0][xy     + wrap] =
01798     s->dc_val[0][xy + 1 + wrap] = 1024;
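    /* 1024 is the DC-prediction reset value: mid-grey (128) times the smallest
       dc_scale (8), so a neighbour whose state was wiped predicts a neutral DC
       (an interpretation, matching the MPEG-4-style reset value of 1024). */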
01799     /* ac pred */
01800     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
01801     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
01802     if (s->msmpeg4_version>=3) {
01803         s->coded_block[xy           ] =
01804         s->coded_block[xy + 1       ] =
01805         s->coded_block[xy     + wrap] =
01806         s->coded_block[xy + 1 + wrap] = 0;
01807     }
01808     /* chroma */
01809     wrap = s->mb_stride;
01810     xy = s->mb_x + s->mb_y * wrap;
01811     s->dc_val[1][xy] =
01812     s->dc_val[2][xy] = 1024;
01813     /* ac pred */
01814     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
01815     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
01816 
01817     s->mbintra_table[xy]= 0;
01818 }
01819 
01820 /* generic function called after a macroblock has been parsed by the
01821    decoder or after it has been encoded by the encoder.
01822 
01823    Important variables used:
01824    s->mb_intra : true if intra macroblock
01825    s->mv_dir   : motion vector direction
01826    s->mv_type  : motion vector type
01827    s->mv       : motion vector
01828    s->interlaced_dct : true if interlaced dct used (mpeg2)
01829  */
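/* Rough call-flow sketch (names taken from this file; the parsing step is whatever
   the per-codec slice decoder does and is only assumed here):

       // per-codec MB parser fills the MpegEncContext fields listed above
       s->mb_intra = ...; s->mv_dir = ...; s->mv_type = ...; s->mv[..][..][..] = ...;
       MPV_decode_mb(s, s->block);   // see below: picks the lowres / mpeg12 variant
                                     // of MPV_decode_mb_internal()
*/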
01830 static av_always_inline
01831 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
01832                             int lowres_flag, int is_mpeg12)
01833 {
01834     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
01835     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
01836         ff_xvmc_decode_mb(s);//xvmc uses pblocks
01837         return;
01838     }
01839 
01840     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
01841        /* save DCT coefficients */
01842        int i,j;
01843        DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
01844        for(i=0; i<6; i++)
01845            for(j=0; j<64; j++)
01846                *dct++ = block[i][s->dsp.idct_permutation[j]];
01847     }
01848 
01849     s->current_picture.qscale_table[mb_xy]= s->qscale;
01850 
01851     /* update DC predictors for P macroblocks */
01852     if (!s->mb_intra) {
01853         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
01854             if(s->mbintra_table[mb_xy])
01855                 ff_clean_intra_table_entries(s);
01856         } else {
01857             s->last_dc[0] =
01858             s->last_dc[1] =
01859             s->last_dc[2] = 128 << s->intra_dc_precision;
01860         }
01861     }
01862     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
01863         s->mbintra_table[mb_xy]=1;
01864 
01865     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
01866         uint8_t *dest_y, *dest_cb, *dest_cr;
01867         int dct_linesize, dct_offset;
01868         op_pixels_func (*op_pix)[4];
01869         qpel_mc_func (*op_qpix)[16];
01870         const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
01871         const int uvlinesize= s->current_picture.linesize[1];
01872         const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
01873         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
01874 
01875         /* avoid the copy if the macroblock was skipped in the last frame too */
01876         /* only skip during decoding, as encoding may trash the buffers a bit */
01877         if(!s->encoding){
01878             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
01879             const int age= s->current_picture.age;
01880 
01881             assert(age);
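            /* Interpretation of the counter logic below: 'age' is how many frames
               old the data already sitting in this (recycled) picture buffer is;
               if the MB has now been skipped at least that many times in a row,
               those pixels are still valid and nothing needs to be copied. */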
01882 
01883             if (s->mb_skipped) {
01884                 s->mb_skipped= 0;
01885                 assert(s->pict_type!=FF_I_TYPE);
01886 
01887                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
01888                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
01889 
01890                 /* if the previous MB was skipped too, there is nothing to do */
01891                 if (*mbskip_ptr >= age && s->current_picture.reference){
01892                     return;
01893                 }
01894             } else if(!s->current_picture.reference){
01895                 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
01896                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
01897             } else{
01898                 *mbskip_ptr = 0; /* not skipped */
01899             }
01900         }
01901 
01902         dct_linesize = linesize << s->interlaced_dct;
01903         dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
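        // With interlaced DCT the stride is doubled (rows of one field) and the
        // lower pair of luma blocks starts one line down (the other field) instead
        // of block_size lines down.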
01904 
01905         if(readable){
01906             dest_y=  s->dest[0];
01907             dest_cb= s->dest[1];
01908             dest_cr= s->dest[2];
01909         }else{
01910             dest_y = s->b_scratchpad;
01911             dest_cb= s->b_scratchpad+16*linesize;
01912             dest_cr= s->b_scratchpad+32*linesize;
01913         }
01914 
01915         if (!s->mb_intra) {
01916             /* motion handling */
01917             /* decoding or more than one mb_type (MC was already done otherwise) */
01918             if(!s->encoding){
01919                 if(lowres_flag){
01920                     h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
01921 
01922                     if (s->mv_dir & MV_DIR_FORWARD) {
01923                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
01924                         op_pix = s->dsp.avg_h264_chroma_pixels_tab;
01925                     }
01926                     if (s->mv_dir & MV_DIR_BACKWARD) {
01927                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
01928                     }
01929                 }else{
01930                     op_qpix= s->me.qpel_put;
01931                     if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
01932                         op_pix = s->dsp.put_pixels_tab;
01933                     }else{
01934                         op_pix = s->dsp.put_no_rnd_pixels_tab;
01935                     }
01936                     if (s->mv_dir & MV_DIR_FORWARD) {
01937                         MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
01938                         op_pix = s->dsp.avg_pixels_tab;
01939                         op_qpix= s->me.qpel_avg;
01940                     }
01941                     if (s->mv_dir & MV_DIR_BACKWARD) {
01942                         MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
01943                     }
01944                 }
01945             }
01946 
01947             /* skip dequant / idct if we are really late ;) */
01948             if(s->hurry_up>1) goto skip_idct;
01949             if(s->avctx->skip_idct){
01950                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
01951                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
01952                    || s->avctx->skip_idct >= AVDISCARD_ALL)
01953                     goto skip_idct;
01954             }
01955 
01956             /* add dct residue */
01957             if(s->encoding || !(   s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
01958                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
01959                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
01960                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
01961                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
01962                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
01963 
01964                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01965                     if (s->chroma_y_shift){
01966                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
01967                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
01968                     }else{
01969                         dct_linesize >>= 1;
01970                         dct_offset >>=1;
01971                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
01972                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
01973                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
01974                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
01975                     }
01976                 }
01977             } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
01978                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
01979                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
01980                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
01981                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
01982 
01983                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01984                     if(s->chroma_y_shift){//Chroma420
01985                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
01986                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
01987                     }else{
01988                         //chroma422
01989                         dct_linesize = uvlinesize << s->interlaced_dct;
01990                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
01991 
01992                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
01993                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
01994                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
01995                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
01996                         if(!s->chroma_x_shift){//Chroma444
01997                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
01998                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
01999                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
02000                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
02001                         }
02002                     }
02003                 }//fi gray
02004             }
02005             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02006                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02007             }
02008         } else {
02009             /* dct only in intra block */
02010             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02011                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02012                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02013                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02014                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02015 
02016                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02017                     if(s->chroma_y_shift){
02018                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02019                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02020                     }else{
02021                         dct_offset >>=1;
02022                         dct_linesize >>=1;
02023                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02024                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02025                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02026                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02027                     }
02028                 }
02029             }else{
02030                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
02031                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
02032                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
02033                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02034 
02035                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02036                     if(s->chroma_y_shift){
02037                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02038                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02039                     }else{
02040 
02041                         dct_linesize = uvlinesize << s->interlaced_dct;
02042                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
02043 
02044                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
02045                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
02046                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02047                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02048                         if(!s->chroma_x_shift){//Chroma444
02049                             s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
02050                             s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
02051                             s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
02052                             s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
02053                         }
02054                     }
02055                 }//gray
02056             }
02057         }
02058 skip_idct:
02059         if(!readable){
02060             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
02061             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02062             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02063         }
02064     }
02065 }
02066 
02067 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02068 #if !CONFIG_SMALL
02069     if(s->out_format == FMT_MPEG1) {
02070         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02071         else                 MPV_decode_mb_internal(s, block, 0, 1);
02072     } else
02073 #endif
02074     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02075     else                  MPV_decode_mb_internal(s, block, 0, 0);
02076 }
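/* The !CONFIG_SMALL branch instantiates a dedicated is_mpeg12=1 copy of the
   av_always_inline body above so its per-MB codec checks constant-fold away;
   with CONFIG_SMALL only the generic version is built to keep the binary small
   (the usual reason for this pattern, not spelled out in the source). */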
02077 
02082 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02083     if (s->avctx->draw_horiz_band) {
02084         AVFrame *src;
02085         const int field_pic= s->picture_structure != PICT_FRAME;
02086         int offset[4];
02087 
02088         h= FFMIN(h, (s->avctx->height>>field_pic) - y);
02089 
02090         if(field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
02091             h <<= 1;
02092             y <<= 1;
02093             if(s->first_field) return;
02094         }
02095 
02096         if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02097             src= (AVFrame*)s->current_picture_ptr;
02098         else if(s->last_picture_ptr)
02099             src= (AVFrame*)s->last_picture_ptr;
02100         else
02101             return;
02102 
02103         if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02104             offset[0]=
02105             offset[1]=
02106             offset[2]=
02107             offset[3]= 0;
02108         }else{
02109             offset[0]= y * s->linesize;
02110             offset[1]=
02111             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02112             offset[3]= 0;
02113         }
02114 
02115         emms_c();
02116 
02117         s->avctx->draw_horiz_band(s->avctx, src, offset,
02118                                   y, s->picture_structure, h);
02119     }
02120 }
02121 
02122 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
02123     const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
02124     const int uvlinesize= s->current_picture.linesize[1];
02125     const int mb_size= 4 - s->avctx->lowres;
02126 
02127     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
02128     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
02129     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02130     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02131     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02132     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02133     //block_index is not used by mpeg2, so it is not affected by chroma_format
02134 
02135     s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
02136     s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02137     s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02138 
02139     if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02140     {
02141         if(s->picture_structure==PICT_FRAME){
02142             s->dest[0] += s->mb_y *   linesize << mb_size;
02143             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02144             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02145         }else{
02146             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
02147             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02148             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02149             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02150         }
02151     }
02152 }
02153 
02154 void ff_mpeg_flush(AVCodecContext *avctx){
02155     int i;
02156     MpegEncContext *s = avctx->priv_data;
02157 
02158     if(s==NULL || s->picture==NULL)
02159         return;
02160 
02161     for(i=0; i<MAX_PICTURE_COUNT; i++){
02162        if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
02163                                     || s->picture[i].type == FF_BUFFER_TYPE_USER))
02164         free_frame_buffer(s, &s->picture[i]);
02165     }
02166     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02167 
02168     s->mb_x= s->mb_y= 0;
02169     s->closed_gop= 0;
02170 
02171     s->parse_context.state= -1;
02172     s->parse_context.frame_start_found= 0;
02173     s->parse_context.overread= 0;
02174     s->parse_context.overread_index= 0;
02175     s->parse_context.index= 0;
02176     s->parse_context.last_index= 0;
02177     s->bitstream_buffer_size=0;
02178     s->pp_time=0;
02179 }
02180 
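/* The MPEG-1/2 unquantizers below implement, roughly (paraphrasing the specs rather
   than quoting them, with W[] the intra/inter matrix and the sign restored after):
     MPEG-1 intra:  |F| = (|level| * qscale * W[j]) >> 3,         then forced odd
     MPEG-1 inter:  |F| = ((2*|level| + 1) * qscale * W[j]) >> 4, then forced odd
     MPEG-2:        same scaling, but no "oddification"; the _bitexact intra and the
                    inter variants instead fold the parity of the coefficient sum
                    into block[63] (mismatch control). */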
02181 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02182                                    DCTELEM *block, int n, int qscale)
02183 {
02184     int i, level, nCoeffs;
02185     const uint16_t *quant_matrix;
02186 
02187     nCoeffs= s->block_last_index[n];
02188 
02189     if (n < 4)
02190         block[0] = block[0] * s->y_dc_scale;
02191     else
02192         block[0] = block[0] * s->c_dc_scale;
02193     /* XXX: only mpeg1 */
02194     quant_matrix = s->intra_matrix;
02195     for(i=1;i<=nCoeffs;i++) {
02196         int j= s->intra_scantable.permutated[i];
02197         level = block[j];
02198         if (level) {
02199             if (level < 0) {
02200                 level = -level;
02201                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02202                 level = (level - 1) | 1;
02203                 level = -level;
02204             } else {
02205                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02206                 level = (level - 1) | 1;
02207             }
02208             block[j] = level;
02209         }
02210     }
02211 }
02212 
02213 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02214                                    DCTELEM *block, int n, int qscale)
02215 {
02216     int i, level, nCoeffs;
02217     const uint16_t *quant_matrix;
02218 
02219     nCoeffs= s->block_last_index[n];
02220 
02221     quant_matrix = s->inter_matrix;
02222     for(i=0; i<=nCoeffs; i++) {
02223         int j= s->intra_scantable.permutated[i];
02224         level = block[j];
02225         if (level) {
02226             if (level < 0) {
02227                 level = -level;
02228                 level = (((level << 1) + 1) * qscale *
02229                          ((int) (quant_matrix[j]))) >> 4;
02230                 level = (level - 1) | 1;
02231                 level = -level;
02232             } else {
02233                 level = (((level << 1) + 1) * qscale *
02234                          ((int) (quant_matrix[j]))) >> 4;
02235                 level = (level - 1) | 1;
02236             }
02237             block[j] = level;
02238         }
02239     }
02240 }
02241 
02242 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02243                                    DCTELEM *block, int n, int qscale)
02244 {
02245     int i, level, nCoeffs;
02246     const uint16_t *quant_matrix;
02247 
02248     if(s->alternate_scan) nCoeffs= 63;
02249     else nCoeffs= s->block_last_index[n];
02250 
02251     if (n < 4)
02252         block[0] = block[0] * s->y_dc_scale;
02253     else
02254         block[0] = block[0] * s->c_dc_scale;
02255     quant_matrix = s->intra_matrix;
02256     for(i=1;i<=nCoeffs;i++) {
02257         int j= s->intra_scantable.permutated[i];
02258         level = block[j];
02259         if (level) {
02260             if (level < 0) {
02261                 level = -level;
02262                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02263                 level = -level;
02264             } else {
02265                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02266             }
02267             block[j] = level;
02268         }
02269     }
02270 }
02271 
02272 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02273                                    DCTELEM *block, int n, int qscale)
02274 {
02275     int i, level, nCoeffs;
02276     const uint16_t *quant_matrix;
02277     int sum=-1;
02278 
02279     if(s->alternate_scan) nCoeffs= 63;
02280     else nCoeffs= s->block_last_index[n];
02281 
02282     if (n < 4)
02283         block[0] = block[0] * s->y_dc_scale;
02284     else
02285         block[0] = block[0] * s->c_dc_scale;
02286     quant_matrix = s->intra_matrix;
02287     for(i=1;i<=nCoeffs;i++) {
02288         int j= s->intra_scantable.permutated[i];
02289         level = block[j];
02290         if (level) {
02291             if (level < 0) {
02292                 level = -level;
02293                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02294                 level = -level;
02295             } else {
02296                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02297             }
02298             block[j] = level;
02299             sum+=level;
02300         }
02301     }
02302     block[63]^=sum&1;
02303 }
02304 
02305 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02306                                    DCTELEM *block, int n, int qscale)
02307 {
02308     int i, level, nCoeffs;
02309     const uint16_t *quant_matrix;
02310     int sum=-1;
02311 
02312     if(s->alternate_scan) nCoeffs= 63;
02313     else nCoeffs= s->block_last_index[n];
02314 
02315     quant_matrix = s->inter_matrix;
02316     for(i=0; i<=nCoeffs; i++) {
02317         int j= s->intra_scantable.permutated[i];
02318         level = block[j];
02319         if (level) {
02320             if (level < 0) {
02321                 level = -level;
02322                 level = (((level << 1) + 1) * qscale *
02323                          ((int) (quant_matrix[j]))) >> 4;
02324                 level = -level;
02325             } else {
02326                 level = (((level << 1) + 1) * qscale *
02327                          ((int) (quant_matrix[j]))) >> 4;
02328             }
02329             block[j] = level;
02330             sum+=level;
02331         }
02332     }
02333     block[63]^=sum&1;
02334 }
02335 
02336 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02337                                   DCTELEM *block, int n, int qscale)
02338 {
02339     int i, level, qmul, qadd;
02340     int nCoeffs;
02341 
02342     assert(s->block_last_index[n]>=0);
02343 
02344     qmul = qscale << 1;
02345 
02346     if (!s->h263_aic) {
02347         if (n < 4)
02348             block[0] = block[0] * s->y_dc_scale;
02349         else
02350             block[0] = block[0] * s->c_dc_scale;
02351         qadd = (qscale - 1) | 1;
02352     }else{
02353         qadd = 0;
02354     }
02355     if(s->ac_pred)
02356         nCoeffs=63;
02357     else
02358         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02359 
02360     for(i=1; i<=nCoeffs; i++) {
02361         level = block[i];
02362         if (level) {
02363             if (level < 0) {
02364                 level = level * qmul - qadd;
02365             } else {
02366                 level = level * qmul + qadd;
02367             }
02368             block[i] = level;
02369         }
02370     }
02371 }
02372 
02373 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02374                                   DCTELEM *block, int n, int qscale)
02375 {
02376     int i, level, qmul, qadd;
02377     int nCoeffs;
02378 
02379     assert(s->block_last_index[n]>=0);
02380 
02381     qadd = (qscale - 1) | 1;
02382     qmul = qscale << 1;
02383 
02384     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02385 
02386     for(i=0; i<=nCoeffs; i++) {
02387         level = block[i];
02388         if (level) {
02389             if (level < 0) {
02390                 level = level * qmul - qadd;
02391             } else {
02392                 level = level * qmul + qadd;
02393             }
02394             block[i] = level;
02395         }
02396     }
02397 }
02398 
02402 void ff_set_qscale(MpegEncContext * s, int qscale)
02403 {
02404     if (qscale < 1)
02405         qscale = 1;
02406     else if (qscale > 31)
02407         qscale = 31;
02408 
02409     s->qscale = qscale;
02410     s->chroma_qscale= s->chroma_qscale_table[qscale];
02411 
02412     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02413     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02414 }
