Libav 0.7.1
/*
 * Copyright (c) 2003 The Libav Project
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * How to use this decoder:
 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
 * have stsd atoms to describe media trak properties. A stsd atom for a
 * video trak contains 1 or more ImageDescription atoms. These atoms begin
 * with the 4-byte length of the atom followed by the codec fourcc. Some
 * decoders need information in this atom to operate correctly. Such
 * is the case with SVQ3. In order to get the best use out of this decoder,
 * the calling app must make the SVQ3 ImageDescription atom available
 * via the AVCodecContext's extradata[_size] field:
 *
 * AVCodecContext.extradata = pointer to ImageDescription, first characters
 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
 * AVCodecContext.extradata_size = size of ImageDescription atom memory
 * buffer (which will be the same as the ImageDescription atom size field
 * from the QT file, minus 4 bytes since the length is missing)
 *
 * You will know you have these parameters passed correctly when the decoder
 * correctly decodes this file:
 *  http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
 */
#include "internal.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264.h"

#include "h264data.h" //FIXME FIXME FIXME

#include "h264_mvpred.h"
#include "golomb.h"
#include "rectangle.h"
#include "vdpau_internal.h"

#if CONFIG_ZLIB
#include <zlib.h>
#endif

#include "svq1.h"

typedef struct {
    H264Context h;
    int halfpel_flag;
    int thirdpel_flag;
    int unknown_flag;
    int next_slice_index;
    uint32_t watermark_key;
} SVQ3Context;

#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4

/* dual scan (from some older h264 draft)
 o-->o-->o   o
         |  /|
 o   o   o / o
 | / |   |/  |
 o   o   o   o
   /
 o-->o-->o-->o
*/
static const uint8_t svq3_scan[16] = {
    0+0*4, 1+0*4, 2+0*4, 2+1*4,
    2+2*4, 3+0*4, 3+1*4, 3+2*4,
    0+1*4, 0+2*4, 1+1*4, 1+2*4,
    0+3*4, 1+3*4, 2+3*4, 3+3*4,
};

static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};

static const int8_t svq3_pred_1[6][6][5] = {
    { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
      { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};

static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};

static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,  6154,  6914,  7761,  8718,
     9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
    24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
    61694, 68745, 77615, 89113,100253,109366,126635,141533
};

void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp){
    const int qmul = svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    static const uint8_t x_offset[4]={0, 1*stride, 4*stride, 5*stride};

    for(i=0; i<4; i++){
        const int z0 = 13*(input[4*i+0] +    input[4*i+2]);
        const int z1 = 13*(input[4*i+0] -    input[4*i+2]);
        const int z2 =  7* input[4*i+1] - 17*input[4*i+3];
        const int z3 = 17* input[4*i+1] +  7*input[4*i+3];

        temp[4*i+0] = z0+z3;
        temp[4*i+1] = z1+z2;
        temp[4*i+2] = z1-z2;
        temp[4*i+3] = z0-z3;
    }

    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= 13*(temp[4*0+i] +    temp[4*2+i]);
        const int z1= 13*(temp[4*0+i] -    temp[4*2+i]);
        const int z2=  7* temp[4*1+i] - 17*temp[4*3+i];
        const int z3= 17* temp[4*1+i] +  7*temp[4*3+i];

        output[stride* 0+offset] = ((z0 + z3)*qmul + 0x80000) >> 20;
        output[stride* 2+offset] = ((z1 + z2)*qmul + 0x80000) >> 20;
        output[stride* 8+offset] = ((z1 - z2)*qmul + 0x80000) >> 20;
        output[stride*10+offset] = ((z0 - z3)*qmul + 0x80000) >> 20;
    }
}
#undef stride

void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
                        int dc)
{
    const int qmul = svq3_dequant_coeff[qp];
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    if (dc) {
        dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
        block[0] = 0;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[0 + 4*i] +    block[2 + 4*i]);
        const int z1 = 13*(block[0 + 4*i] -    block[2 + 4*i]);
        const int z2 =  7* block[1 + 4*i] - 17*block[3 + 4*i];
        const int z3 = 17* block[1 + 4*i] +  7*block[3 + 4*i];

        block[0 + 4*i] = z0 + z3;
        block[1 + 4*i] = z1 + z2;
        block[2 + 4*i] = z1 - z2;
        block[3 + 4*i] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[i + 4*0] +    block[i + 4*2]);
        const int z1 = 13*(block[i + 4*0] -    block[i + 4*2]);
        const int z2 =  7* block[i + 4*1] - 17*block[i + 4*3];
        const int z3 = 17* block[i + 4*1] +  7*block[i + 4*3];
        const int rr = (dc + 0x80000);

        dst[i + stride*0] = cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
        dst[i + stride*1] = cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
        dst[i + stride*2] = cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
        dst[i + stride*3] = cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
    }
}

static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
                                    int index, const int type)
{
    static const uint8_t *const scan_patterns[4] =
    { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };

    int run, level, sign, vlc, limit;
    const int intra = (3 * type) >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {

            if (vlc == INVALID_VLC)
                return -1;

            sign = (vlc & 0x1) - 1;
            vlc  = (vlc + 1) >> 1;

            if (type == 3) {
                if (vlc < 3) {
                    run   = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run   = 1;
                    level = 1;
                } else {
                    run   = (vlc & 0x3);
                    level = ((vlc + 9) >> 2) - run;
                }
            } else {
                if (vlc < 16) {
                    run   = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    run   = (vlc & 0x7);
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run   = (vlc & 0xF);
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            if ((index += run) >= limit)
                return -1;

            block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}

static inline void svq3_mc_dir_part(MpegEncContext *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width>>3); //16->0, 8->1, 4->2

    mx += x;
    my += y;

    if (mx < 0 || mx >= (s->h_edge_pos - width  - 1) ||
        my < 0 || my >= (s->v_edge_pos - height - 1)) {

        if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
            emu = 1;
        }

        mx = av_clip (mx, -16, (s->h_edge_pos - width  + 15));
        my = av_clip (my, -16, (s->v_edge_pos - height + 15));
    }

    /* form component predictions */
    dest = s->current_picture.data[0] + x + y*s->linesize;
    src  = pic->data[0] + mx + my*s->linesize;

    if (emu) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
                                mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
    else
        (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);

    if (!(s->flags & CODEC_FLAG_GRAY)) {
        mx     = (mx + (mx < (int) x)) >> 1;
        my     = (my + (my < (int) y)) >> 1;
        width  = (width  >> 1);
        height = (height >> 1);
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
            src  = pic->data[i] + mx + my*s->uvlinesize;

            if (emu) {
                s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
                                        mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
            else
                (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
        }
    }
}

static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
                              int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned) (size + 1) / 3);
    const int extra_width  = (mode == PREDICT_MODE) ? -16*6 : 0;
    const int h_edge_pos   = 6*(s->h_edge_pos - part_width ) - extra_width;
    const int v_edge_pos   = 6*(s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height) {
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4*s->mb_x + (j >> 2)) + (4*s->mb_y + (i >> 2))*h->b_stride;
            int dxy;
            x = 16*s->mb_x + j;
            y = 16*s->mb_y + i;
            k = ((j >> 2) & 1) + ((i >> 1) & 2) + ((j >> 1) & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
            } else {
                mx = s->next_picture.motion_val[0][b_xy][0]<<1;
                my = s->next_picture.motion_val[0][b_xy][1]<<1;

                if (dir == 0) {
                    mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
                    my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
                } else {
                    mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
                    my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6*x, h_edge_pos - 6*x);
            my = av_clip(my, extra_width - 6*y, v_edge_pos - 6*y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = svq3_get_se_golomb(&s->gb);
                dx = svq3_get_se_golomb(&s->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = ((mx + 1)>>1) + dx;
                my  = ((my + 1)>>1) + dy;
                fx  = ((unsigned)(mx + 0x3000))/3 - 0x1000;
                fy  = ((unsigned)(my + 0x3000))/3 - 0x1000;
                dxy = (mx - 3*fx) + 4*(my - 3*fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
                my  = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
                dxy = (mx&1) + 2*(my&1);

                svq3_mc_dir_part(s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
                my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;

                svq3_mc_dir_part(s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx,my);

                if (part_height == 8 && i < 8) {
                    *(int32_t *) h->mv_cache[dir][scan8[k] + 1*8] = mv;

                    if (part_width == 8 && j < 8) {
                        *(int32_t *) h->mv_cache[dir][scan8[k] + 1 + 1*8] = mv;
                    }
                }
                if (part_width == 8 && j < 8) {
                    *(int32_t *) h->mv_cache[dir][scan8[k] + 1] = mv;
                }
                if (part_width == 4 || part_height == 4) {
                    *(int32_t *) h->mv_cache[dir][scan8[k]] = mv;
                }
            }

            /* write back motion vectors */
            fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
        }
    }

    return 0;
}

static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
{
    H264Context *h = &svq3->h;
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int mb_xy = h->mb_xy;
    const int b_xy  = 4*s->mb_x + 4*s->mb_y*h->b_stride;

    h->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    h->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    h->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.mb_type[mb_xy] == -1) {
            svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);

            if (s->pict_type == AV_PICTURE_TYPE_B) {
                svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
            }

            mb_type = MB_TYPE_SKIP;
        } else {
            mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
            if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        if (svq3->thirdpel_flag && svq3->halfpel_flag == !get_bits1 (&s->gb)) {
            mode = THIRDPEL_MODE;
        } else if (svq3->halfpel_flag && svq3->thirdpel_flag == !get_bits1 (&s->gb)) {
            mode = HALFPEL_MODE;
        } else {
            mode = FULLPEL_MODE;
        }

        /* fill caches */
        /* note ref_cache should contain here:
            ????????
            ???11111
            N??11111
            N??11111
            N??11111
        */

        for (m = 0; m < 2; m++) {
            if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6] != -1) {
                for (i = 0; i < 4; i++) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
                }
            } else {
                for (i = 0; i < 4; i++) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
                }
            }
            if (s->mb_y > 0) {
                memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
                memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < (s->mb_width - 1)) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
                    h->ref_cache[m][scan8[0] + 4 - 1*8] =
                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1]+6] == -1 ||
                         h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride    ]  ] == -1) ? PART_NOT_AVAILABLE : 1;
                }else
                    h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
                if (s->mb_x > 0) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] == -1) ? PART_NOT_AVAILABLE : 1;
                }else
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
            }else
                memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);

            if (s->pict_type != AV_PICTURE_TYPE_B)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
                return -1;
        } else {        /* AV_PICTURE_TYPE_B */
            if (mb_type != 2) {
                if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));

        if (mb_type == 8) {
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++) {
                    h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6-i];
                }
                if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
                    h->left_samples_available = 0x5F5F;
                }
            }
            if (s->mb_y > 0) {
                h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+0];
                h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+1];
                h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+2];
                h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+3];

                if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
                    h->top_samples_available = 0x33FF;
                }
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i+=2) {
                vlc = svq3_get_ue_golomb(&s->gb);

                if (vlc >= 25){
                    av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
                    return -1;
                }

                left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1){
                    av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {    /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++) {
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
            }
        }

        ff_h264_write_back_intra_pred_mode(h);

        if (mb_type == 8) {
            ff_h264_check_intra4x4_pred_mode(h);

            h->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++) {
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
            }

            h->top_samples_available  = 0x33FF;
            h->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;

        if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1){
            av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
            return -1;
        }

        cbp = i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++) {
            memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
        }
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++) {
                memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
            }
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
        memset(h->non_zero_count_cache + 8, 0, 14*8*sizeof(uint8_t));
        s->dsp.clear_blocks(h->mb+  0);
        s->dsp.clear_blocks(h->mb+384);
    }

    if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){
            av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
    }
    if (IS_INTRA16x16(mb_type) || (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        s->qscale += svq3_get_se_golomb(&s->gb);

        if (s->qscale > 31u){
            av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(h->mb_luma_dc[0]+0);
        AV_ZERO128(h->mb_luma_dc[0]+8);
        if (svq3_decode_block(&s->gb, h->mb_luma_dc, 0, 1)){
            av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type  = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++) {
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
                    h->non_zero_count_cache[ scan8[k] ] = 1;

                    if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){
                        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
                        return -1;
                    }
                }
            }
        }

        if ((cbp & 0x30)) {
            for (i = 1; i < 3; ++i) {
                if (svq3_decode_block(&s->gb, &h->mb[16*16*i], 0, 3)){
                    av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
                    return -1;
                }
            }

            if ((cbp & 0x20)) {
                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        k = 16*i + j;
                        h->non_zero_count_cache[ scan8[k] ] = 1;

                        if (svq3_decode_block(&s->gb, &h->mb[16*k], 1, 1)){
                            av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
                            return -1;
                        }
                    }
                }
            }
        }
    }

    h->cbp= cbp;
    s->current_picture.mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type)) {
        h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
    }

    return 0;
}

static int svq3_decode_slice_header(AVCodecContext *avctx)
{
    SVQ3Context *svq3 = avctx->priv_data;
    H264Context *h = &svq3->h;
    MpegEncContext *s = &h->s;
    const int mb_xy = h->mb_xy;
    int i, header;

    header = get_bits(&s->gb, 8);

    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        /* TODO: what? */
        av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        return -1;
    } else {
        int length = (header >> 5) & 3;

        svq3->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;

        if (svq3->next_slice_index > s->gb.size_in_bits) {
            av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return -1;
        }

        s->gb.size_in_bits = svq3->next_slice_index - 8*(length - 1);
        skip_bits(&s->gb, 8);

        if (svq3->watermark_key) {
            uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1]);
            AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ svq3->watermark_key);
        }
        if (length > 0) {
            memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
                   &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
        }
        skip_bits_long(&s->gb, 0);
    }

    if ((i = svq3_get_ue_golomb(&s->gb)) == INVALID_VLC || i >= 3){
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i);
        return -1;
    }

    h->slice_type = golomb_to_pict_type[i];

    if ((header & 0x9F) == 2) {
        i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
        s->mb_skip_run = get_bits(&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
    } else {
        skip_bits1(&s->gb);
        s->mb_skip_run = 0;
    }

    h->slice_num = get_bits(&s->gb, 8);
    s->qscale = get_bits(&s->gb, 5);
    s->adaptive_quant = get_bits1(&s->gb);

    /* unknown fields */
    skip_bits1(&s->gb);

    if (svq3->unknown_flag) {
        skip_bits1(&s->gb);
    }

    skip_bits1(&s->gb);
    skip_bits(&s->gb, 2);

    while (get_bits1(&s->gb)) {
        skip_bits(&s->gb, 8);
    }

    /* reset intra predictors and invalidate motion vector references */
    if (s->mb_x > 0) {
        memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - 1           ]+3, -1, 4*sizeof(int8_t));
        memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - s->mb_x     ]  , -1, 8*sizeof(int8_t)*s->mb_x);
    }
    if (s->mb_y > 0) {
        memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));

        if (s->mb_x > 0) {
            h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] = -1;
        }
    }

    return 0;
}

static av_cold int svq3_decode_init(AVCodecContext *avctx)
{
    SVQ3Context *svq3 = avctx->priv_data;
    H264Context *h = &svq3->h;
    MpegEncContext *s = &h->s;
    int m;
    unsigned char *extradata;
    unsigned char *extradata_end;
    unsigned int size;
    int marker_found = 0;

    if (ff_h264_decode_init(avctx) < 0)
        return -1;

    s->flags  = avctx->flags;
    s->flags2 = avctx->flags2;
    s->unrestricted_mv = 1;
    h->is_complex=1;
    avctx->pix_fmt = avctx->codec->pix_fmts[0];

    if (!s->context_initialized) {
        h->chroma_qp[0] = h->chroma_qp[1] = 4;

        svq3->halfpel_flag  = 1;
        svq3->thirdpel_flag = 1;
        svq3->unknown_flag  = 0;

        /* prowl for the "SEQH" marker in the extradata */
        extradata = (unsigned char *)avctx->extradata;
        extradata_end = avctx->extradata + avctx->extradata_size;
        if (extradata) {
            for (m = 0; m + 8 < avctx->extradata_size; m++) {
                if (!memcmp(extradata, "SEQH", 4)) {
                    marker_found = 1;
                    break;
                }
                extradata++;
            }
        }

        /* if a match was found, parse the extra data */
        if (marker_found) {

            GetBitContext gb;
            int frame_size_code;

            size = AV_RB32(&extradata[4]);
            if (size > extradata_end - extradata - 8)
                return AVERROR_INVALIDDATA;
            init_get_bits(&gb, extradata + 8, size*8);

            /* 'frame size code' and optional 'width, height' */
            frame_size_code = get_bits(&gb, 3);
            switch (frame_size_code) {
                case 0: avctx->width = 160; avctx->height = 120; break;
                case 1: avctx->width = 128; avctx->height =  96; break;
                case 2: avctx->width = 176; avctx->height = 144; break;
                case 3: avctx->width = 352; avctx->height = 288; break;
                case 4: avctx->width = 704; avctx->height = 576; break;
                case 5: avctx->width = 240; avctx->height = 180; break;
                case 6: avctx->width = 320; avctx->height = 240; break;
                case 7:
                    avctx->width  = get_bits(&gb, 12);
                    avctx->height = get_bits(&gb, 12);
                    break;
            }

            svq3->halfpel_flag  = get_bits1(&gb);
            svq3->thirdpel_flag = get_bits1(&gb);

            /* unknown fields */
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);

            s->low_delay = get_bits1(&gb);

            /* unknown field */
            skip_bits1(&gb);

            while (get_bits1(&gb)) {
                skip_bits(&gb, 8);
            }

            svq3->unknown_flag = get_bits1(&gb);
            avctx->has_b_frames = !s->low_delay;
            if (svq3->unknown_flag) {
#if CONFIG_ZLIB
                unsigned watermark_width  = svq3_get_ue_golomb(&gb);
                unsigned watermark_height = svq3_get_ue_golomb(&gb);
                int u1 = svq3_get_ue_golomb(&gb);
                int u2 = get_bits(&gb, 8);
                int u3 = get_bits(&gb, 2);
                int u4 = svq3_get_ue_golomb(&gb);
                unsigned long buf_len = watermark_width*watermark_height*4;
                int offset = (get_bits_count(&gb)+7)>>3;
                uint8_t *buf;

                if ((uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
                    return -1;

                buf = av_malloc(buf_len);
                av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n", watermark_width, watermark_height);
                av_log(avctx, AV_LOG_DEBUG, "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n", u1, u2, u3, u4, offset);
                if (uncompress(buf, &buf_len, extradata + 8 + offset, size - offset) != Z_OK) {
                    av_log(avctx, AV_LOG_ERROR, "could not uncompress watermark logo\n");
                    av_free(buf);
                    return -1;
                }
                svq3->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
                svq3->watermark_key = svq3->watermark_key << 16 | svq3->watermark_key;
                av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", svq3->watermark_key);
                av_free(buf);
#else
                av_log(avctx, AV_LOG_ERROR, "this svq3 file contains watermark which need zlib support compiled in\n");
                return -1;
#endif
            }
        }

        s->width  = avctx->width;
        s->height = avctx->height;

        if (MPV_common_init(s) < 0)
            return -1;

        h->b_stride = 4*s->mb_width;

        ff_h264_alloc_tables(h);
    }

    return 0;
}

static int svq3_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    SVQ3Context *svq3 = avctx->priv_data;
    H264Context *h = &svq3->h;
    MpegEncContext *s = &h->s;
    int buf_size = avpkt->size;
    int m, mb_type;

    /* special case for last picture */
    if (buf_size == 0) {
        if (s->next_picture_ptr && !s->low_delay) {
            *(AVFrame *) data = *(AVFrame *) &s->next_picture;
            s->next_picture_ptr = NULL;
            *data_size = sizeof(AVFrame);
        }
        return 0;
    }

    init_get_bits (&s->gb, buf, 8*buf_size);

    s->mb_x = s->mb_y = h->mb_xy = 0;

    if (svq3_decode_slice_header(avctx))
        return -1;

    s->pict_type = h->slice_type;
    s->picture_number = h->slice_num;

    if (avctx->debug&FF_DEBUG_PICT_INFO){
        av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_picture_type_char(s->pict_type), svq3->halfpel_flag, svq3->thirdpel_flag,
               s->adaptive_quant, s->qscale, h->slice_num);
    }

    /* for skipping the frame */
    s->current_picture.pict_type = s->pict_type;
    s->current_picture.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);

    /* Skip B-frames if we do not have reference frames. */
    if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
        return 0;
    if (  (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
        ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
        || avctx->skip_frame >= AVDISCARD_ALL)
        return 0;

    if (s->next_p_frame_damaged) {
        if (s->pict_type == AV_PICTURE_TYPE_B)
            return 0;
        else
            s->next_p_frame_damaged = 0;
    }

    if (ff_h264_frame_start(h) < 0)
        return -1;

    if (s->pict_type == AV_PICTURE_TYPE_B) {
        h->frame_num_offset = (h->slice_num - h->prev_frame_num);

        if (h->frame_num_offset < 0) {
            h->frame_num_offset += 256;
        }
        if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
            av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        h->prev_frame_num = h->frame_num;
        h->frame_num = h->slice_num;
        h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);

        if (h->prev_frame_num_offset < 0) {
            h->prev_frame_num_offset += 256;
        }
    }

    for (m = 0; m < 2; m++){
        int i;
        for (i = 0; i < 4; i++){
            int j;
            for (j = -1; j < 4; j++)
                h->ref_cache[m][scan8[0] + 8*i + j]= 1;
            if (i < 3)
                h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE;
        }
    }

    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;

            if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
                ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {

                skip_bits(&s->gb, svq3->next_slice_index - get_bits_count(&s->gb));
                s->gb.size_in_bits = 8*buf_size;

                if (svq3_decode_slice_header(avctx))
                    return -1;

                /* TODO: support s->mb_skip_run */
            }

            mb_type = svq3_get_ue_golomb(&s->gb);

            if (s->pict_type == AV_PICTURE_TYPE_I) {
                mb_type += 8;
            } else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4) {
                mb_type += 4;
            }
            if ((unsigned)mb_type > 33 || svq3_decode_mb(svq3, mb_type)) {
                av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            if (mb_type != 0) {
                ff_h264_hl_decode_mb (h);
            }

            if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay) {
                s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
                    (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
            }
        }

        ff_draw_horiz_band(s, 16*s->mb_y, 16);
    }

    MPV_frame_end(s);

    if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
        *(AVFrame *) data = *(AVFrame *) &s->current_picture;
    } else {
        *(AVFrame *) data = *(AVFrame *) &s->last_picture;
    }

    /* Do not output the last pic after seeking. */
    if (s->last_picture_ptr || s->low_delay) {
        *data_size = sizeof(AVFrame);
    }

    return buf_size;
}

static int svq3_decode_end(AVCodecContext *avctx)
{
    SVQ3Context *svq3 = avctx->priv_data;
    H264Context *h = &svq3->h;
    MpegEncContext *s = &h->s;

    ff_h264_free_context(h);

    MPV_common_end(s);

    return 0;
}

AVCodec ff_svq3_decoder = {
    "svq3",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_SVQ3,
    sizeof(SVQ3Context),
    svq3_decode_init,
    NULL,
    svq3_decode_end,
    svq3_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_NONE},
};
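
/*
 * Editor's note: the sketch below is NOT part of svq3.c. It is a minimal,
 * hedged illustration of the extradata contract described in the header
 * comment at the top of this file, written against the public libavcodec
 * API of the Libav 0.7 era (avcodec_find_decoder, avcodec_alloc_context,
 * avcodec_open). The names 'imagedesc' and 'imagedesc_size' are hypothetical:
 * they stand for the ImageDescription payload a demuxer would hand over,
 * starting at the 'SVQ3' fourcc, i.e. with the leading 4-byte atom length
 * already stripped off. Error-path cleanup is omitted for brevity.
 */
#if 0   /* illustration only; not compiled as part of the decoder */
#include <string.h>
#include "libavcodec/avcodec.h"
#include "libavutil/mem.h"

AVCodecContext *open_svq3_decoder(const uint8_t *imagedesc, int imagedesc_size)
{
    AVCodec *codec;
    AVCodecContext *ctx;

    avcodec_register_all();

    codec = avcodec_find_decoder(CODEC_ID_SVQ3);
    if (!codec)
        return NULL;

    ctx = avcodec_alloc_context();
    if (!ctx)
        return NULL;

    /* Hand the ImageDescription atom (minus its 4-byte length field) to the
     * decoder; it must start with the characters 'S', 'V', 'Q', '3'. */
    ctx->extradata = av_mallocz(imagedesc_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!ctx->extradata)
        return NULL;
    memcpy(ctx->extradata, imagedesc, imagedesc_size);
    ctx->extradata_size = imagedesc_size;

    if (avcodec_open(ctx, codec) < 0)
        return NULL;

    return ctx;
}
#endif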