/*
 * Copyright (C) 2003-2004 the ffmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "avcodec.h"
#include "dsputil.h"
#include "get_bits.h"

#include "vp3data.h"
#include "xiph.h"

#define FRAGMENT_PIXELS 8

static av_cold int vp3_decode_end(AVCodecContext *avctx);

//FIXME split things out into their own arrays
typedef struct Vp3Fragment {
    int16_t dc;
    uint8_t coding_method;
    uint8_t qpi;
} Vp3Fragment;

#define SB_NOT_CODED        0
#define SB_PARTIALLY_CODED  1
#define SB_FULLY_CODED      2

// This is the maximum length of a single long bit run that can be encoded
// for superblock coding or block qps. Theora special-cases this to read a
// bit instead of flipping the current bit to allow for runs longer than 4129.
#define MAXIMUM_LONG_BIT_RUN 4129

#define MODE_INTER_NO_MV      0
#define MODE_INTRA            1
#define MODE_INTER_PLUS_MV    2
#define MODE_INTER_LAST_MV    3
#define MODE_INTER_PRIOR_LAST 4
#define MODE_USING_GOLDEN     5
#define MODE_GOLDEN_MV        6
#define MODE_INTER_FOURMV     7
#define CODING_MODE_COUNT     8

/* special internal mode */
#define MODE_COPY             8

/* There are 6 preset schemes, plus a free-form scheme */
static const int ModeAlphabet[6][CODING_MODE_COUNT] =
{
    /* scheme 1: Last motion vector dominates */
    { MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
      MODE_INTER_PLUS_MV,    MODE_INTER_NO_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 2 */
    { MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
      MODE_INTER_NO_MV,      MODE_INTER_PLUS_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 3 */
    { MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
      MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 4 */
    { MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
      MODE_INTER_NO_MV,      MODE_INTER_PRIOR_LAST,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 5: No motion vector dominates */
    { MODE_INTER_NO_MV,      MODE_INTER_LAST_MV,
      MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 6 */
    { MODE_INTER_NO_MV,      MODE_USING_GOLDEN,
      MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
      MODE_INTER_PLUS_MV,    MODE_INTRA,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

};

static const uint8_t hilbert_offset[16][2] = {
    {0,0}, {1,0}, {1,1}, {0,1},
    {0,2}, {0,3}, {1,3}, {1,2},
    {2,2}, {2,3}, {3,3}, {3,2},
    {3,1}, {2,1}, {2,0}, {3,0}
};

#define MIN_DEQUANT_VAL 2

typedef struct Vp3DecodeContext {
    AVCodecContext *avctx;
    int theora, theora_tables;
    int version;
    int width, height;
    int chroma_x_shift, chroma_y_shift;
    AVFrame golden_frame;
    AVFrame last_frame;
    AVFrame current_frame;
    int keyframe;
    DSPContext dsp;
    int flipped_image;
    int last_slice_end;

    int qps[3];
    int nqps;
    int last_qps[3];

    int superblock_count;
    int y_superblock_width;
    int y_superblock_height;
    int y_superblock_count;
    int c_superblock_width;
    int c_superblock_height;
    int c_superblock_count;
    int u_superblock_start;
    int v_superblock_start;
    unsigned char *superblock_coding;

    int macroblock_count;
    int macroblock_width;
    int macroblock_height;

    int fragment_count;
    int fragment_width[2];
    int fragment_height[2];

    Vp3Fragment *all_fragments;
    int fragment_start[3];
    int data_offset[3];

    int8_t (*motion_val[2])[2];

    ScanTable scantable;

    /* tables */
    uint16_t coded_dc_scale_factor[64];
    uint32_t coded_ac_scale_factor[64];
    uint8_t base_matrix[384][64];
    uint8_t qr_count[2][3];
    uint8_t qr_size [2][3][64];
    uint16_t qr_base[2][3][64];

    int16_t *dct_tokens[3][64];
    int16_t *dct_tokens_base;
#define TOKEN_EOB(eob_run)              ((eob_run) << 2)
#define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1)
#define TOKEN_COEFF(coeff)              (((coeff) << 2) + 2)

    int num_coded_frags[3][64];
    int total_num_coded_frags;

    /* this is a list of indexes into the all_fragments array indicating
     * which of the fragments are coded */
    int *coded_fragment_list[3];

    VLC dc_vlc[16];
    VLC ac_vlc_1[16];
    VLC ac_vlc_2[16];
    VLC ac_vlc_3[16];
    VLC ac_vlc_4[16];

    VLC superblock_run_length_vlc;
    VLC fragment_run_length_vlc;
    VLC mode_code_vlc;
    VLC motion_vector_vlc;

    /* these arrays need to be on 16-byte boundaries since SSE2 operations
     * index into them */
    DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64];     ///< qmat[qpi][is_inter][plane]

    /* This table contains superblock_count * 16 entries. Each set of 16
     * numbers corresponds to the fragment indexes 0..15 of the superblock.
     * An entry will be -1 to indicate that no entry corresponds to that
     * index. */
    int *superblock_fragments;

    /* This is an array that indicates how a particular macroblock
     * is coded. */
    unsigned char *macroblock_coding;

    uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
    int8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16

    /* Huffman decode */
    int hti;
    unsigned int hbits;
    int entries;
    int huff_code_size;
    uint32_t huffman_table[80][32][2];

    uint8_t filter_limit_values[64];
    DECLARE_ALIGNED(8, int, bounding_values_array)[256+2];
} Vp3DecodeContext;

/************************************************************************
 * VP3 specific functions
 ************************************************************************/

/*
 * This function sets up all of the various block mappings:
 * superblocks <-> fragments, macroblocks <-> fragments,
 * superblocks <-> macroblocks
 *
 * Returns 0 if successful; returns 1 if *anything* went wrong.
 */
static int init_block_mapping(Vp3DecodeContext *s)
{
    int sb_x, sb_y, plane;
    int x, y, i, j = 0;

    for (plane = 0; plane < 3; plane++) {
        int sb_width    = plane ? s->c_superblock_width  : s->y_superblock_width;
        int sb_height   = plane ? s->c_superblock_height : s->y_superblock_height;
        int frag_width  = s->fragment_width[!!plane];
        int frag_height = s->fragment_height[!!plane];

        for (sb_y = 0; sb_y < sb_height; sb_y++)
            for (sb_x = 0; sb_x < sb_width; sb_x++)
                for (i = 0; i < 16; i++) {
                    x = 4*sb_x + hilbert_offset[i][0];
                    y = 4*sb_y + hilbert_offset[i][1];

                    if (x < frag_width && y < frag_height)
                        s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x;
                    else
                        s->superblock_fragments[j++] = -1;
                }
    }

    return 0;  /* successful path out */
}

/*
 * This function sets up the dequantization tables used for a particular
 * frame.
 */
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
{
    int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
    int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]];
    int i, plane, inter, qri, bmi, bmj, qistart;

    for(inter=0; inter<2; inter++){
        for(plane=0; plane<3; plane++){
            int sum=0;
            for(qri=0; qri<s->qr_count[inter][plane]; qri++){
                sum+= s->qr_size[inter][plane][qri];
                if(s->qps[qpi] <= sum)
                    break;
            }
            qistart= sum - s->qr_size[inter][plane][qri];
            bmi= s->qr_base[inter][plane][qri  ];
            bmj= s->qr_base[inter][plane][qri+1];
            for(i=0; i<64; i++){
                int coeff= (  2*(sum    -s->qps[qpi])*s->base_matrix[bmi][i]
                            - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i]
                            + s->qr_size[inter][plane][qri])
                           / (2*s->qr_size[inter][plane][qri]);

                int qmin= 8<<(inter + !i);
                int qscale= i ? ac_scale_factor : dc_scale_factor;

                s->qmat[qpi][inter][plane][s->dsp.idct_permutation[i]]= av_clip((qscale * coeff)/100 * 4, qmin, 4096);
            }
            // all DC coefficients use the same quant so as not to interfere with DC prediction
            s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
        }
    }

    memset(s->qscale_table, (FFMAX(s->qmat[0][0][0][1], s->qmat[0][0][1][1])+8)/16, 512); //FIXME finetune
}
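
/*
 * Worked example for init_dequantizer() above (illustrative numbers only,
 * not taken from any particular stream or from the VP31 tables):
 *
 * With a single quant range covering all 63 steps (qr_count == 1,
 * qr_size == 63, and both qr_base entries pointing at the same base
 * matrix, as the VP31 defaults in vp3_decode_init() set up), the
 * interpolation degenerates and coeff is simply the base matrix entry.
 * E.g. for an intra AC coefficient with base value 23 and an AC scale
 * factor of 100:
 *
 *     coeff = 23
 *     qmin  = 8 << (0 + 0) = 8                            // inter == 0, i != 0
 *     qmat  = av_clip((100 * 23) / 100 * 4, 8, 4096) = 92
 *
 * When a plane defines several quant ranges (Theora streams may do so),
 * coeff is linearly interpolated between the two bracketing base matrices
 * before the same scaling and clipping is applied.
 */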
/*
 * This function initializes the loop filter boundary limits if the frame's
 * quality index is different from the previous frame's.
 *
 * The filter_limit_values may not be larger than 127.
 */
static void init_loop_filter(Vp3DecodeContext *s)
{
    int *bounding_values= s->bounding_values_array+127;
    int filter_limit;
    int x;
    int value;

    filter_limit = s->filter_limit_values[s->qps[0]];

    /* set up the bounding values */
    memset(s->bounding_values_array, 0, 256 * sizeof(int));
    for (x = 0; x < filter_limit; x++) {
        bounding_values[-x] = -x;
        bounding_values[x] = x;
    }
    for (x = value = filter_limit; x < 128 && value; x++, value--) {
        bounding_values[ x] = value;
        bounding_values[-x] = -value;
    }
    if (value)
        bounding_values[128] = value;
    bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
}

/*
 * This function unpacks all of the superblock/macroblock/fragment coding
 * information from the bitstream.
 */
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
{
    int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start };
    int bit = 0;
    int current_superblock = 0;
    int current_run = 0;
    int num_partial_superblocks = 0;

    int i, j;
    int current_fragment;
    int plane;

    if (s->keyframe) {
        memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);

    } else {

        /* unpack the list of partially-coded superblocks */
        bit = get_bits1(gb);
        while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
            current_run = get_vlc2(gb,
                s->superblock_run_length_vlc.table, 6, 2) + 1;
            if (current_run == 34)
                current_run += get_bits(gb, 12);

            if (current_superblock + current_run > s->superblock_count) {
                av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n");
                return -1;
            }

            memset(s->superblock_coding + current_superblock, bit, current_run);

            current_superblock += current_run;
            if (bit)
                num_partial_superblocks += current_run;

            if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;
        }

        /* unpack the list of fully coded superblocks if any of the blocks were
         * not marked as partially coded in the previous step */
        if (num_partial_superblocks < s->superblock_count) {
            int superblocks_decoded = 0;

            current_superblock = 0;
            bit = get_bits1(gb);
            while (superblocks_decoded < s->superblock_count - num_partial_superblocks
                   && get_bits_left(gb) > 0) {
                current_run = get_vlc2(gb,
                    s->superblock_run_length_vlc.table, 6, 2) + 1;
                if (current_run == 34)
                    current_run += get_bits(gb, 12);

                for (j = 0; j < current_run; current_superblock++) {
                    if (current_superblock >= s->superblock_count) {
                        av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n");
                        return -1;
                    }

                    /* skip any superblocks already marked as partially coded */
                    if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
                        s->superblock_coding[current_superblock] = 2*bit;
                        j++;
                    }
                }
                superblocks_decoded += current_run;

                if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                    bit = get_bits1(gb);
                else
                    bit ^= 1;
            }
        }

        /* if there were partial blocks, initialize bitstream for
         * unpacking fragment codings */
        if (num_partial_superblocks) {

            current_run = 0;
            bit = get_bits1(gb);
            /* toggle the bit because as soon as the first run length is
             * fetched the bit will be toggled again */
            bit ^= 1;
        }
    }

    /* figure out which fragments are coded; iterate through each
     * superblock (all planes) */
    s->total_num_coded_frags = 0;
    memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);

    for (plane = 0; plane < 3; plane++) {
        int sb_start = superblock_starts[plane];
        int sb_end = sb_start + (plane ? s->c_superblock_count : s->y_superblock_count);
        int num_coded_frags = 0;

        for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {

            /* iterate through all 16 fragments in a superblock */
            for (j = 0; j < 16; j++) {

                /* if the fragment is in bounds, check its coding status */
                current_fragment = s->superblock_fragments[i * 16 + j];
                if (current_fragment != -1) {
                    int coded = s->superblock_coding[i];

                    if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {

                        /* fragment may or may not be coded; this is the case
                         * that cares about the fragment coding runs */
                        if (current_run-- == 0) {
                            bit ^= 1;
                            current_run = get_vlc2(gb,
                                s->fragment_run_length_vlc.table, 5, 2);
                        }
                        coded = bit;
                    }

                    if (coded) {
                        /* default mode; actual mode will be decoded in
                         * the next phase */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_INTER_NO_MV;
                        s->coded_fragment_list[plane][num_coded_frags++] =
                            current_fragment;
                    } else {
                        /* not coded; copy this fragment from the prior frame */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_COPY;
                    }
                }
            }
        }
        s->total_num_coded_frags += num_coded_frags;
        for (i = 0; i < 64; i++)
            s->num_coded_frags[plane][i] = num_coded_frags;
        if (plane < 2)
            s->coded_fragment_list[plane+1] = s->coded_fragment_list[plane] + num_coded_frags;
    }
    return 0;
}

/*
 * This function unpacks all the coding mode data for individual macroblocks
 * from the bitstream.
 */
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i, j, k, sb_x, sb_y;
    int scheme;
    int current_macroblock;
    int current_fragment;
    int coding_mode;
    int custom_mode_alphabet[CODING_MODE_COUNT];
    const int *alphabet;
    Vp3Fragment *frag;

    if (s->keyframe) {
        for (i = 0; i < s->fragment_count; i++)
            s->all_fragments[i].coding_method = MODE_INTRA;

    } else {

        /* fetch the mode coding scheme for this frame */
        scheme = get_bits(gb, 3);

        /* is it a custom coding scheme? */
        if (scheme == 0) {
            for (i = 0; i < 8; i++)
                custom_mode_alphabet[i] = MODE_INTER_NO_MV;
            for (i = 0; i < 8; i++)
                custom_mode_alphabet[get_bits(gb, 3)] = i;
            alphabet = custom_mode_alphabet;
        } else
            alphabet = ModeAlphabet[scheme-1];

        /* iterate through all of the macroblocks that contain 1 or more
         * coded fragments */
        for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
            for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
                if (get_bits_left(gb) <= 0)
                    return -1;

                for (j = 0; j < 4; j++) {
                    int mb_x = 2*sb_x + (j>>1);
                    int mb_y = 2*sb_y + (((j>>1)+j)&1);
                    current_macroblock = mb_y * s->macroblock_width + mb_x;

                    if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height)
                        continue;

#define BLOCK_X (2*mb_x + (k&1))
#define BLOCK_Y (2*mb_y + (k>>1))
                    /* coding modes are only stored if the macroblock has at least one
                     * luma block coded, otherwise it must be INTER_NO_MV */
                    for (k = 0; k < 4; k++) {
                        current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                        if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
                            break;
                    }
                    if (k == 4) {
                        s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
                        continue;
                    }

                    /* mode 7 means get 3 bits for each coding mode */
                    if (scheme == 7)
                        coding_mode = get_bits(gb, 3);
                    else
                        coding_mode = alphabet
                            [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];

                    s->macroblock_coding[current_macroblock] = coding_mode;
                    for (k = 0; k < 4; k++) {
                        frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                        if (frag->coding_method != MODE_COPY)
                            frag->coding_method = coding_mode;
                    }

#define SET_CHROMA_MODES \
    if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
        frag[s->fragment_start[1]].coding_method = coding_mode;\
    if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
        frag[s->fragment_start[2]].coding_method = coding_mode;

                    if (s->chroma_y_shift) {
                        frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x;
                        SET_CHROMA_MODES
                    } else if (s->chroma_x_shift) {
                        frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x;
                        for (k = 0; k < 2; k++) {
                            SET_CHROMA_MODES
                            frag += s->fragment_width[1];
                        }
                    } else {
                        for (k = 0; k < 4; k++) {
                            frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X;
                            SET_CHROMA_MODES
                        }
                    }
                }
            }
        }
    }

    return 0;
}
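
/*
 * Index arithmetic used by unpack_modes() above and unpack_vectors() below
 * (a worked example; the numbers are illustrative only):
 *
 * Each Y superblock covers a 2x2 group of macroblocks. The inner j loop
 * visits them in the order (0,0), (0,1), (1,1), (1,0) relative to the
 * superblock, via
 *     mb_x = 2*sb_x + (j>>1)
 *     mb_y = 2*sb_y + (((j>>1)+j)&1)
 * and each macroblock in turn covers a 2x2 group of 8x8 luma fragments,
 * addressed in raster order by BLOCK_X/BLOCK_Y.
 *
 * For example, with sb_x = 1, sb_y = 0 and j = 2 this gives mb_x = 3,
 * mb_y = 1; with k = 3, BLOCK_X = 7 and BLOCK_Y = 3, so the luma fragment
 * index is 3*s->fragment_width[0] + 7.
 */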
/*
 * This function unpacks all the motion vectors for the individual
 * macroblocks from the bitstream.
 */
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
{
    int j, k, sb_x, sb_y;
    int coding_mode;
    int motion_x[4];
    int motion_y[4];
    int last_motion_x = 0;
    int last_motion_y = 0;
    int prior_last_motion_x = 0;
    int prior_last_motion_y = 0;
    int current_macroblock;
    int current_fragment;
    int frag;

    if (s->keyframe)
        return 0;

    /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
    coding_mode = get_bits1(gb);

    /* iterate through all of the macroblocks that contain 1 or more
     * coded fragments */
    for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
        for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
            if (get_bits_left(gb) <= 0)
                return -1;

            for (j = 0; j < 4; j++) {
                int mb_x = 2*sb_x + (j>>1);
                int mb_y = 2*sb_y + (((j>>1)+j)&1);
                current_macroblock = mb_y * s->macroblock_width + mb_x;

                if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
                    (s->macroblock_coding[current_macroblock] == MODE_COPY))
                    continue;

                switch (s->macroblock_coding[current_macroblock]) {

                case MODE_INTER_PLUS_MV:
                case MODE_GOLDEN_MV:
                    /* all 6 fragments use the same motion vector */
                    if (coding_mode == 0) {
                        motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                        motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                    } else {
                        motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                        motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                    }

                    /* vector maintenance, only on MODE_INTER_PLUS_MV */
                    if (s->macroblock_coding[current_macroblock] ==
                        MODE_INTER_PLUS_MV) {
                        prior_last_motion_x = last_motion_x;
                        prior_last_motion_y = last_motion_y;
                        last_motion_x = motion_x[0];
                        last_motion_y = motion_y[0];
                    }
                    break;

                case MODE_INTER_FOURMV:
                    /* vector maintenance */
                    prior_last_motion_x = last_motion_x;
                    prior_last_motion_y = last_motion_y;

                    /* fetch 4 vectors from the bitstream, one for each
                     * Y fragment, then average for the C fragment vectors */
                    for (k = 0; k < 4; k++) {
                        current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                        if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
                            if (coding_mode == 0) {
                                motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                                motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                            } else {
                                motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                                motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                            }
                            last_motion_x = motion_x[k];
                            last_motion_y = motion_y[k];
                        } else {
                            motion_x[k] = 0;
                            motion_y[k] = 0;
                        }
                    }
                    break;

                case MODE_INTER_LAST_MV:
                    /* all 6 fragments use the last motion vector */
                    motion_x[0] = last_motion_x;
                    motion_y[0] = last_motion_y;

                    /* no vector maintenance (last vector remains the
                     * last vector) */
                    break;

                case MODE_INTER_PRIOR_LAST:
                    /* all 6 fragments use the motion vector prior to the
                     * last motion vector */
                    motion_x[0] = prior_last_motion_x;
                    motion_y[0] = prior_last_motion_y;

                    /* vector maintenance */
                    prior_last_motion_x = last_motion_x;
                    prior_last_motion_y = last_motion_y;
                    last_motion_x = motion_x[0];
                    last_motion_y = motion_y[0];
                    break;

                default:
                    /* covers intra, inter without MV, golden without MV */
                    motion_x[0] = 0;
                    motion_y[0] = 0;

                    /* no vector maintenance */
                    break;
                }

                /* assign the motion vectors to the correct fragments */
                for (k = 0; k < 4; k++) {
                    current_fragment =
                        BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                        s->motion_val[0][current_fragment][0] = motion_x[k];
                        s->motion_val[0][current_fragment][1] = motion_y[k];
                    } else {
                        s->motion_val[0][current_fragment][0] = motion_x[0];
                        s->motion_val[0][current_fragment][1] = motion_y[0];
                    }
                }

                if (s->chroma_y_shift) {
                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                        motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2);
                        motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2);
                    }
                    motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
                    motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1);
                    frag = mb_y*s->fragment_width[1] + mb_x;
                    s->motion_val[1][frag][0] = motion_x[0];
                    s->motion_val[1][frag][1] = motion_y[0];
                } else if (s->chroma_x_shift) {
                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                        motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
                        motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
                        motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
                        motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
                    } else {
                        motion_x[1] = motion_x[0];
                        motion_y[1] = motion_y[0];
                    }
                    motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
                    motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1);

                    frag = 2*mb_y*s->fragment_width[1] + mb_x;
                    for (k = 0; k < 2; k++) {
                        s->motion_val[1][frag][0] = motion_x[k];
                        s->motion_val[1][frag][1] = motion_y[k];
                        frag += s->fragment_width[1];
                    }
                } else {
                    for (k = 0; k < 4; k++) {
                        frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X;
                        if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                            s->motion_val[1][frag][0] = motion_x[k];
                            s->motion_val[1][frag][1] = motion_y[k];
                        } else {
                            s->motion_val[1][frag][0] = motion_x[0];
                            s->motion_val[1][frag][1] = motion_y[0];
                        }
                    }
                }
            }
        }
    }

    return 0;
}

static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
{
    int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
    int num_blocks = s->total_num_coded_frags;

    for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
        i = blocks_decoded = num_blocks_at_qpi = 0;

        bit = get_bits1(gb);

        do {
            run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
            if (run_length == 34)
                run_length += get_bits(gb, 12);
            blocks_decoded += run_length;

            if (!bit)
                num_blocks_at_qpi += run_length;

            for (j = 0; j < run_length; i++) {
                if (i >= s->total_num_coded_frags)
                    return -1;

                if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
                    s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
                    j++;
                }
            }

            if (run_length == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;
        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);

        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}

/*
 * This function is called by unpack_dct_coeffs() to extract the VLCs from
 * the bitstream. The VLCs encode tokens which are used to unpack DCT
 * data. This function unpacks all the VLCs for either the Y plane or both
 * C planes, and is called for DC coefficients or different AC coefficient
 * levels (since different coefficient types require different VLC tables).
 *
 * This function returns a residual eob run. E.g., if a particular token gave
 * instructions to EOB the next 5 fragments and there were only 2 fragments
 * left in the current fragment range, 3 would be returned so that it could
 * be passed into the next call to this same function.
 */
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                       VLC *table, int coeff_index,
                       int plane,
                       int eob_run)
{
    int i, j = 0;
    int token;
    int zero_run = 0;
    DCTELEM coeff = 0;
    int bits_to_get;
    int blocks_ended;
    int coeff_i = 0;
    int num_coeffs = s->num_coded_frags[plane][coeff_index];
    int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];

    /* local references to structure members to avoid repeated dereferences */
    int *coded_fragment_list = s->coded_fragment_list[plane];
    Vp3Fragment *all_fragments = s->all_fragments;
    VLC_TYPE (*vlc_table)[2] = table->table;

    if (num_coeffs < 0)
        av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficients at level %d\n", coeff_index);

    if (eob_run > num_coeffs) {
        coeff_i = blocks_ended = num_coeffs;
        eob_run -= num_coeffs;
    } else {
        coeff_i = blocks_ended = eob_run;
        eob_run = 0;
    }

    // insert fake EOB token to cover the split between planes or zzi
    if (blocks_ended)
        dct_tokens[j++] = blocks_ended << 2;

    while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
        /* decode a VLC into a token */
        token = get_vlc2(gb, vlc_table, 11, 3);
        /* use the token to get a zero run, a coefficient, and an eob run */
        if (token <= 6) {
            eob_run = eob_run_base[token];
            if (eob_run_get_bits[token])
                eob_run += get_bits(gb, eob_run_get_bits[token]);

            // record only the number of blocks ended in this plane,
            // any spill will be recorded in the next plane.
            if (eob_run > num_coeffs - coeff_i) {
                dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
                blocks_ended += num_coeffs - coeff_i;
                eob_run -= num_coeffs - coeff_i;
                coeff_i = num_coeffs;
            } else {
                dct_tokens[j++] = TOKEN_EOB(eob_run);
                blocks_ended += eob_run;
                coeff_i += eob_run;
                eob_run = 0;
            }
        } else {
            bits_to_get = coeff_get_bits[token];
            if (bits_to_get)
                bits_to_get = get_bits(gb, bits_to_get);
            coeff = coeff_tables[token][bits_to_get];

            zero_run = zero_run_base[token];
            if (zero_run_get_bits[token])
                zero_run += get_bits(gb, zero_run_get_bits[token]);

            if (zero_run) {
                dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
            } else {
                // Save DC into the fragment structure. DC prediction is
                // done in raster order, so the actual DC can't be in with
                // other tokens. We still need the token in dct_tokens[]
                // however, or else the structure collapses on itself.
                if (!coeff_index)
                    all_fragments[coded_fragment_list[coeff_i]].dc = coeff;

                dct_tokens[j++] = TOKEN_COEFF(coeff);
            }

            if (coeff_index + zero_run > 64) {
                av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with"
                       " %d coeffs left\n", zero_run, 64-coeff_index);
                zero_run = 64 - coeff_index;
            }

            // zero runs code multiple coefficients,
            // so don't try to decode coeffs for those higher levels
            for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
                s->num_coded_frags[plane][i]--;
            coeff_i++;
        }
    }

    if (blocks_ended > s->num_coded_frags[plane][coeff_index])
        av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");

    // decrement the number of blocks that have higher coefficients for each
    // EOB run at this level
    if (blocks_ended)
        for (i = coeff_index+1; i < 64; i++)
            s->num_coded_frags[plane][i] -= blocks_ended;

    // setup the next buffer
    if (plane < 2)
        s->dct_tokens[plane+1][coeff_index] = dct_tokens + j;
    else if (coeff_index < 63)
        s->dct_tokens[0][coeff_index+1] = dct_tokens + j;

    return eob_run;
}

static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height);
/*
 * This function unpacks all of the DCT coefficient data from the
 * bitstream.
 */
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    int residual_eob_run = 0;
    VLC *y_tables[64];
    VLC *c_tables[64];

    s->dct_tokens[0][0] = s->dct_tokens_base;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* unpack the Y plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
        0, residual_eob_run);

    /* reverse prediction of the Y-plane DC coefficients */
    reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);

    /* unpack the C plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
        1, residual_eob_run);
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
        2, residual_eob_run);

    /* reverse prediction of the C-plane DC coefficients */
    if (!(s->avctx->flags & CODEC_FLAG_GRAY))
    {
        reverse_dc_prediction(s, s->fragment_start[1],
            s->fragment_width[1], s->fragment_height[1]);
        reverse_dc_prediction(s, s->fragment_start[2],
            s->fragment_width[1], s->fragment_height[1]);
    }

    /* fetch the AC table indexes */
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of AC VLC tables */
    for (i = 1; i <= 5; i++) {
        y_tables[i] = &s->ac_vlc_1[ac_y_table];
        c_tables[i] = &s->ac_vlc_1[ac_c_table];
    }
    for (i = 6; i <= 14; i++) {
        y_tables[i] = &s->ac_vlc_2[ac_y_table];
        c_tables[i] = &s->ac_vlc_2[ac_c_table];
    }
    for (i = 15; i <= 27; i++) {
        y_tables[i] = &s->ac_vlc_3[ac_y_table];
        c_tables[i] = &s->ac_vlc_3[ac_c_table];
    }
    for (i = 28; i <= 63; i++) {
        y_tables[i] = &s->ac_vlc_4[ac_y_table];
        c_tables[i] = &s->ac_vlc_4[ac_c_table];
    }

    /* decode all AC coefficients */
    for (i = 1; i <= 63; i++) {
        residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
            0, residual_eob_run);

        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
            1, residual_eob_run);
        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
            2, residual_eob_run);
    }

    return 0;
}

/*
 * This function reverses the DC prediction for each coded fragment in
 * the frame. Much of this function is adapted directly from the original
 * VP3 source code.
 */
#define COMPATIBLE_FRAME(x) \
  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
#define DC_COEFF(u) s->all_fragments[u].dc

static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height)
{

#define PUL 8
#define PU 4
#define PUR 2
#define PL 1

    int x, y;
    int i = first_fragment;

    int predicted_dc;

    /* DC values for the left, up-left, up, and up-right fragments */
    int vl, vul, vu, vur;

    /* indexes for the left, up-left, up, and up-right fragments */
    int l, ul, u, ur;

    /*
     * The 4 fields mean:
     *   0: up-left multiplier
     *   1: up multiplier
     *   2: up-right multiplier
     *   3: left multiplier
     */
    static const int predictor_transform[16][4] = {
        {    0,   0,   0,   0},
        {    0,   0,   0, 128},  // PL
        {    0,   0, 128,   0},  // PUR
        {    0,   0,  53,  75},  // PUR|PL
        {    0, 128,   0,   0},  // PU
        {    0,  64,   0,  64},  // PU|PL
        {    0, 128,   0,   0},  // PU|PUR
        {    0,   0,  53,  75},  // PU|PUR|PL
        {  128,   0,   0,   0},  // PUL
        {    0,   0,   0, 128},  // PUL|PL
        {   64,   0,  64,   0},  // PUL|PUR
        {    0,   0,  53,  75},  // PUL|PUR|PL
        {    0, 128,   0,   0},  // PUL|PU
        { -104, 116,   0, 116},  // PUL|PU|PL
        {   24,  80,  24,   0},  // PUL|PU|PUR
        { -104, 116,   0, 116}   // PUL|PU|PUR|PL
    };

    /* This table shows which types of blocks can use other blocks for
     * prediction. For example, INTRA is the only mode in this table to
     * have a frame number of 0. That means INTRA blocks can only predict
     * from other INTRA blocks. There are 2 golden frame coding types;
     * blocks encoded in these modes can only predict from other blocks
     * that were encoded with one of these 2 modes. */
    static const unsigned char compatible_frame[9] = {
        1,    /* MODE_INTER_NO_MV */
        0,    /* MODE_INTRA */
        1,    /* MODE_INTER_PLUS_MV */
        1,    /* MODE_INTER_LAST_MV */
        1,    /* MODE_INTER_PRIOR_LAST */
        2,    /* MODE_USING_GOLDEN */
        2,    /* MODE_GOLDEN_MV */
        1,    /* MODE_INTER_FOURMV */
        3     /* MODE_COPY */
    };
    int current_frame_type;

    /* there is a last DC predictor for each of the 3 frame types */
    short last_dc[3];

    int transform = 0;

    vul = vu = vur = vl = 0;
    last_dc[0] = last_dc[1] = last_dc[2] = 0;

    /* for each fragment row... */
    for (y = 0; y < fragment_height; y++) {

        /* for each fragment in a row... */
        for (x = 0; x < fragment_width; x++, i++) {

            /* reverse prediction if this block was coded */
            if (s->all_fragments[i].coding_method != MODE_COPY) {

                current_frame_type =
                    compatible_frame[s->all_fragments[i].coding_method];

                transform= 0;
                if(x){
                    l= i-1;
                    vl = DC_COEFF(l);
                    if(COMPATIBLE_FRAME(l))
                        transform |= PL;
                }
                if(y){
                    u= i-fragment_width;
                    vu = DC_COEFF(u);
                    if(COMPATIBLE_FRAME(u))
                        transform |= PU;
                    if(x){
                        ul= i-fragment_width-1;
                        vul = DC_COEFF(ul);
                        if(COMPATIBLE_FRAME(ul))
                            transform |= PUL;
                    }
                    if(x + 1 < fragment_width){
                        ur= i-fragment_width+1;
                        vur = DC_COEFF(ur);
                        if(COMPATIBLE_FRAME(ur))
                            transform |= PUR;
                    }
                }

                if (transform == 0) {

                    /* if there were no fragments to predict from, use last
                     * DC saved */
                    predicted_dc = last_dc[current_frame_type];
                } else {

                    /* apply the appropriate predictor transform */
                    predicted_dc =
                        (predictor_transform[transform][0] * vul) +
                        (predictor_transform[transform][1] * vu) +
                        (predictor_transform[transform][2] * vur) +
                        (predictor_transform[transform][3] * vl);

                    predicted_dc /= 128;

                    /* check for outranging on the [ul u l] and
                     * [ul u ur l] predictors */
                    if ((transform == 15) || (transform == 13)) {
                        if (FFABS(predicted_dc - vu) > 128)
                            predicted_dc = vu;
                        else if (FFABS(predicted_dc - vl) > 128)
                            predicted_dc = vl;
                        else if (FFABS(predicted_dc - vul) > 128)
                            predicted_dc = vul;
                    }
                }

                /* at long last, apply the predictor */
                DC_COEFF(i) += predicted_dc;
                /* save the DC */
                last_dc[current_frame_type] = DC_COEFF(i);
            }
        }
    }
}
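
/*
 * Worked example for the predictor transform above (illustrative numbers
 * only): with the left, up and up-left neighbours available and compatible,
 * transform = PUL|PU|PL = 13, which selects the weights {-104, 116, 0, 116}.
 * For vul = 100, vu = 110, vl = 105:
 *
 *     predicted_dc = (-104*100 + 116*110 + 116*105) / 128 = 14540 / 128 = 113
 *
 * Since |113 - vu|, |113 - vl| and |113 - vul| are all <= 128, the
 * outranging check leaves the prediction unchanged and 113 is added to the
 * fragment's decoded DC residual.
 */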
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
{
    int x, y;
    int *bounding_values= s->bounding_values_array+127;

    int width           = s->fragment_width[!!plane];
    int height          = s->fragment_height[!!plane];
    int fragment        = s->fragment_start[plane] + ystart * width;
    int stride          = s->current_frame.linesize[plane];
    uint8_t *plane_data = s->current_frame.data[plane];
    if (!s->flipped_image) stride = -stride;
    plane_data += s->data_offset[plane] + 8*ystart*stride;

    for (y = ystart; y < yend; y++) {

        for (x = 0; x < width; x++) {
            /* This code basically just deblocks on the edges of coded blocks.
             * However, it has to be much more complicated because of the
             * braindamaged deblock ordering used in VP3/Theora. Order matters
             * because some pixels get filtered twice. */
            if( s->all_fragments[fragment].coding_method != MODE_COPY )
            {
                /* do not perform left edge filter for left columns frags */
                if (x > 0) {
                    s->dsp.vp3_h_loop_filter(
                        plane_data + 8*x,
                        stride, bounding_values);
                }

                /* do not perform top edge filter for top row fragments */
                if (y > 0) {
                    s->dsp.vp3_v_loop_filter(
                        plane_data + 8*x,
                        stride, bounding_values);
                }

                /* do not perform right edge filter for right column
                 * fragments or if right fragment neighbor is also coded
                 * in this frame (it will be filtered in next iteration) */
                if ((x < width - 1) &&
                    (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
                    s->dsp.vp3_h_loop_filter(
                        plane_data + 8*x + 8,
                        stride, bounding_values);
                }

                /* do not perform bottom edge filter for bottom row
                 * fragments or if bottom fragment neighbor is also coded
                 * in this frame (it will be filtered in the next row) */
                if ((y < height - 1) &&
                    (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
                    s->dsp.vp3_v_loop_filter(
                        plane_data + 8*x + 8*stride,
                        stride, bounding_values);
                }
            }

            fragment++;
        }
        plane_data += 8*stride;
    }
}

static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
                              int plane, int inter, DCTELEM block[64])
{
    int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
    uint8_t *perm = s->scantable.permutated;
    int i = 0;

    do {
        int token = *s->dct_tokens[plane][i];
        switch (token & 3) {
        case 0: // EOB
            if (--token < 4) // 0-3 are token types, so the EOB run must now be 0
                s->dct_tokens[plane][i]++;
            else
                *s->dct_tokens[plane][i] = token & ~3;
            goto end;
        case 1: // zero run
            s->dct_tokens[plane][i]++;
            i += (token >> 2) & 0x7f;
            block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
            i++;
            break;
        case 2: // coeff
            block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
            s->dct_tokens[plane][i++]++;
            break;
        default: // shouldn't happen
            return i;
        }
    } while (i < 64);
end:
    // the actual DC+prediction is in the fragment structure
    block[0] = frag->dc * s->qmat[0][inter][plane][0];
    return i;
}

static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
{
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if (!s->flipped_image) {
        if (y == 0)
            h -= s->height - s->avctx->height;  // account for non-mod16
        y = s->height - y - h;
    }

    cy = y >> 1;
    offset[0] = s->current_frame.linesize[0]*y;
    offset[1] = s->current_frame.linesize[1]*cy;
    offset[2] = s->current_frame.linesize[2]*cy;
    offset[3] = 0;

    emms_c();
    s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
    s->last_slice_end= y + h;
}
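
/*
 * Note on slice geometry for render_slice() below (the example dimensions
 * are illustrative only): a slice corresponds to one chroma superblock row.
 * For 4:2:0 material the luma plane therefore renders two superblock rows
 * per slice, which is what
 *     sb_y = slice << (!plane && s->chroma_y_shift)
 * expands to. E.g. for a 320x240 4:2:0 frame, y_superblock_height is 8 and
 * c_superblock_height is 4, so slice 2 covers luma superblock rows 4-5 and
 * chroma superblock row 2.
 */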
/*
 * Perform the final rendering for a particular slice of data.
 * The slice number ranges from 0..(c_superblock_height - 1).
 */
static void render_slice(Vp3DecodeContext *s, int slice)
{
    int x, y, i, j;
    LOCAL_ALIGNED_16(DCTELEM, block, [64]);
    int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
    int motion_halfpel_index;
    uint8_t *motion_source;
    int plane, first_pixel;

    if (slice >= s->c_superblock_height)
        return;

    for (plane = 0; plane < 3; plane++) {
        uint8_t *output_plane = s->current_frame.data[plane] + s->data_offset[plane];
        uint8_t *last_plane   = s->last_frame.data[plane]    + s->data_offset[plane];
        uint8_t *golden_plane = s->golden_frame.data[plane]  + s->data_offset[plane];
        int stride            = s->current_frame.linesize[plane];
        int plane_width       = s->width  >> (plane && s->chroma_x_shift);
        int plane_height      = s->height >> (plane && s->chroma_y_shift);
        int8_t (*motion_val)[2] = s->motion_val[!!plane];

        int sb_x, sb_y = slice << (!plane && s->chroma_y_shift);
        int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
        int slice_width = plane ? s->c_superblock_width : s->y_superblock_width;

        int fragment_width = s->fragment_width[!!plane];
        int fragment_height = s->fragment_height[!!plane];
        int fragment_start = s->fragment_start[plane];

        if (!s->flipped_image) stride = -stride;
        if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
            continue;


        if(FFABS(stride) > 2048)
            return; //various tables are fixed size

        /* for each superblock row in the slice (both of them)... */
        for (; sb_y < slice_height; sb_y++) {

            /* for each superblock in a row... */
            for (sb_x = 0; sb_x < slice_width; sb_x++) {

                /* for each block in a superblock... */
                for (j = 0; j < 16; j++) {
                    x = 4*sb_x + hilbert_offset[j][0];
                    y = 4*sb_y + hilbert_offset[j][1];

                    i = fragment_start + y*fragment_width + x;

                    // bounds check
                    if (x >= fragment_width || y >= fragment_height)
                        continue;

                    first_pixel = 8*y*stride + 8*x;

                    /* transform if this block was coded */
                    if (s->all_fragments[i].coding_method != MODE_COPY) {
                        if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
                            (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
                            motion_source= golden_plane;
                        else
                            motion_source= last_plane;

                        motion_source += first_pixel;
                        motion_halfpel_index = 0;

                        /* sort out the motion vector if this fragment is coded
                         * using a motion vector method */
                        if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
                            (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
                            int src_x, src_y;
                            motion_x = motion_val[y*fragment_width + x][0];
                            motion_y = motion_val[y*fragment_width + x][1];

                            src_x= (motion_x>>1) + 8*x;
                            src_y= (motion_y>>1) + 8*y;

                            motion_halfpel_index = motion_x & 0x01;
                            motion_source += (motion_x >> 1);

                            motion_halfpel_index |= (motion_y & 0x01) << 1;
                            motion_source += ((motion_y >> 1) * stride);

                            if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
                                uint8_t *temp= s->edge_emu_buffer;
                                if(stride<0) temp -= 9*stride;
                                else temp += 9*stride;

                                ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
                                motion_source= temp;
                            }
                        }


                        /* first, take care of copying a block from either the
                         * previous or the golden frame */
                        if (s->all_fragments[i].coding_method != MODE_INTRA) {
                            /* Note, it is possible to implement all MC cases with
                               put_no_rnd_pixels_l2 which would look more like the
                               VP3 source but this would be slower as
                               put_no_rnd_pixels_tab is better optimized */
                            if(motion_halfpel_index != 3){
                                s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
                                    output_plane + first_pixel,
                                    motion_source, stride, 8);
                            }else{
                                int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
                                s->dsp.put_no_rnd_pixels_l2[1](
                                    output_plane + first_pixel,
                                    motion_source - d,
                                    motion_source + stride + 1 + d,
                                    stride, 8);
                            }
                        }

                        s->dsp.clear_block(block);

                        /* invert DCT and place (or add) in final output */

                        if (s->all_fragments[i].coding_method == MODE_INTRA) {
                            vp3_dequant(s, s->all_fragments + i, plane, 0, block);
                            if(s->avctx->idct_algo!=FF_IDCT_VP3)
                                block[0] += 128<<3;
                            s->dsp.idct_put(
                                output_plane + first_pixel,
                                stride,
                                block);
                        } else {
                            if (vp3_dequant(s, s->all_fragments + i, plane, 1, block)) {
                                s->dsp.idct_add(
                                    output_plane + first_pixel,
                                    stride,
                                    block);
                            } else {
                                s->dsp.vp3_idct_dc_add(output_plane + first_pixel, stride, block);
                            }
                        }
                    } else {

                        /* copy directly from the previous frame */
                        s->dsp.put_pixels_tab[1][0](
                            output_plane + first_pixel,
                            last_plane + first_pixel,
                            stride, 8);

                    }
                }
            }

            // Filter up to the last row in the superblock row
            apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1));
        }
    }

    /* this looks like a good place for slice dispatch... */
    /* algorithm:
     *   if (slice == s->macroblock_height - 1)
     *     dispatch (both last slice & 2nd-to-last slice);
     *   else if (slice > 0)
     *     dispatch (slice - 1);
     */

    vp3_draw_horiz_band(s, FFMIN(64*slice + 64-16, s->height-16));
}

/*
 * This is the ffmpeg/libavcodec API init function.
 */
static av_cold int vp3_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i, inter, plane;
    int c_width;
    int c_height;
    int y_fragment_count, c_fragment_count;

    if (avctx->codec_tag == MKTAG('V','P','3','0'))
        s->version = 0;
    else
        s->version = 1;

    s->avctx = avctx;
    s->width = FFALIGN(avctx->width, 16);
    s->height = FFALIGN(avctx->height, 16);
    if (avctx->pix_fmt == PIX_FMT_NONE)
        avctx->pix_fmt = PIX_FMT_YUV420P;
    avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
    if(avctx->idct_algo==FF_IDCT_AUTO)
        avctx->idct_algo=FF_IDCT_VP3;
    dsputil_init(&s->dsp, avctx);

    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);

    /* initialize to an impossible value which will force a recalculation
     * in the first frame decode */
    for (i = 0; i < 3; i++)
        s->qps[i] = -1;

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);

    s->y_superblock_width = (s->width + 31) / 32;
    s->y_superblock_height = (s->height + 31) / 32;
    s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;

    /* work out the dimensions for the C planes */
    c_width = s->width >> s->chroma_x_shift;
    c_height = s->height >> s->chroma_y_shift;
    s->c_superblock_width = (c_width + 31) / 32;
    s->c_superblock_height = (c_height + 31) / 32;
    s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;

    s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
    s->u_superblock_start = s->y_superblock_count;
    s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
    s->superblock_coding = av_malloc(s->superblock_count);

    s->macroblock_width = (s->width + 15) / 16;
    s->macroblock_height = (s->height + 15) / 16;
    s->macroblock_count = s->macroblock_width * s->macroblock_height;

    s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
    s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
    s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
    s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;

    /* fragment count covers all 8x8 blocks for all 3 planes */
    y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
    c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
    s->fragment_count = y_fragment_count + 2*c_fragment_count;
    s->fragment_start[1] = y_fragment_count;
    s->fragment_start[2] = y_fragment_count + c_fragment_count;

    s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
    s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int));
    s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base));
    s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0]));
    s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1]));

    if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base ||
        !s->coded_fragment_list[0] || !s->motion_val[0] || !s->motion_val[1]) {
        vp3_decode_end(avctx);
        return -1;
    }

    if (!s->theora_tables)
    {
        for (i = 0; i < 64; i++) {
            s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
            s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
            s->base_matrix[0][i] = vp31_intra_y_dequant[i];
            s->base_matrix[1][i] = vp31_intra_c_dequant[i];
            s->base_matrix[2][i] = vp31_inter_dequant[i];
            s->filter_limit_values[i] = vp31_filter_limit_values[i];
        }

        for(inter=0; inter<2; inter++){
            for(plane=0; plane<3; plane++){
                s->qr_count[inter][plane]= 1;
                s->qr_size [inter][plane][0]= 63;
                s->qr_base [inter][plane][0]=
                s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
            }
        }

        /* init VLC tables */
        for (i = 0; i < 16; i++) {

            /* DC histograms */
            init_vlc(&s->dc_vlc[i], 11, 32,
                &dc_bias[i][0][1], 4, 2,
                &dc_bias[i][0][0], 4, 2, 0);

            /* group 1 AC histograms */
            init_vlc(&s->ac_vlc_1[i], 11, 32,
                &ac_bias_0[i][0][1], 4, 2,
                &ac_bias_0[i][0][0], 4, 2, 0);

            /* group 2 AC histograms */
            init_vlc(&s->ac_vlc_2[i], 11, 32,
                &ac_bias_1[i][0][1], 4, 2,
                &ac_bias_1[i][0][0], 4, 2, 0);

            /* group 3 AC histograms */
            init_vlc(&s->ac_vlc_3[i], 11, 32,
                &ac_bias_2[i][0][1], 4, 2,
                &ac_bias_2[i][0][0], 4, 2, 0);

            /* group 4 AC histograms */
            init_vlc(&s->ac_vlc_4[i], 11, 32,
                &ac_bias_3[i][0][1], 4, 2,
                &ac_bias_3[i][0][0], 4, 2, 0);
        }
    } else {

        for (i = 0; i < 16; i++) {
            /* DC histograms */
            if (init_vlc(&s->dc_vlc[i], 11, 32,
                &s->huffman_table[i][0][1], 8, 4,
                &s->huffman_table[i][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 1 AC histograms */
            if (init_vlc(&s->ac_vlc_1[i], 11, 32,
                &s->huffman_table[i+16][0][1], 8, 4,
                &s->huffman_table[i+16][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 2 AC histograms */
            if (init_vlc(&s->ac_vlc_2[i], 11, 32,
                &s->huffman_table[i+16*2][0][1], 8, 4,
                &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 3 AC histograms */
            if (init_vlc(&s->ac_vlc_3[i], 11, 32,
                &s->huffman_table[i+16*3][0][1], 8, 4,
                &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 4 AC histograms */
            if (init_vlc(&s->ac_vlc_4[i], 11, 32,
                &s->huffman_table[i+16*4][0][1], 8, 4,
                &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0)
                goto vlc_fail;
        }
    }

    init_vlc(&s->superblock_run_length_vlc, 6, 34,
        &superblock_run_length_vlc_table[0][1], 4, 2,
        &superblock_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->fragment_run_length_vlc, 5, 30,
        &fragment_run_length_vlc_table[0][1], 4, 2,
        &fragment_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->mode_code_vlc, 3, 8,
        &mode_code_vlc_table[0][1], 2, 1,
        &mode_code_vlc_table[0][0], 2, 1, 0);

    init_vlc(&s->motion_vector_vlc, 6, 63,
        &motion_vector_vlc_table[0][1], 2, 1,
        &motion_vector_vlc_table[0][0], 2, 1, 0);

    /* work out the block mapping tables */
    s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
    s->macroblock_coding = av_malloc(s->macroblock_count + 1);
    if (!s->superblock_fragments || !s->macroblock_coding) {
        vp3_decode_end(avctx);
        return -1;
    }
    init_block_mapping(s);

    for (i = 0; i < 3; i++) {
        s->current_frame.data[i] = NULL;
        s->last_frame.data[i] = NULL;
        s->golden_frame.data[i] = NULL;
    }

    return 0;

vlc_fail:
    av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
    return -1;
}

/*
 * This is the ffmpeg/libavcodec API frame decode function.
 */
static int vp3_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    static int counter = 0;
    int i;

    init_get_bits(&gb, buf, buf_size * 8);

    if (s->theora && get_bits1(&gb))
    {
        av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
        return -1;
    }

    s->keyframe = !get_bits1(&gb);
    if (!s->theora)
        skip_bits(&gb, 1);
    for (i = 0; i < 3; i++)
        s->last_qps[i] = s->qps[i];

    s->nqps=0;
    do{
        s->qps[s->nqps++]= get_bits(&gb, 6);
    } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb));
    for (i = s->nqps; i < 3; i++)
        s->qps[i] = -1;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
            s->keyframe?"key":"", counter, s->qps[0]);
    counter++;

    if (s->qps[0] != s->last_qps[0])
        init_loop_filter(s);

    for (i = 0; i < s->nqps; i++)
        // reinit all dequantizers if the first one changed, because
        // the DC of the first quantizer must be used for all matrices
        if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
            init_dequantizer(s, i);

    if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
        return buf_size;

    s->current_frame.reference = 3;
    s->current_frame.pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE;
    if (avctx->get_buffer(avctx, &s->current_frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        goto error;
    }

    if (s->keyframe) {
        if (!s->theora)
        {
            skip_bits(&gb, 4); /* width code */
            skip_bits(&gb, 4); /* height code */
            if (s->version)
            {
                s->version = get_bits(&gb, 5);
                if (counter == 1)
                    av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
            }
        }
        if (s->version || s->theora)
        {
            if (get_bits1(&gb))
                av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
            skip_bits(&gb, 2); /* reserved? */
        }
    } else {
        if (!s->golden_frame.data[0]) {
            av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");

            s->golden_frame.reference = 3;
            s->golden_frame.pict_type = FF_I_TYPE;
            if (avctx->get_buffer(avctx, &s->golden_frame) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                goto error;
            }
            s->last_frame = s->golden_frame;
            s->last_frame.type = FF_BUFFER_TYPE_COPY;
        }
    }

    s->current_frame.qscale_table = s->qscale_table; //FIXME allocate individual tables per AVFrame
    s->current_frame.qstride = 0;

    memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));

    if (unpack_superblocks(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
        goto error;
    }
    if (unpack_modes(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
        goto error;
    }
    if (unpack_vectors(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
        goto error;
    }
    if (unpack_block_qpis(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
        goto error;
    }
    if (unpack_dct_coeffs(s, &gb)) {
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
        goto error;
    }

    for (i = 0; i < 3; i++) {
        int height = s->height >> (i && s->chroma_y_shift);
        if (s->flipped_image)
            s->data_offset[i] = 0;
        else
            s->data_offset[i] = (height-1) * s->current_frame.linesize[i];
    }

    s->last_slice_end = 0;
    for (i = 0; i < s->c_superblock_height; i++)
        render_slice(s, i);

    // filter the last row
    for (i = 0; i < 3; i++) {
        int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1;
        apply_loop_filter(s, i, row, row+1);
    }
    vp3_draw_horiz_band(s, s->height);

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->current_frame;

    /* release the last frame, if it is allocated and if it is not the
     * golden frame */
    if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
        avctx->release_buffer(avctx, &s->last_frame);

    /* shuffle frames (last = current) */
    s->last_frame = s->current_frame;

    if (s->keyframe) {
        if (s->golden_frame.data[0])
            avctx->release_buffer(avctx, &s->golden_frame);
        s->golden_frame = s->current_frame;
        s->last_frame.type = FF_BUFFER_TYPE_COPY;
    }

    s->current_frame.data[0] = NULL; /* ensure that we catch any access to this released frame */

    return buf_size;

error:
    if (s->current_frame.data[0])
        avctx->release_buffer(avctx, &s->current_frame);
    return -1;
}

/*
 * This is the ffmpeg/libavcodec API module cleanup function.
 */
static av_cold int vp3_decode_end(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i;

    av_free(s->superblock_coding);
    av_free(s->all_fragments);
    av_free(s->coded_fragment_list[0]);
    av_free(s->dct_tokens_base);
    av_free(s->superblock_fragments);
    av_free(s->macroblock_coding);
    av_free(s->motion_val[0]);
    av_free(s->motion_val[1]);

    for (i = 0; i < 16; i++) {
        free_vlc(&s->dc_vlc[i]);
        free_vlc(&s->ac_vlc_1[i]);
        free_vlc(&s->ac_vlc_2[i]);
        free_vlc(&s->ac_vlc_3[i]);
        free_vlc(&s->ac_vlc_4[i]);
    }

    free_vlc(&s->superblock_run_length_vlc);
    free_vlc(&s->fragment_run_length_vlc);
    free_vlc(&s->mode_code_vlc);
    free_vlc(&s->motion_vector_vlc);

    /* release all frames */
    if (s->golden_frame.data[0])
        avctx->release_buffer(avctx, &s->golden_frame);
    if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
        avctx->release_buffer(avctx, &s->last_frame);
    /* no need to release the current_frame since it will always be pointing
     * to the same frame as either the golden or last frame */

    return 0;
}

static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;

    if (get_bits1(gb)) {
        int token;
        if (s->entries >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        token = get_bits(gb, 5);
        //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
        s->huffman_table[s->hti][token][0] = s->hbits;
        s->huffman_table[s->hti][token][1] = s->huff_code_size;
        s->entries++;
    }
    else {
        if (s->huff_code_size >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        s->huff_code_size++;
        s->hbits <<= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits |= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits >>= 1;
        s->huff_code_size--;
    }
    return 0;
}

#if CONFIG_THEORA_DECODER
static const enum PixelFormat theora_pix_fmts[4] = {
    PIX_FMT_YUV420P, PIX_FMT_NONE, PIX_FMT_YUV422P, PIX_FMT_YUV444P
};

static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int visible_width, visible_height, colorspace;
    int offset_x = 0, offset_y = 0;
    AVRational fps;

    s->theora = get_bits_long(gb, 24);
    av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);

    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
    /* but previous versions have the image flipped relative to vp3 */
    if (s->theora < 0x030200)
    {
        s->flipped_image = 1;
        av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
    }

    visible_width  = s->width  = get_bits(gb, 16) << 4;
    visible_height = s->height = get_bits(gb, 16) << 4;

    if (avcodec_check_dimensions(avctx, s->width, s->height)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
        s->width = s->height = 0;
        return -1;
    }

    if (s->theora >= 0x030200) {
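        /* 3.2+ headers additionally carry the exact visible picture size
         * and its offset inside the 16-pixel-aligned coded frame read above */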
        visible_width  = get_bits_long(gb, 24);
        visible_height = get_bits_long(gb, 24);

        offset_x = get_bits(gb, 8); /* offset x */
        offset_y = get_bits(gb, 8); /* offset y, from bottom */
    }

    fps.num = get_bits_long(gb, 32);
    fps.den = get_bits_long(gb, 32);
    if (fps.num && fps.den) {
        av_reduce(&avctx->time_base.num, &avctx->time_base.den,
                  fps.den, fps.num, 1<<30);
    }

    avctx->sample_aspect_ratio.num = get_bits_long(gb, 24);
    avctx->sample_aspect_ratio.den = get_bits_long(gb, 24);

    if (s->theora < 0x030200)
        skip_bits(gb, 5); /* keyframe frequency force */
    colorspace = get_bits(gb, 8);
    skip_bits(gb, 24); /* bitrate */

    skip_bits(gb, 6); /* quality hint */

    if (s->theora >= 0x030200)
    {
        skip_bits(gb, 5); /* keyframe frequency force */
        avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
        skip_bits(gb, 3); /* reserved */
    }

//    align_get_bits(gb);

    if (   visible_width  <= s->width  && visible_width  > s->width-16
        && visible_height <= s->height && visible_height > s->height-16
        && !offset_x && (offset_y == s->height - visible_height))
        avcodec_set_dimensions(avctx, visible_width, visible_height);
    else
        avcodec_set_dimensions(avctx, s->width, s->height);

    if (colorspace == 1) {
        avctx->color_primaries = AVCOL_PRI_BT470M;
    } else if (colorspace == 2) {
        avctx->color_primaries = AVCOL_PRI_BT470BG;
    }
    if (colorspace == 1 || colorspace == 2) {
        avctx->colorspace = AVCOL_SPC_BT470BG;
        avctx->color_trc  = AVCOL_TRC_BT709;
    }

    return 0;
}

static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i, n, matrices, inter, plane;

    if (s->theora >= 0x030200) {
        n = get_bits(gb, 3);
        /* loop filter limit values table */
        for (i = 0; i < 64; i++) {
            s->filter_limit_values[i] = get_bits(gb, n);
            if (s->filter_limit_values[i] > 127) {
                av_log(avctx, AV_LOG_ERROR, "filter limit value too large (%i > 127), clamping\n", s->filter_limit_values[i]);
                s->filter_limit_values[i] = 127;
            }
        }
    }

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* quality threshold table */
    for (i = 0; i < 64; i++)
        s->coded_ac_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* dc scale factor table */
    for (i = 0; i < 64; i++)
        s->coded_dc_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        matrices = get_bits(gb, 9) + 1;
    else
        matrices = 3;

    if (matrices > 384) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
        return -1;
    }

    for (n = 0; n < matrices; n++) {
        for (i = 0; i < 64; i++)
            s->base_matrix[n][i] = get_bits(gb, 8);
    }

    for (inter = 0; inter <= 1; inter++) {
        for (plane = 0; plane <= 2; plane++) {
            int newqr = 1;
            if (inter || plane > 0)
                newqr = get_bits1(gb);
            if (!newqr) {
                int qtj, plj;
                if (inter && get_bits1(gb)) {
                    qtj = 0;
                    plj = plane;
                } else {
                    qtj = (3*inter + plane - 1) / 3;
                    plj = (plane + 2) % 3;
                }
                s->qr_count[inter][plane] = s->qr_count[qtj][plj];
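                /* reusing an earlier plane: duplicate its quant-range
                 * sizes and base-matrix indices as well */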
                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
            } else {
                int qri = 0;
                int qi  = 0;

                for (;;) {
                    i = get_bits(gb, av_log2(matrices-1)+1);
                    if (i >= matrices) {
                        av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
                        return -1;
                    }
                    s->qr_base[inter][plane][qri] = i;
                    if (qi >= 63)
                        break;
                    i = get_bits(gb, av_log2(63-qi)+1) + 1;
                    s->qr_size[inter][plane][qri++] = i;
                    qi += i;
                }

                if (qi > 63) {
                    av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
                    return -1;
                }
                s->qr_count[inter][plane] = qri;
            }
        }
    }

    /* Huffman tables */
    for (s->hti = 0; s->hti < 80; s->hti++) {
        s->entries = 0;
        s->huff_code_size = 1;
        if (!get_bits1(gb)) {
            s->hbits = 0;
            if (read_huffman_tree(avctx, gb))
                return -1;
            s->hbits = 1;
            if (read_huffman_tree(avctx, gb))
                return -1;
        }
    }

    s->theora_tables = 1;

    return 0;
}

static av_cold int theora_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int ptype;
    uint8_t *header_start[3];
    int header_len[3];
    int i;

    s->theora = 1;

    if (!avctx->extradata_size)
    {
        av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
        return -1;
    }

    if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                              42, header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
        return -1;
    }

    for (i = 0; i < 3; i++) {
        init_get_bits(&gb, header_start[i], header_len[i] * 8);

        ptype = get_bits(&gb, 8);

        if (!(ptype & 0x80))
        {
            av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
//            return -1;
        }

        // FIXME: Check for this as well.
        skip_bits_long(&gb, 6*8); /* "theora" */

        switch (ptype)
        {
            case 0x80:
                theora_decode_header(avctx, &gb);
                break;
            case 0x81:
// FIXME: is this needed? it breaks sometimes
//                theora_decode_comments(avctx, gb);
                break;
            case 0x82:
                if (theora_decode_tables(avctx, &gb))
                    return -1;
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
                break;
        }
        if (ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
            av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n",
                   8*header_len[i] - get_bits_count(&gb), ptype);
        if (s->theora < 0x030200)
            break;
    }

    return vp3_decode_init(avctx);
}

AVCodec theora_decoder = {
    "theora",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_THEORA,
    sizeof(Vp3DecodeContext),
    theora_decode_init,
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Theora"),
};
#endif

AVCodec vp3_decoder = {
    "vp3",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_VP3,
    sizeof(Vp3DecodeContext),
    vp3_decode_init,
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
};
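/*
 * Usage sketch (illustrative only, not part of the original vp3.c): one way
 * an application built against this era of libavcodec might reach
 * vp3_decode_frame() above through the public API.  The function name,
 * error handling and packet contents are assumptions made for the example;
 * demuxing the bitstream into packets (and, for Theora, providing the
 * container extradata) is out of scope here.
 */
#include <libavcodec/avcodec.h>

int decode_one_vp3_packet(const uint8_t *data, int size)
{
    AVCodec        *codec;
    AVCodecContext *ctx;
    AVFrame        *frame;
    AVPacket        pkt;
    int got_picture = 0, ret = -1;

    avcodec_register_all();
    codec = avcodec_find_decoder(CODEC_ID_VP3);   /* or CODEC_ID_THEORA */
    if (!codec)
        return -1;

    ctx   = avcodec_alloc_context();
    frame = avcodec_alloc_frame();
    if (!ctx || !frame)
        goto end;
    if (avcodec_open(ctx, codec) < 0)
        goto end;

    /* wrap the raw frame data in an AVPacket, as vp3_decode_frame() expects */
    av_init_packet(&pkt);
    pkt.data = (uint8_t *)data;
    pkt.size = size;

    /* this call dispatches to vp3_decode_frame(); on success got_picture
     * is nonzero and frame holds the decoded picture */
    if (avcodec_decode_video2(ctx, frame, &got_picture, &pkt) >= 0)
        ret = got_picture;

    avcodec_close(ctx);
end:
    av_free(frame);
    av_free(ctx);
    return ret;
}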