/*
 * ADPCM codecs
 * Copyright (c) 2001-2003 The ffmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "bytestream.h"

#define BLKSIZE 1024

/* step_table[] and index_table[] are from the ADPCM reference source */
/* This is the index table: */
static const int index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

static const int step_table[89] = {
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};

/* These are for MS-ADPCM */
/* AdaptationTable[], AdaptCoeff1[], and AdaptCoeff2[] are from libsndfile */
static const int AdaptationTable[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

static const uint8_t AdaptCoeff1[] = {
    64, 128, 0, 48, 60, 115, 98
};

static const int8_t AdaptCoeff2[] = {
    0, -64, 0, 16, 0, -52, -58
};

/* These are for CD-ROM XA ADPCM */
static const int xa_adpcm_table[5][2] = {
    { 0, 0 },
    { 60, 0 },
    { 115, -52 },
    { 98, -55 },
    { 122, -60 }
};

static const int ea_adpcm_table[] = {
    0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
    3, 4, 7, 8, 10, 11, 0, -1, -3, -4
};

// padded to zero where table size is less than 16
static const int swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};

static const int yamaha_indexscale[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    230, 230, 230, 230, 307, 409, 512, 614
};

static const int yamaha_difflookup[] = {
    1, 3, 5, 7, 9, 11, 13, 15,
    -1, -3, -5, -7, -9, -11, -13, -15
};

/* end of tables */

typedef struct ADPCMChannelStatus {
    int predictor;
    short int step_index;
    int step;
    /* for encoding */
    int prev_sample;

    /* MS version */
    short sample1;
    short sample2;
    int coeff1;
    int coeff2;
    int idelta;
} ADPCMChannelStatus;

typedef struct ADPCMContext {
    ADPCMChannelStatus status[6];
} ADPCMContext;

/* XXX: implement encoding */

#if CONFIG_ENCODERS
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
    uint8_t *extradata;
    int i;
    if (avctx->channels > 2)
        return -1; /* only stereo or mono =) */

    if(avctx->trellis && (unsigned)avctx->trellis > 16U){
        av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
        return -1;
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16-bit sample gives one nibble */
        /* and we have 4 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        /* seems frame_size isn't taken into account... have to buffer the samples :-( */
        break;
    case CODEC_ID_ADPCM_IMA_QT:
        avctx->frame_size = 64;
        avctx->block_align = 34 * avctx->channels;
        break;
    case CODEC_ID_ADPCM_MS:
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16-bit sample gives one nibble */
        /* and we have 7 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        avctx->extradata_size = 32;
        extradata = avctx->extradata = av_malloc(avctx->extradata_size);
        if (!extradata)
            return AVERROR(ENOMEM);
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7); /* wNumCoef */
        for (i = 0; i < 7; i++) {
            bytestream_put_le16(&extradata, AdaptCoeff1[i] * 4);
            bytestream_put_le16(&extradata, AdaptCoeff2[i] * 4);
        }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        avctx->frame_size = BLKSIZE * avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    case CODEC_ID_ADPCM_SWF:
        if (avctx->sample_rate != 11025 &&
            avctx->sample_rate != 22050 &&
            avctx->sample_rate != 44100) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
            return -1;
        }
        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
        break;
    default:
        return -1;
    }

    avctx->coded_frame= avcodec_alloc_frame();
    avctx->coded_frame->key_frame= 1;

    return 0;
}

static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);

    return 0;
}


static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int delta = sample - c->prev_sample;
    int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
    c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
    return nibble;
}

static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int predictor, nibble, bias;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;

    nibble= sample - predictor;
    if(nibble>=0) bias= c->idelta/2;
    else bias=-c->idelta/2;

    nibble= (nibble + bias) / c->idelta;
    nibble= av_clip(nibble, -8, 7)&0x0F;

    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);

    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return nibble;
}

static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int nibble, delta;

    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    delta = sample - c->predictor;

    nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;

    c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);

    return nibble;
}

typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;
    int path;
    int sample1;
    int sample2;
    int step;
} TrellisNode;

static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
                                   uint8_t *dst, ADPCMChannelStatus *c, int n)
{
#define FREEZE_INTERVAL 128
    //FIXME 6% faster if frontier is a compile-time constant
    const int frontier = 1 << avctx->trellis;
    const int stride = avctx->channels;
    const int version = avctx->codec->id;
    const int max_paths = frontier*FREEZE_INTERVAL;
    TrellisPath paths[max_paths], *p;
    TrellisNode node_buf[2][frontier];
    TrellisNode *nodep_buf[2][frontier];
    TrellisNode **nodes = nodep_buf[0]; // nodes[] is always sorted by .ssd
    TrellisNode **nodes_next = nodep_buf[1];
    int pathn = 0, froze = -1, i, j, k;

    assert(!(max_paths&(max_paths-1)));

    memset(nodep_buf, 0, sizeof(nodep_buf));
    nodes[0] = &node_buf[1][0];
    nodes[0]->ssd = 0;
    nodes[0]->path = 0;
    nodes[0]->step = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
        nodes[0]->sample1 = c->prev_sample;
    if(version == CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if(version == CODEC_ID_ADPCM_YAMAHA) {
        if(c->step == 0) {
            nodes[0]->step = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for(i=0; i<n; i++) {
        TrellisNode *t = node_buf[i&1];
        TrellisNode **u;
        int sample = samples[i*stride];
        memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
        for(j=0; j<frontier && nodes[j]; j++) {
            // higher j have higher ssd already, so they're unlikely to use a suboptimal next sample too
            const int range = (j < frontier/2) ? 1 : 0;
            const int step = nodes[j]->step;
            int nidx;
            if(version == CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div = (sample - predictor) / step;
                const int nmin = av_clip(div-range, -8, 6);
                const int nmax = av_clip(div+range, -7, 7);
                for(nidx=nmin; nidx<=nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    dec_sample = av_clip_int16(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*d;\
                    if(nodes_next[frontier-1] && ssd >= nodes_next[frontier-1]->ssd)\
                        continue;\
                    /* Collapse any two states with the same previous sample value. \
                     * One could also distinguish states by step and by 2nd to last
                     * sample, but the effects of that are negligible. */\
                    for(k=0; k<frontier && nodes_next[k]; k++) {\
                        if(dec_sample == nodes_next[k]->sample1) {\
                            assert(ssd >= nodes_next[k]->ssd);\
                            goto next_##NAME;\
                        }\
                    }\
                    for(k=0; k<frontier; k++) {\
                        if(!nodes_next[k] || ssd < nodes_next[k]->ssd) {\
                            TrellisNode *u = nodes_next[frontier-1];\
                            if(!u) {\
                                assert(pathn < max_paths);\
                                u = t++;\
                                u->path = pathn++;\
                            }\
                            u->ssd = ssd;\
                            u->step = STEP_INDEX;\
                            u->sample2 = nodes[j]->sample1;\
                            u->sample1 = dec_sample;\
                            paths[u->path].nibble = nibble;\
                            paths[u->path].prev = nodes[j]->path;\
                            memmove(&nodes_next[k+1], &nodes_next[k], (frontier-k-1)*sizeof(TrellisNode*));\
                            nodes_next[k] = u;\
                            break;\
                        }\
                    }\
                    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
                }
            } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div-range, -7, 6);\
                int nmax = av_clip(div+range, -6, 7);\
                if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
                if(nmax<0) nmax--;\
                for(nidx=nmin; nidx<=nmax; nidx++) {\
                    const int nibble = nidx<0 ? 7-nidx : nidx;\
                    int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
            } else { //CODEC_ID_ADPCM_YAMAHA
                LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u = nodes;
        nodes = nodes_next;
        nodes_next = u;

        // prevent overflow
        if(nodes[0]->ssd > (1<<28)) {
            for(j=1; j<frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        // merge old paths to save memory
        if(i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for(k=i; k>froze; k--) {
                dst[k] = p->nibble;
                p = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            // other nodes might use paths that don't coincide with the frozen one.
            // checking which nodes do so is too slow, so just kill them all.
            // this also slightly improves quality, but I don't know why.
            memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for(i=n-1; i>froze; i--) {
        dst[i] = p->nibble;
        p = &paths[p->prev];
    }

    c->predictor = nodes[0]->sample1;
    c->sample1 = nodes[0]->sample1;
    c->sample2 = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step = nodes[0]->step;
    c->idelta = nodes[0]->step;
}

static int adpcm_encode_frame(AVCodecContext *avctx,
                              unsigned char *frame, int buf_size, void *data)
{
    int n, i, st;
    short *samples;
    unsigned char *dst;
    ADPCMContext *c = avctx->priv_data;

    dst = frame;
    samples = (short *)data;
    st= avctx->channels == 2;
    /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        n = avctx->frame_size / 8;
        c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
        /* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */
        bytestream_put_le16(&dst, c->status[0].prev_sample);
        *dst++ = (unsigned char)c->status[0].step_index;
        *dst++ = 0; /* unknown */
        samples++;
        if (avctx->channels == 2) {
            c->status[1].prev_sample = (signed short)samples[0];
            /* c->status[1].step_index = 0; */
            bytestream_put_le16(&dst, c->status[1].prev_sample);
            *dst++ = (unsigned char)c->status[1].step_index;
            *dst++ = 0;
            samples++;
        }

        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */
        if(avctx->trellis > 0) {
            uint8_t buf[2][n*8];
            adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n*8);
            if(avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n*8);
            for(i=0; i<n; i++) {
                *dst++ = buf[0][8*i+0] | (buf[0][8*i+1] << 4);
                *dst++ = buf[0][8*i+2] | (buf[0][8*i+3] << 4);
                *dst++ = buf[0][8*i+4] | (buf[0][8*i+5] << 4);
                *dst++ = buf[0][8*i+6] | (buf[0][8*i+7] << 4);
                if (avctx->channels == 2) {
                    *dst++ = buf[1][8*i+0] | (buf[1][8*i+1] << 4);
                    *dst++ = buf[1][8*i+2] | (buf[1][8*i+3] << 4);
                    *dst++ = buf[1][8*i+4] | (buf[1][8*i+5] << 4);
                    *dst++ = buf[1][8*i+6] | (buf[1][8*i+7] << 4);
                }
            }
        } else
        for (; n>0; n--) {
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
            dst++;
            /* right channel */
            if (avctx->channels == 2) {
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
                dst++;
            }
            samples += 8 * avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_IMA_QT:
    {
        int ch, i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        for(ch=0; ch<avctx->channels; ch++){
            put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
            put_bits(&pb, 7, c->status[ch].step_index);
            if(avctx->trellis > 0) {
                uint8_t buf[64];
                adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
                for(i=0; i<64; i++)
                    put_bits(&pb, 4, buf[i^1]);
                c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F;
            } else {
                for (i=0; i<64; i+=2){
                    int t1, t2;
                    t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
                    t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
                    put_bits(&pb, 4, t2);
                    put_bits(&pb, 4, t1);
                }
                c->status[ch].prev_sample &= ~0x7F;
            }
        }

        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_SWF:
    {
        int i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        n = avctx->frame_size-1;

        //Store AdpcmCodeSize
        put_bits(&pb, 2, 2); //Set 4bits flash adpcm format

        //Init the encoder state
        for(i=0; i<avctx->channels; i++){
            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits
            put_sbits(&pb, 16, samples[i]);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = (signed short)samples[i];
        }

        if(avctx->trellis > 0) {
            uint8_t buf[2][n];
            adpcm_compress_trellis(avctx, samples+2, buf[0], &c->status[0], n);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+3, buf[1], &c->status[1], n);
            for(i=0; i<n; i++) {
                put_bits(&pb, 4, buf[0][i]);
                if (avctx->channels == 2)
                    put_bits(&pb, 4, buf[1][i]);
            }
        } else {
            for (i=1; i<avctx->frame_size; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
                if (avctx->channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
            }
        }
        flush_put_bits(&pb);
        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_MS:
        for(i=0; i<avctx->channels; i++){
            int predictor=0;

            *dst++ = predictor;
            c->status[i].coeff1 = AdaptCoeff1[predictor];
            c->status[i].coeff2 = AdaptCoeff2[predictor];
        }
        for(i=0; i<avctx->channels; i++){
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;

            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample2= *samples++;
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample1= *samples++;

            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for(i=0; i<avctx->channels; i++)
            bytestream_put_le16(&dst, c->status[i].sample2);

        if(avctx->trellis > 0) {
            int n = avctx->block_align - 7*avctx->channels;
            uint8_t buf[2][n];
            if(avctx->channels == 1) {
                n *= 2;
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = (buf[0][i] << 4) | buf[0][i+1];
            } else {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = (buf[0][i] << 4) | buf[1][i];
            }
        } else
        for(i=7*avctx->channels; i<avctx->block_align; i++) {
            int nibble;
            nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
            nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
            *dst++ = nibble;
        }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        n = avctx->frame_size / 2;
        if(avctx->trellis > 0) {
            uint8_t buf[2][n*2];
            n *= 2;
            if(avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = buf[0][i] | (buf[0][i+1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = buf[0][i] | (buf[1][i] << 4);
            }
        } else
        for (n *= avctx->channels; n>0; n--) {
            int nibble;
            nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
            nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
            *dst++ = nibble;
        }
        break;
    default:
        return -1;
    }
    return dst - frame;
}
#endif //CONFIG_ENCODERS

static av_cold int adpcm_decode_init(AVCodecContext * avctx)
{
    ADPCMContext *c = avctx->priv_data;
    unsigned int max_channels = 2;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_EA_R1:
    case CODEC_ID_ADPCM_EA_R2:
    case CODEC_ID_ADPCM_EA_R3:
        max_channels = 6;
        break;
    }
    if(avctx->channels > max_channels){
        return -1;
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_CT:
        c->status[0].step = c->status[1].step = 511;
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        if (avctx->extradata && avctx->extradata_size == 2 * 4) {
            c->status[0].predictor = AV_RL32(avctx->extradata);
            c->status[1].predictor = AV_RL32(avctx->extradata + 4);
        }
        break;
    default:
        break;
    }
    avctx->sample_fmt = SAMPLE_FMT_S16;
    return 0;
}

static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
{
    int step_index;
    int predictor;
    int sign, delta, diff, step;

    step = step_table[c->step_index];
    step_index = c->step_index + index_table[(unsigned)nibble];
    if (step_index < 0) step_index = 0;
    else if (step_index > 88) step_index = 88;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (short)c->predictor;
}

static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int predictor;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);
    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return c->sample1;
}

static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int sign, delta, diff;
    int new_step;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * c->step) >> 3;
    /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
    c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
    c->predictor = av_clip_int16(c->predictor);
    /* calculate new step and clamp it to range 511..32767 */
    new_step = (AdaptationTable[nibble & 7] * c->step) >> 8;
    c->step = av_clip(new_step, 511, 32767);

    return (short)c->predictor;
}

static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
{
    int sign, delta, diff;

    sign = nibble & (1<<(size-1));
    delta = nibble & ((1<<(size-1))-1);
    diff = delta << (7 + c->step + shift);

    /* clamp result */
    c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);

    /* calculate new step */
    if (delta >= (2*size - 3) && c->step < 3)
        c->step++;
    else if (delta == 0 && c->step > 0)
        c->step--;

    return (short) c->predictor;
}

static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
{
    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);
    return c->predictor;
}

static void xa_decode(short *out, const unsigned char *in,
                      ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
{
    int i, j;
    int shift,filter,f0,f1;
    int s_1,s_2;
    int d,s,t;

    for(i=0;i<4;i++) {

        shift = 12 - (in[4+i*2] & 15);
        filter = in[4+i*2] >> 4;
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = (signed char)(d<<4)>>4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) { /* stereo */
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
            out = out + 1 - 28*2;
        }

        shift = 12 - (in[5+i*2] & 15);
        filter = in[5+i*2] >> 4;

        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = (signed char)d >> 4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) { /* stereo */
            right->sample1 = s_1;
            right->sample2 = s_2;
            out -= 1;
        } else {
            left->sample1 = s_1;
            left->sample2 = s_2;
        }
    }
}


/* DK3 ADPCM support macro */
#define DK3_GET_NEXT_NIBBLE() \
    if (decode_top_nibble_next) \
    { \
        nibble = last_byte >> 4; \
        decode_top_nibble_next = 0; \
    } \
    else \
    { \
        last_byte = *src++; \
        if (src >= buf + buf_size) break; \
        nibble = last_byte & 0x0F; \
        decode_top_nibble_next = 1; \
    }

static int adpcm_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    ADPCMContext *c = avctx->priv_data;
    ADPCMChannelStatus *cs;
    int n, m, channel, i;
    int block_predictor[2];
    short *samples;
    short *samples_end;
    const uint8_t *src;
    int st; /* stereo */

    /* DK3 ADPCM accounting variables */
    unsigned char last_byte = 0;
    unsigned char nibble;
    int decode_top_nibble_next = 0;
    int diff_channel;

    /* EA ADPCM state variables */
    uint32_t samples_in_chunk;
    int32_t previous_left_sample, previous_right_sample;
    int32_t current_left_sample, current_right_sample;
    int32_t next_left_sample, next_right_sample;
    int32_t coeff1l, coeff2l, coeff1r, coeff2r;
    uint8_t shift_left, shift_right;
    int count1, count2;
    int coeff[2][2], shift[2];//used in EA MAXIS ADPCM

    if (!buf_size)
        return 0;

    //should protect all 4bit ADPCM variants
    //8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels
    //
    if(*data_size/4 < buf_size + 8)
        return -1;

    samples = data;
    samples_end= samples + *data_size/2;
    *data_size= 0;
    src = buf;

    st = avctx->channels == 2 ? 1 : 0;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_QT:
        n = buf_size - 2*avctx->channels;
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &(c->status[channel]);
            /* (pppppp) (piiiiiii) */

            /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
            cs->predictor = (*src++) << 8;
            cs->predictor |= (*src & 0x80);
            cs->predictor &= 0xFF80;

            /* sign extension */
            if(cs->predictor & 0x8000)
                cs->predictor -= 0x10000;

            cs->predictor = av_clip_int16(cs->predictor);

            cs->step_index = (*src++) & 0x7F;

            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }

            cs->step = step_table[cs->step_index];

            samples = (short*)data + channel;

            for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples) */
                *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
                samples += avctx->channels;
                *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4, 3);
                samples += avctx->channels;
                src ++;
            }
        }
        if (st)
            samples--;
        break;
    case CODEC_ID_ADPCM_IMA_WAV:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        // samples_per_block= (block_align-4*channels)*8 / (bits_per_sample * channels) + 1;

        for(i=0; i<avctx->channels; i++){
            cs = &(c->status[i]);
            cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src);

            cs->step_index = *src++;
            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }
            if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */
        }

        while(src < buf + buf_size){
            for(m=0; m<4; m++){
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4, 3);
                src++;
            }
            src += 4*st;
        }
        break;
    case CODEC_ID_ADPCM_4XM:
        cs = &(c->status[0]);
        c->status[0].predictor= (int16_t)bytestream_get_le16(&src);
        if(st){
            c->status[1].predictor= (int16_t)bytestream_get_le16(&src);
        }
        c->status[0].step_index= (int16_t)bytestream_get_le16(&src);
        if(st){
            c->status[1].step_index= (int16_t)bytestream_get_le16(&src);
        }
        if (cs->step_index < 0) cs->step_index = 0;
        if (cs->step_index > 88) cs->step_index = 88;

        m= (buf_size - (src - buf))>>st;
        for(i=0; i<m; i++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
        }

        src += m<<st;

        break;
    case CODEC_ID_ADPCM_MS:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;
        n = buf_size - 7 * avctx->channels;
        if (n < 0)
            return -1;
        block_predictor[0] = av_clip(*src++, 0, 6);
        block_predictor[1] = 0;
        if (st)
            block_predictor[1] = av_clip(*src++, 0, 6);
        c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
        if (st){
            c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
        }
        c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
        c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
        c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
        c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];

        c->status[0].sample1 = bytestream_get_le16(&src);
        if (st) c->status[1].sample1 = bytestream_get_le16(&src);
        c->status[0].sample2 = bytestream_get_le16(&src);
        if (st) c->status[1].sample2 = bytestream_get_le16(&src);

        *samples++ = c->status[0].sample2;
        if (st) *samples++ = c->status[1].sample2;
        *samples++ = c->status[0].sample1;
        if (st) *samples++ = c->status[1].sample1;
        for(;n>0;n--) {
            *samples++ = adpcm_ms_expand_nibble(&c->status[ 0], src[0] >> 4  );
            *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
            src ++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK4:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
        c->status[0].step_index = *src++;
        src++;
        *samples++ = c->status[0].predictor;
        if (st) {
            c->status[1].predictor = (int16_t)bytestream_get_le16(&src);
            c->status[1].step_index = *src++;
            src++;
            *samples++ = c->status[1].predictor;
        }
        while (src < buf + buf_size) {

            /* take care of the top nibble (always left or mono channel) */
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                src[0] >> 4, 3);

            /* take care of the bottom nibble, which is right sample for
             * stereo, or another mono sample */
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            else
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);

            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK3:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        if(buf_size + 16 > (samples_end - samples)*3/8)
            return -1;

        c->status[0].predictor = (int16_t)AV_RL16(src + 10);
        c->status[1].predictor = (int16_t)AV_RL16(src + 12);
        c->status[0].step_index = src[14];
        c->status[1].step_index = src[15];
        /* sign extend the predictors */
        src += 16;
        diff_channel = c->status[1].predictor;

        /* the DK3_GET_NEXT_NIBBLE macro issues the break statement when
         * the buffer is consumed */
        while (1) {

            /* for this algorithm, c->status[0] is the sum channel and
             * c->status[1] is the diff channel */

            /* process the first predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the diff channel predictor */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[1], nibble, 3);

            /* process the first pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;

            /* process the second predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the second pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;
        }
        break;
    case CODEC_ID_ADPCM_IMA_ISS:
        c->status[0].predictor = (int16_t)AV_RL16(src + 0);
        c->status[0].step_index = src[2];
        src += 4;
        if(st) {
            c->status[1].predictor = (int16_t)AV_RL16(src + 0);
            c->status[1].step_index = src[2];
            src += 4;
        }

        while (src < buf + buf_size) {

            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4, 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4, 3);
            }

            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        /* no per-block initialization; just start decoding the data */
        while (src < buf + buf_size) {

            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4, 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4, 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
            }

            src++;
        }
        break;
    case CODEC_ID_ADPCM_XA:
        while (buf_size >= 128) {
            xa_decode(samples, src, &c->status[0], &c->status[1],
                avctx->channels);
            src += 128;
            samples += 28 * 8;
            buf_size -= 128;
        }
        break;
    case CODEC_ID_ADPCM_IMA_EA_EACS:
        samples_in_chunk = bytestream_get_le32(&src) >> (1-st);

        if (samples_in_chunk > buf_size-4-(8<<st)) {
            src += buf_size - 4;
            break;
        }

        for (i=0; i<=st; i++)
            c->status[i].step_index = bytestream_get_le32(&src);
        for (i=0; i<=st; i++)
            c->status[i].predictor = bytestream_get_le32(&src);

        for (; samples_in_chunk; samples_in_chunk--, src++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
        }
        break;
    case CODEC_ID_ADPCM_IMA_EA_SEAD:
        for (; src < buf+buf_size; src++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6);
        }
        break;
    case CODEC_ID_ADPCM_EA:
        if (buf_size < 4 || AV_RL32(src) >= ((buf_size - 12) * 2)) {
            src += buf_size;
            break;
        }
        samples_in_chunk = AV_RL32(src);
        src += 4;
        current_left_sample = (int16_t)bytestream_get_le16(&src);
        previous_left_sample = (int16_t)bytestream_get_le16(&src);
        current_right_sample = (int16_t)bytestream_get_le16(&src);
        previous_right_sample = (int16_t)bytestream_get_le16(&src);

        for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
            coeff1l = ea_adpcm_table[ *src >> 4 ];
            coeff2l = ea_adpcm_table[(*src >> 4 ) + 4];
            coeff1r = ea_adpcm_table[*src & 0x0F];
            coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
            src++;

            shift_left = (*src >> 4 ) + 8;
            shift_right = (*src & 0x0F) + 8;
            src++;

            for (count2 = 0; count2 < 28; count2++) {
                next_left_sample = (int32_t)((*src & 0xF0) << 24) >> shift_left;
                next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right;
                src++;

                next_left_sample = (next_left_sample +
                    (current_left_sample * coeff1l) +
                    (previous_left_sample * coeff2l) + 0x80) >> 8;
                next_right_sample = (next_right_sample +
                    (current_right_sample * coeff1r) +
                    (previous_right_sample * coeff2r) + 0x80) >> 8;

                previous_left_sample = current_left_sample;
                current_left_sample = av_clip_int16(next_left_sample);
                previous_right_sample = current_right_sample;
                current_right_sample = av_clip_int16(next_right_sample);
                *samples++ = (unsigned short)current_left_sample;
                *samples++ = (unsigned short)current_right_sample;
            }
        }

        if (src - buf == buf_size - 2)
            src += 2; // Skip terminating 0x0000

        break;
    case CODEC_ID_ADPCM_EA_MAXIS_XA:
        for(channel = 0; channel < avctx->channels; channel++) {
            for (i=0; i<2; i++)
                coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i];
            shift[channel] = (*src & 0x0F) + 8;
            src++;
        }
        for (count1 = 0; count1 < (buf_size - avctx->channels) / avctx->channels; count1++) {
            for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
                for(channel = 0; channel < avctx->channels; channel++) {
                    int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel];
                    sample = (sample +
                              c->status[channel].sample1 * coeff[channel][0] +
                              c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
                    c->status[channel].sample2 = c->status[channel].sample1;
                    c->status[channel].sample1 = av_clip_int16(sample);
                    *samples++ = c->status[channel].sample1;
                }
            }
            src+=avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_EA_R1:
    case CODEC_ID_ADPCM_EA_R2:
    case CODEC_ID_ADPCM_EA_R3: {
        /* channel numbering
           2chan: 0=fl, 1=fr
           4chan: 0=fl, 1=rl, 2=fr, 3=rr
           6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
        const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
        int32_t previous_sample, current_sample, next_sample;
        int32_t coeff1, coeff2;
        uint8_t shift;
        unsigned int channel;
        uint16_t *samplesC;
        const uint8_t *srcC;
        const uint8_t *src_end = buf + buf_size;

        samples_in_chunk = (big_endian ? bytestream_get_be32(&src)
                                       : bytestream_get_le32(&src)) / 28;
        if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) ||
            28*samples_in_chunk*avctx->channels > samples_end-samples) {
            src += buf_size - 4;
            break;
        }

        for (channel=0; channel<avctx->channels; channel++) {
            int32_t offset = (big_endian ? bytestream_get_be32(&src)
                                         : bytestream_get_le32(&src))
                             + (avctx->channels-channel-1) * 4;

            if ((offset < 0) || (offset >= src_end - src - 4)) break;
            srcC = src + offset;
            samplesC = samples + channel;

            if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
                current_sample = (int16_t)bytestream_get_le16(&srcC);
                previous_sample = (int16_t)bytestream_get_le16(&srcC);
            } else {
                current_sample = c->status[channel].predictor;
                previous_sample = c->status[channel].prev_sample;
            }

            for (count1=0; count1<samples_in_chunk; count1++) {
                if (*srcC == 0xEE) { /* only seen in R2 and R3 */
                    srcC++;
                    if (srcC > src_end - 30*2) break;
                    current_sample = (int16_t)bytestream_get_be16(&srcC);
                    previous_sample = (int16_t)bytestream_get_be16(&srcC);

                    for (count2=0; count2<28; count2++) {
                        *samplesC = (int16_t)bytestream_get_be16(&srcC);
                        samplesC += avctx->channels;
                    }
                } else {
                    coeff1 = ea_adpcm_table[ *srcC>>4 ];
                    coeff2 = ea_adpcm_table[(*srcC>>4) + 4];
                    shift = (*srcC++ & 0x0F) + 8;

                    if (srcC > src_end - 14) break;
                    for (count2=0; count2<28; count2++) {
                        if (count2 & 1)
                            next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift;
                        else
                            next_sample = (int32_t)((*srcC & 0xF0) << 24) >> shift;

                        next_sample += (current_sample * coeff1) +
                                       (previous_sample * coeff2);
                        next_sample = av_clip_int16(next_sample >> 8);

                        previous_sample = current_sample;
                        current_sample = next_sample;
                        *samplesC = current_sample;
                        samplesC += avctx->channels;
                    }
                }
            }

            if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
                c->status[channel].predictor = current_sample;
                c->status[channel].prev_sample = previous_sample;
            }
        }

        src = src + buf_size - (4 + 4*avctx->channels);
        samples += 28 * samples_in_chunk * avctx->channels;
        break;
    }
    case CODEC_ID_ADPCM_EA_XAS:
        if (samples_end-samples < 32*4*avctx->channels
            || buf_size < (4+15)*4*avctx->channels) {
            src += buf_size;
            break;
        }
        for (channel=0; channel<avctx->channels; channel++) {
            int coeff[2][4], shift[4];
            short *s2, *s = &samples[channel];
            for (n=0; n<4; n++, s+=32*avctx->channels) {
                for (i=0; i<2; i++)
                    coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i];
                shift[n] = (src[2]&0x0F) + 8;
                for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels)
                    s2[0] = (src[0]&0xF0) + (src[1]<<8);
            }

            for (m=2; m<32; m+=2) {
                s = &samples[m*avctx->channels + channel];
                for (n=0; n<4; n++, src++, s+=32*avctx->channels) {
                    for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) {
                        int level = (int32_t)((*src & (0xF0>>i)) << (24+i)) >> shift[n];
                        int pred = s2[-1*avctx->channels] * coeff[0][n]
                                 + s2[-2*avctx->channels] * coeff[1][n];
                        s2[0] = av_clip_int16((level + pred + 0x80) >> 8);
                    }
                }
            }
        }
        samples += 32*4*avctx->channels;
        break;
    case CODEC_ID_ADPCM_IMA_AMV:
    case CODEC_ID_ADPCM_IMA_SMJPEG:
        c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
        c->status[0].step_index = bytestream_get_le16(&src);

        if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
            src+=4;

        while (src < buf + buf_size) {
            char hi, lo;
            lo = *src & 0x0F;
            hi = *src >> 4;

            if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
                FFSWAP(char, hi, lo);

            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                lo, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                hi, 3);
            src++;
        }
        break;
    case CODEC_ID_ADPCM_CT:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] >> 4);
                *samples++ = adpcm_ct_expand_nibble(&c->status[1],
                    src[0] & 0x0F);
            } else {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] >> 4);
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_SBPRO_4:
    case CODEC_ID_ADPCM_SBPRO_3:
    case CODEC_ID_ADPCM_SBPRO_2:
        if (!c->status[0].step_index) {
            /* the first byte is a raw sample */
            *samples++ = 128 * (*src++ - 0x80);
            if (st)
                *samples++ = 128 * (*src++ - 0x80);
            c->status[0].step_index = 1;
        }
        if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
            while (src < buf + buf_size) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] >> 4, 4, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x0F, 4, 0);
                src++;
            }
        } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
            while (src < buf + buf_size && samples + 2 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] >> 5, 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x07, 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] & 0x03, 2, 0);
                src++;
            }
        } else {
            while (src < buf + buf_size && samples + 3 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] >> 6, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    (src[0] >> 4) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x03, 2, 2);
                src++;
            }
        }
        break;
    case CODEC_ID_ADPCM_SWF:
    {
        GetBitContext gb;
        const int *table;
        int k0, signmask, nb_bits, count;
        int size = buf_size*8;

        init_get_bits(&gb, buf, size);

        //read bits & initial values
        nb_bits = get_bits(&gb, 2)+2;
        //av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", nb_bits);
        table = swf_index_tables[nb_bits-2];
        k0 = 1 << (nb_bits-2);
        signmask = 1 << (nb_bits-1);

        while (get_bits_count(&gb) <= size - 22*avctx->channels) {
            for (i = 0; i < avctx->channels; i++) {
                *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
                c->status[i].step_index = get_bits(&gb, 6);
            }

            for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
                int i;

                for (i = 0; i < avctx->channels; i++) {
                    // similar to IMA adpcm
                    int delta = get_bits(&gb, nb_bits);
                    int step = step_table[c->status[i].step_index];
                    long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                    int k = k0;

                    do {
                        if (delta & k)
                            vpdiff += step;
                        step >>= 1;
                        k >>= 1;
                    } while(k);
                    vpdiff += step;

                    if (delta & signmask)
                        c->status[i].predictor -= vpdiff;
                    else
                        c->status[i].predictor += vpdiff;

                    c->status[i].step_index += table[delta & (~signmask)];

                    c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                    c->status[i].predictor = av_clip_int16(c->status[i].predictor);

                    *samples++ = c->status[i].predictor;
                    if (samples >= samples_end) {
                        av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
                        return -1;
                    }
                }
            }
        }
        src += buf_size;
        break;
    }
    case CODEC_ID_ADPCM_YAMAHA:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
                    src[0] >> 4);
            } else {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] >> 4);
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_THP:
    {
        int table[2][16];
        unsigned int samplecnt;
        int prev[2][2];
        int ch;

        if (buf_size < 80) {
            av_log(avctx, AV_LOG_ERROR, "frame too small\n");
            return -1;
        }

        src+=4;
        samplecnt = bytestream_get_be32(&src);

        for (i = 0; i < 32; i++)
            table[0][i] = (int16_t)bytestream_get_be16(&src);

        /* Initialize the previous sample. */
        for (i = 0; i < 4; i++)
            prev[0][i] = (int16_t)bytestream_get_be16(&src);

        if (samplecnt >= (samples_end - samples) / (st + 1)) {
            av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
            return -1;
        }

        for (ch = 0; ch <= st; ch++) {
            samples = (unsigned short *) data + ch;

            /* Read in every sample for this channel. */
            for (i = 0; i < samplecnt / 14; i++) {
                int index = (*src >> 4) & 7;
                unsigned int exp = 28 - (*src++ & 15);
                int factor1 = table[ch][index * 2];
                int factor2 = table[ch][index * 2 + 1];

                /* Decode 14 samples. */
                for (n = 0; n < 14; n++) {
                    int32_t sampledat;
                    if(n&1) sampledat= *src++ <<28;
                    else sampledat= (*src&0xF0)<<24;

                    sampledat = ((prev[ch][0]*factor1
                                + prev[ch][1]*factor2) >> 11) + (sampledat>>exp);
                    *samples = av_clip_int16(sampledat);
                    prev[ch][1] = prev[ch][0];
                    prev[ch][0] = *samples++;

                    /* In case of stereo, skip one sample, this sample
                       is for the other channel. */
                    samples += st;
                }
            }
        }

        /* In the previous loop, in case stereo is used, samples is
           increased exactly one time too often. */
        samples -= st;
        break;
    }

    default:
        return -1;
    }
    *data_size = (uint8_t *)samples - (uint8_t *)data;
    return src - buf;
}



#if CONFIG_ENCODERS
#define ADPCM_ENCODER(id,name,long_name_)       \
AVCodec name ## _encoder = {                    \
    #name,                                      \
    AVMEDIA_TYPE_AUDIO,                         \
    id,                                         \
    sizeof(ADPCMContext),                       \
    adpcm_encode_init,                          \
    adpcm_encode_frame,                         \
    adpcm_encode_close,                         \
    NULL,                                       \
    .sample_fmts = (const enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, \
    .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
};
#else
#define ADPCM_ENCODER(id,name,long_name_)
#endif

#if CONFIG_DECODERS
#define ADPCM_DECODER(id,name,long_name_)       \
AVCodec name ## _decoder = {                    \
    #name,                                      \
    AVMEDIA_TYPE_AUDIO,                         \
    id,                                         \
    sizeof(ADPCMContext),                       \
    adpcm_decode_init,                          \
    NULL,                                       \
    NULL,                                       \
    adpcm_decode_frame,                         \
    .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
};
#else
#define ADPCM_DECODER(id,name,long_name_)
#endif

#define ADPCM_CODEC(id,name,long_name_) \
    ADPCM_ENCODER(id,name,long_name_) ADPCM_DECODER(id,name,long_name_)

/* Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_CODEC  (CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_CODEC  (CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP");
ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
ADPCM_CODEC  (CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");