00021 #include "avcodec.h"
00022 #include "bitstream.h"
00023 #include "bytestream.h"
00024
00056 #define BLKSIZE 1024
00057
00058
00059
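/* IMA ADPCM step-index adjustment table, indexed by the 4-bit code */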
00060 static const int index_table[16] = {
00061 -1, -1, -1, -1, 2, 4, 6, 8,
00062 -1, -1, -1, -1, 2, 4, 6, 8,
00063 };
00064
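/* IMA ADPCM quantizer step size table (89 entries, indexed by step_index 0..88) */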
00069 static const int step_table[89] = {
00070 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
00071 19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
00072 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
00073 130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
00074 337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
00075 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
00076 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
00077 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
00078 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
00079 };
00080
00081
00082
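/* Microsoft ADPCM tables: delta adaptation factors and the seven predictor coefficient pairs */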
00083 static const int AdaptationTable[] = {
00084 230, 230, 230, 230, 307, 409, 512, 614,
00085 768, 614, 512, 409, 307, 230, 230, 230
00086 };
00087
00088 static const uint8_t AdaptCoeff1[] = {
00089 64, 128, 0, 48, 60, 115, 98
00090 };
00091
00092 static const int8_t AdaptCoeff2[] = {
00093 0, -64, 0, 16, 0, -52, -58
00094 };
00095
00096
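/* CD-ROM XA ADPCM predictor coefficient pairs */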
00097 static const int xa_adpcm_table[5][2] = {
00098 { 0, 0 },
00099 { 60, 0 },
00100 { 115, -52 },
00101 { 98, -55 },
00102 { 122, -60 }
00103 };
00104
00105 static const int ea_adpcm_table[] = {
00106 0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
00107 3, 4, 7, 8, 10, 11, 0, -1, -3, -4
00108 };
00109
00110
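/* SWF ADPCM index adjustment tables for 2-, 3-, 4- and 5-bit codes (shorter rows are zero padded) */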
00111 static const int swf_index_tables[4][16] = {
00112 { -1, 2 },
00113 { -1, -1, 2, 4 },
00114 { -1, -1, -1, -1, 2, 4, 6, 8 },
00115 { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
00116 };
00117
00118 static const int yamaha_indexscale[] = {
00119 230, 230, 230, 230, 307, 409, 512, 614,
00120 230, 230, 230, 230, 307, 409, 512, 614
00121 };
00122
00123 static const int yamaha_difflookup[] = {
00124 1, 3, 5, 7, 9, 11, 13, 15,
00125 -1, -3, -5, -7, -9, -11, -13, -15
00126 };
00127
00128
00129
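/* per-channel ADPCM encoder/decoder state */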
00130 typedef struct ADPCMChannelStatus {
00131 int predictor;
00132 short int step_index;
00133 int step;
00134
00135 int prev_sample;
00136
00137
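/* MS ADPCM specific state */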
00138 short sample1;
00139 short sample2;
00140 int coeff1;
00141 int coeff2;
00142 int idelta;
00143 } ADPCMChannelStatus;
00144
00145 typedef struct ADPCMContext {
00146 ADPCMChannelStatus status[6];
00147 } ADPCMContext;
00148
00149
00150
00151 #if CONFIG_ENCODERS
00152 static av_cold int adpcm_encode_init(AVCodecContext *avctx)
00153 {
00154 if (avctx->channels > 2)
00155 return -1;
00156
00157 if(avctx->trellis && (unsigned)avctx->trellis > 16U){
00158 av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
00159 return -1;
00160 }
00161
00162 switch(avctx->codec->id) {
00163 case CODEC_ID_ADPCM_IMA_WAV:
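        /* each sample is coded as one nibble and there is a 4-byte header per channel;
           the +1 accounts for the sample carried in the header */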
00164 avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1;
00165
00166 avctx->block_align = BLKSIZE;
00167
00168 break;
00169 case CODEC_ID_ADPCM_IMA_QT:
00170 avctx->frame_size = 64;
00171 avctx->block_align = 34 * avctx->channels;
00172 break;
00173 case CODEC_ID_ADPCM_MS:
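        /* 7 bytes of header per channel and two nibbles per byte;
           the +2 accounts for the two samples stored in the header */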
00174 avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
00175
00176 avctx->block_align = BLKSIZE;
00177 break;
00178 case CODEC_ID_ADPCM_YAMAHA:
00179 avctx->frame_size = BLKSIZE * avctx->channels;
00180 avctx->block_align = BLKSIZE;
00181 break;
00182 case CODEC_ID_ADPCM_SWF:
00183 if (avctx->sample_rate != 11025 &&
00184 avctx->sample_rate != 22050 &&
00185 avctx->sample_rate != 44100) {
00186 av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
00187 return -1;
00188 }
00189 avctx->frame_size = 512 * (avctx->sample_rate / 11025);
00190 break;
00191 default:
00192 return -1;
00193 break;
00194 }
00195
00196 avctx->coded_frame= avcodec_alloc_frame();
00197 avctx->coded_frame->key_frame= 1;
00198
00199 return 0;
00200 }
00201
00202 static av_cold int adpcm_encode_close(AVCodecContext *avctx)
00203 {
00204 av_freep(&avctx->coded_frame);
00205
00206 return 0;
00207 }
00208
00209
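/* Quantize one sample against the running prediction and return the 4-bit IMA code;
   the prediction and step index are updated in place. */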
00210 static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
00211 {
00212 int delta = sample - c->prev_sample;
00213 int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
00214 c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
00215 c->prev_sample = av_clip_int16(c->prev_sample);
00216 c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
00217 return nibble;
00218 }
00219
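/* Encode one sample as a signed 4-bit MS ADPCM code and update the two-sample predictor and delta. */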
00220 static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
00221 {
00222 int predictor, nibble, bias;
00223
00224 predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
00225
00226 nibble= sample - predictor;
00227 if(nibble>=0) bias= c->idelta/2;
00228 else bias=-c->idelta/2;
00229
00230 nibble= (nibble + bias) / c->idelta;
00231 nibble= av_clip(nibble, -8, 7)&0x0F;
00232
00233 predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
00234
00235 c->sample2 = c->sample1;
00236 c->sample1 = av_clip_int16(predictor);
00237
00238 c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
00239 if (c->idelta < 16) c->idelta = 16;
00240
00241 return nibble;
00242 }
00243
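/* Encode one sample as a 4-bit Yamaha ADPCM code. */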
00244 static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
00245 {
00246 int nibble, delta;
00247
00248 if(!c->step) {
00249 c->predictor = 0;
00250 c->step = 127;
00251 }
00252
00253 delta = sample - c->predictor;
00254
00255 nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;
00256
00257 c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
00258 c->predictor = av_clip_int16(c->predictor);
00259 c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
00260 c->step = av_clip(c->step, 127, 24567);
00261
00262 return nibble;
00263 }
00264
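/* Trellis quantization: instead of greedily coding each sample, keep the best
   'frontier' decoder states sorted by accumulated squared error, try the
   neighbouring nibbles for each state, and emit the path with the lowest total
   error; paths are flushed every FREEZE_INTERVAL samples. */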
00265 typedef struct TrellisPath {
00266 int nibble;
00267 int prev;
00268 } TrellisPath;
00269
00270 typedef struct TrellisNode {
00271 uint32_t ssd;
00272 int path;
00273 int sample1;
00274 int sample2;
00275 int step;
00276 } TrellisNode;
00277
00278 static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
00279 uint8_t *dst, ADPCMChannelStatus *c, int n)
00280 {
00281 #define FREEZE_INTERVAL 128
00282
00283 const int frontier = 1 << avctx->trellis;
00284 const int stride = avctx->channels;
00285 const int version = avctx->codec->id;
00286 const int max_paths = frontier*FREEZE_INTERVAL;
00287 TrellisPath paths[max_paths], *p;
00288 TrellisNode node_buf[2][frontier];
00289 TrellisNode *nodep_buf[2][frontier];
00290 TrellisNode **nodes = nodep_buf[0];
00291 TrellisNode **nodes_next = nodep_buf[1];
00292 int pathn = 0, froze = -1, i, j, k;
00293
00294 assert(!(max_paths&(max_paths-1)));
00295
00296 memset(nodep_buf, 0, sizeof(nodep_buf));
00297 nodes[0] = &node_buf[1][0];
00298 nodes[0]->ssd = 0;
00299 nodes[0]->path = 0;
00300 nodes[0]->step = c->step_index;
00301 nodes[0]->sample1 = c->sample1;
00302 nodes[0]->sample2 = c->sample2;
00303 if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
00304 nodes[0]->sample1 = c->prev_sample;
00305 if(version == CODEC_ID_ADPCM_MS)
00306 nodes[0]->step = c->idelta;
00307 if(version == CODEC_ID_ADPCM_YAMAHA) {
00308 if(c->step == 0) {
00309 nodes[0]->step = 127;
00310 nodes[0]->sample1 = 0;
00311 } else {
00312 nodes[0]->step = c->step;
00313 nodes[0]->sample1 = c->predictor;
00314 }
00315 }
00316
00317 for(i=0; i<n; i++) {
00318 TrellisNode *t = node_buf[i&1];
00319 TrellisNode **u;
00320 int sample = samples[i*stride];
00321 memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
00322 for(j=0; j<frontier && nodes[j]; j++) {
00323
00324 const int range = (j < frontier/2) ? 1 : 0;
00325 const int step = nodes[j]->step;
00326 int nidx;
00327 if(version == CODEC_ID_ADPCM_MS) {
00328 const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64;
00329 const int div = (sample - predictor) / step;
00330 const int nmin = av_clip(div-range, -8, 6);
00331 const int nmax = av_clip(div+range, -7, 7);
00332 for(nidx=nmin; nidx<=nmax; nidx++) {
00333 const int nibble = nidx & 0xf;
00334 int dec_sample = predictor + nidx * step;
00335 #define STORE_NODE(NAME, STEP_INDEX)\
00336 int d;\
00337 uint32_t ssd;\
00338 dec_sample = av_clip_int16(dec_sample);\
00339 d = sample - dec_sample;\
00340 ssd = nodes[j]->ssd + d*d;\
00341 if(nodes_next[frontier-1] && ssd >= nodes_next[frontier-1]->ssd)\
00342 continue;\
00343
00344
00345 \
00346 for(k=0; k<frontier && nodes_next[k]; k++) {\
00347 if(dec_sample == nodes_next[k]->sample1) {\
00348 assert(ssd >= nodes_next[k]->ssd);\
00349 goto next_##NAME;\
00350 }\
00351 }\
00352 for(k=0; k<frontier; k++) {\
00353 if(!nodes_next[k] || ssd < nodes_next[k]->ssd) {\
00354 TrellisNode *u = nodes_next[frontier-1];\
00355 if(!u) {\
00356 assert(pathn < max_paths);\
00357 u = t++;\
00358 u->path = pathn++;\
00359 }\
00360 u->ssd = ssd;\
00361 u->step = STEP_INDEX;\
00362 u->sample2 = nodes[j]->sample1;\
00363 u->sample1 = dec_sample;\
00364 paths[u->path].nibble = nibble;\
00365 paths[u->path].prev = nodes[j]->path;\
00366 memmove(&nodes_next[k+1], &nodes_next[k], (frontier-k-1)*sizeof(TrellisNode*));\
00367 nodes_next[k] = u;\
00368 break;\
00369 }\
00370 }\
00371 next_##NAME:;
00372 STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
00373 }
00374 } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
00375 #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
00376 const int predictor = nodes[j]->sample1;\
00377 const int div = (sample - predictor) * 4 / STEP_TABLE;\
00378 int nmin = av_clip(div-range, -7, 6);\
00379 int nmax = av_clip(div+range, -6, 7);\
00380 if(nmin<=0) nmin--; \
00381 if(nmax<0) nmax--;\
00382 for(nidx=nmin; nidx<=nmax; nidx++) {\
00383 const int nibble = nidx<0 ? 7-nidx : nidx;\
00384 int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
00385 STORE_NODE(NAME, STEP_INDEX);\
00386 }
00387 LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
00388 } else {
00389 LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
00390 #undef LOOP_NODES
00391 #undef STORE_NODE
00392 }
00393 }
00394
00395 u = nodes;
00396 nodes = nodes_next;
00397 nodes_next = u;
00398
00399
00400 if(nodes[0]->ssd > (1<<28)) {
00401 for(j=1; j<frontier && nodes[j]; j++)
00402 nodes[j]->ssd -= nodes[0]->ssd;
00403 nodes[0]->ssd = 0;
00404 }
00405
00406
00407 if(i == froze + FREEZE_INTERVAL) {
00408 p = &paths[nodes[0]->path];
00409 for(k=i; k>froze; k--) {
00410 dst[k] = p->nibble;
00411 p = &paths[p->prev];
00412 }
00413 froze = i;
00414 pathn = 0;
00415
00416
00417
00418 memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
00419 }
00420 }
00421
00422 p = &paths[nodes[0]->path];
00423 for(i=n-1; i>froze; i--) {
00424 dst[i] = p->nibble;
00425 p = &paths[p->prev];
00426 }
00427
00428 c->predictor = nodes[0]->sample1;
00429 c->sample1 = nodes[0]->sample1;
00430 c->sample2 = nodes[0]->sample2;
00431 c->step_index = nodes[0]->step;
00432 c->step = nodes[0]->step;
00433 c->idelta = nodes[0]->step;
00434 }
00435
00436 static int adpcm_encode_frame(AVCodecContext *avctx,
00437 unsigned char *frame, int buf_size, void *data)
00438 {
00439 int n, i, st;
00440 short *samples;
00441 unsigned char *dst;
00442 ADPCMContext *c = avctx->priv_data;
00443
00444 dst = frame;
00445 samples = (short *)data;
00446 st= avctx->channels == 2;
00447
00448
00449 switch(avctx->codec->id) {
00450 case CODEC_ID_ADPCM_IMA_WAV:
00451 n = avctx->frame_size / 8;
00452 c->status[0].prev_sample = (signed short)samples[0];
00453
00454 bytestream_put_le16(&dst, c->status[0].prev_sample);
00455 *dst++ = (unsigned char)c->status[0].step_index;
00456 *dst++ = 0;
00457 samples++;
00458 if (avctx->channels == 2) {
00459 c->status[1].prev_sample = (signed short)samples[0];
00460
00461 bytestream_put_le16(&dst, c->status[1].prev_sample);
00462 *dst++ = (unsigned char)c->status[1].step_index;
00463 *dst++ = 0;
00464 samples++;
00465 }
00466
00467
00468 if(avctx->trellis > 0) {
00469 uint8_t buf[2][n*8];
00470 adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n*8);
00471 if(avctx->channels == 2)
00472 adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n*8);
00473 for(i=0; i<n; i++) {
00474 *dst++ = buf[0][8*i+0] | (buf[0][8*i+1] << 4);
00475 *dst++ = buf[0][8*i+2] | (buf[0][8*i+3] << 4);
00476 *dst++ = buf[0][8*i+4] | (buf[0][8*i+5] << 4);
00477 *dst++ = buf[0][8*i+6] | (buf[0][8*i+7] << 4);
00478 if (avctx->channels == 2) {
00479 *dst++ = buf[1][8*i+0] | (buf[1][8*i+1] << 4);
00480 *dst++ = buf[1][8*i+2] | (buf[1][8*i+3] << 4);
00481 *dst++ = buf[1][8*i+4] | (buf[1][8*i+5] << 4);
00482 *dst++ = buf[1][8*i+6] | (buf[1][8*i+7] << 4);
00483 }
00484 }
00485 } else
00486 for (; n>0; n--) {
00487 *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
00488 *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
00489 dst++;
00490 *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
00491 *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
00492 dst++;
00493 *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
00494 *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
00495 dst++;
00496 *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
00497 *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
00498 dst++;
00499
00500 if (avctx->channels == 2) {
00501 *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
00502 *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
00503 dst++;
00504 *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
00505 *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
00506 dst++;
00507 *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
00508 *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
00509 dst++;
00510 *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
00511 *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
00512 dst++;
00513 }
00514 samples += 8 * avctx->channels;
00515 }
00516 break;
00517 case CODEC_ID_ADPCM_IMA_QT:
00518 {
00519 int ch, i;
00520 PutBitContext pb;
00521 init_put_bits(&pb, dst, buf_size*8);
00522
00523 for(ch=0; ch<avctx->channels; ch++){
00524 put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
00525 put_bits(&pb, 7, c->status[ch].step_index);
00526 if(avctx->trellis > 0) {
00527 uint8_t buf[64];
00528 adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
00529 for(i=0; i<64; i++)
00530 put_bits(&pb, 4, buf[i^1]);
00531 c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F;
00532 } else {
00533 for (i=0; i<64; i+=2){
00534 int t1, t2;
00535 t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
00536 t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
00537 put_bits(&pb, 4, t2);
00538 put_bits(&pb, 4, t1);
00539 }
00540 c->status[ch].prev_sample &= ~0x7F;
00541 }
00542 }
00543
00544 dst += put_bits_count(&pb)>>3;
00545 break;
00546 }
00547 case CODEC_ID_ADPCM_SWF:
00548 {
00549 int i;
00550 PutBitContext pb;
00551 init_put_bits(&pb, dst, buf_size*8);
00552
00553 n = avctx->frame_size-1;
00554
00555
00556 put_bits(&pb, 2, 2);
00557
00558
00559 for(i=0; i<avctx->channels; i++){
00560 c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
00561 put_sbits(&pb, 16, samples[i]);
00562 put_bits(&pb, 6, c->status[i].step_index);
00563 c->status[i].prev_sample = (signed short)samples[i];
00564 }
00565
00566 if(avctx->trellis > 0) {
00567 uint8_t buf[2][n];
00568 adpcm_compress_trellis(avctx, samples+2, buf[0], &c->status[0], n);
00569 if (avctx->channels == 2)
00570 adpcm_compress_trellis(avctx, samples+3, buf[1], &c->status[1], n);
00571 for(i=0; i<n; i++) {
00572 put_bits(&pb, 4, buf[0][i]);
00573 if (avctx->channels == 2)
00574 put_bits(&pb, 4, buf[1][i]);
00575 }
00576 } else {
00577 for (i=1; i<avctx->frame_size; i++) {
00578 put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
00579 if (avctx->channels == 2)
00580 put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
00581 }
00582 }
00583 flush_put_bits(&pb);
00584 dst += put_bits_count(&pb)>>3;
00585 break;
00586 }
00587 case CODEC_ID_ADPCM_MS:
00588 for(i=0; i<avctx->channels; i++){
00589 int predictor=0;
00590
00591 *dst++ = predictor;
00592 c->status[i].coeff1 = AdaptCoeff1[predictor];
00593 c->status[i].coeff2 = AdaptCoeff2[predictor];
00594 }
00595 for(i=0; i<avctx->channels; i++){
00596 if (c->status[i].idelta < 16)
00597 c->status[i].idelta = 16;
00598
00599 bytestream_put_le16(&dst, c->status[i].idelta);
00600 }
00601 for(i=0; i<avctx->channels; i++){
00602 c->status[i].sample2= *samples++;
00603 }
00604 for(i=0; i<avctx->channels; i++){
00605 c->status[i].sample1= *samples++;
00606
00607 bytestream_put_le16(&dst, c->status[i].sample1);
00608 }
00609 for(i=0; i<avctx->channels; i++)
00610 bytestream_put_le16(&dst, c->status[i].sample2);
00611
00612 if(avctx->trellis > 0) {
00613 int n = avctx->block_align - 7*avctx->channels;
00614 uint8_t buf[2][n];
00615 if(avctx->channels == 1) {
00616 n *= 2;
00617 adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
00618 for(i=0; i<n; i+=2)
00619 *dst++ = (buf[0][i] << 4) | buf[0][i+1];
00620 } else {
00621 adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
00622 adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
00623 for(i=0; i<n; i++)
00624 *dst++ = (buf[0][i] << 4) | buf[1][i];
00625 }
00626 } else
00627 for(i=7*avctx->channels; i<avctx->block_align; i++) {
00628 int nibble;
00629 nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
00630 nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
00631 *dst++ = nibble;
00632 }
00633 break;
00634 case CODEC_ID_ADPCM_YAMAHA:
00635 n = avctx->frame_size / 2;
00636 if(avctx->trellis > 0) {
00637 uint8_t buf[2][n*2];
00638 n *= 2;
00639 if(avctx->channels == 1) {
00640 adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
00641 for(i=0; i<n; i+=2)
00642 *dst++ = buf[0][i] | (buf[0][i+1] << 4);
00643 } else {
00644 adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
00645 adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
00646 for(i=0; i<n; i++)
00647 *dst++ = buf[0][i] | (buf[1][i] << 4);
00648 }
00649 } else
00650 for (; n>0; n--) {
00651 for(i = 0; i < avctx->channels; i++) {
00652 int nibble;
00653 nibble = adpcm_yamaha_compress_sample(&c->status[i], samples[i]);
00654 nibble |= adpcm_yamaha_compress_sample(&c->status[i], samples[i+avctx->channels]) << 4;
00655 *dst++ = nibble;
00656 }
00657 samples += 2 * avctx->channels;
00658 }
00659 break;
00660 default:
00661 return -1;
00662 }
00663 return dst - frame;
00664 }
00665 #endif //CONFIG_ENCODERS
00666
00667 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
00668 {
00669 ADPCMContext *c = avctx->priv_data;
00670 unsigned int max_channels = 2;
00671
00672 switch(avctx->codec->id) {
00673 case CODEC_ID_ADPCM_EA_R1:
00674 case CODEC_ID_ADPCM_EA_R2:
00675 case CODEC_ID_ADPCM_EA_R3:
00676 max_channels = 6;
00677 break;
00678 }
00679 if(avctx->channels > max_channels){
00680 return -1;
00681 }
00682
00683 switch(avctx->codec->id) {
00684 case CODEC_ID_ADPCM_CT:
00685 c->status[0].step = c->status[1].step = 511;
00686 break;
00687 case CODEC_ID_ADPCM_IMA_WS:
00688 if (avctx->extradata && avctx->extradata_size == 2 * 4) {
00689 c->status[0].predictor = AV_RL32(avctx->extradata);
00690 c->status[1].predictor = AV_RL32(avctx->extradata + 4);
00691 }
00692 break;
00693 default:
00694 break;
00695 }
00696 avctx->sample_fmt = SAMPLE_FMT_S16;
00697 return 0;
00698 }
00699
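/* Expand one IMA ADPCM nibble: diff = ((2 * delta + 1) * step) >> shift,
   added or subtracted according to the sign bit. */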
00700 static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
00701 {
00702 int step_index;
00703 int predictor;
00704 int sign, delta, diff, step;
00705
00706 step = step_table[c->step_index];
00707 step_index = c->step_index + index_table[(unsigned)nibble];
00708 if (step_index < 0) step_index = 0;
00709 else if (step_index > 88) step_index = 88;
00710
00711 sign = nibble & 8;
00712 delta = nibble & 7;
00713
00714
00715
00716 diff = ((2 * delta + 1) * step) >> shift;
00717 predictor = c->predictor;
00718 if (sign) predictor -= diff;
00719 else predictor += diff;
00720
00721 c->predictor = av_clip_int16(predictor);
00722 c->step_index = step_index;
00723
00724 return (short)c->predictor;
00725 }
00726
00727 static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
00728 {
00729 int predictor;
00730
00731 predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
00732 predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
00733
00734 c->sample2 = c->sample1;
00735 c->sample1 = av_clip_int16(predictor);
00736 c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
00737 if (c->idelta < 16) c->idelta = 16;
00738
00739 return c->sample1;
00740 }
00741
00742 static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
00743 {
00744 int sign, delta, diff;
00745 int new_step;
00746
00747 sign = nibble & 8;
00748 delta = nibble & 7;
00749
00750
00751
00752 diff = ((2 * delta + 1) * c->step) >> 3;
00753
00754 c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
00755 c->predictor = av_clip_int16(c->predictor);
00756
00757 new_step = (AdaptationTable[nibble & 7] * c->step) >> 8;
00758 c->step = av_clip(new_step, 511, 32767);
00759
00760 return (short)c->predictor;
00761 }
00762
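/* Expand a Creative Sound Blaster Pro code of 'size' bits (2, 3 or 4) with its own step adaptation. */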
00763 static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
00764 {
00765 int sign, delta, diff;
00766
00767 sign = nibble & (1<<(size-1));
00768 delta = nibble & ((1<<(size-1))-1);
00769 diff = delta << (7 + c->step + shift);
00770
00771
00772 c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
00773
00774
00775 if (delta >= (2*size - 3) && c->step < 3)
00776 c->step++;
00777 else if (delta == 0 && c->step > 0)
00778 c->step--;
00779
00780 return (short) c->predictor;
00781 }
00782
00783 static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
00784 {
00785 if(!c->step) {
00786 c->predictor = 0;
00787 c->step = 127;
00788 }
00789
00790 c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
00791 c->predictor = av_clip_int16(c->predictor);
00792 c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
00793 c->step = av_clip(c->step, 127, 24567);
00794 return c->predictor;
00795 }
00796
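/* Decode one 128-byte CD-ROM XA sound group: 8 interleaved 28-sample units, each
   with its own filter/shift byte; 'inc' is 1 for mono and 2 for stereo output. */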
00797 static void xa_decode(short *out, const unsigned char *in,
00798 ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
00799 {
00800 int i, j;
00801 int shift,filter,f0,f1;
00802 int s_1,s_2;
00803 int d,s,t;
00804
00805 for(i=0;i<4;i++) {
00806
00807 shift = 12 - (in[4+i*2] & 15);
00808 filter = in[4+i*2] >> 4;
00809 f0 = xa_adpcm_table[filter][0];
00810 f1 = xa_adpcm_table[filter][1];
00811
00812 s_1 = left->sample1;
00813 s_2 = left->sample2;
00814
00815 for(j=0;j<28;j++) {
00816 d = in[16+i+j*4];
00817
00818 t = (signed char)(d<<4)>>4;
00819 s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
00820 s_2 = s_1;
00821 s_1 = av_clip_int16(s);
00822 *out = s_1;
00823 out += inc;
00824 }
00825
00826 if (inc==2) {
00827 left->sample1 = s_1;
00828 left->sample2 = s_2;
00829 s_1 = right->sample1;
00830 s_2 = right->sample2;
00831 out = out + 1 - 28*2;
00832 }
00833
00834 shift = 12 - (in[5+i*2] & 15);
00835 filter = in[5+i*2] >> 4;
00836
00837 f0 = xa_adpcm_table[filter][0];
00838 f1 = xa_adpcm_table[filter][1];
00839
00840 for(j=0;j<28;j++) {
00841 d = in[16+i+j*4];
00842
00843 t = (signed char)d >> 4;
00844 s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
00845 s_2 = s_1;
00846 s_1 = av_clip_int16(s);
00847 *out = s_1;
00848 out += inc;
00849 }
00850
00851 if (inc==2) {
00852 right->sample1 = s_1;
00853 right->sample2 = s_2;
00854 out -= 1;
00855 } else {
00856 left->sample1 = s_1;
00857 left->sample2 = s_2;
00858 }
00859 }
00860 }
00861
00862
00863
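/* Duck DK3: fetch the next nibble, alternating between the low and high halves of each input byte. */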
00864 #define DK3_GET_NEXT_NIBBLE() \
00865 if (decode_top_nibble_next) \
00866 { \
00867 nibble = last_byte >> 4; \
00868 decode_top_nibble_next = 0; \
00869 } \
00870 else \
00871 { \
00872 last_byte = *src++; \
00873 if (src >= buf + buf_size) break; \
00874 nibble = last_byte & 0x0F; \
00875 decode_top_nibble_next = 1; \
00876 }
00877
00878 static int adpcm_decode_frame(AVCodecContext *avctx,
00879 void *data, int *data_size,
00880 const uint8_t *buf, int buf_size)
00881 {
00882 ADPCMContext *c = avctx->priv_data;
00883 ADPCMChannelStatus *cs;
00884 int n, m, channel, i;
00885 int block_predictor[2];
00886 short *samples;
00887 short *samples_end;
00888 const uint8_t *src;
00889 int st;
00890
00891
00892 unsigned char last_byte = 0;
00893 unsigned char nibble;
00894 int decode_top_nibble_next = 0;
00895 int diff_channel;
00896
00897
00898 uint32_t samples_in_chunk;
00899 int32_t previous_left_sample, previous_right_sample;
00900 int32_t current_left_sample, current_right_sample;
00901 int32_t next_left_sample, next_right_sample;
00902 int32_t coeff1l, coeff2l, coeff1r, coeff2r;
00903 uint8_t shift_left, shift_right;
00904 int count1, count2;
00905 int coeff[2][2], shift[2];
00906
00907 if (!buf_size)
00908 return 0;
00909
00910
00911
00912
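    /* rough output-size check: a 4-bit ADPCM byte expands to at most two 16-bit
       samples, plus a few samples of slack for block headers */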
00913 if(*data_size/4 < buf_size + 8)
00914 return -1;
00915
00916 samples = data;
00917 samples_end= samples + *data_size/2;
00918 *data_size= 0;
00919 src = buf;
00920
00921 st = avctx->channels == 2 ? 1 : 0;
00922
00923 switch(avctx->codec->id) {
00924 case CODEC_ID_ADPCM_IMA_QT:
00925 n = buf_size - 2*avctx->channels;
00926 for (channel = 0; channel < avctx->channels; channel++) {
00927 cs = &(c->status[channel]);
00928
00929
00930
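        /* 2-byte channel header: the top 9 bits are the high bits of the initial
           predictor, the low 7 bits are the step index */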
00931 cs->predictor = (*src++) << 8;
00932 cs->predictor |= (*src & 0x80);
00933 cs->predictor &= 0xFF80;
00934
00935
00936 if(cs->predictor & 0x8000)
00937 cs->predictor -= 0x10000;
00938
00939 cs->predictor = av_clip_int16(cs->predictor);
00940
00941 cs->step_index = (*src++) & 0x7F;
00942
00943 if (cs->step_index > 88){
00944 av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
00945 cs->step_index = 88;
00946 }
00947
00948 cs->step = step_table[cs->step_index];
00949
00950 samples = (short*)data + channel;
00951
00952 for(m=32; n>0 && m>0; n--, m--) {
00953 *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
00954 samples += avctx->channels;
00955 *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4 , 3);
00956 samples += avctx->channels;
00957 src ++;
00958 }
00959 }
00960 if (st)
00961 samples--;
00962 break;
00963 case CODEC_ID_ADPCM_IMA_WAV:
00964 if (avctx->block_align != 0 && buf_size > avctx->block_align)
00965 buf_size = avctx->block_align;
00966
00967
00968
00969 for(i=0; i<avctx->channels; i++){
00970 cs = &(c->status[i]);
00971 cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src);
00972
00973 cs->step_index = *src++;
00974 if (cs->step_index > 88){
00975 av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
00976 cs->step_index = 88;
00977 }
00978 if (*src++) av_log(avctx, AV_LOG_ERROR, "unused header byte should be 0 but is %d\n", src[-1]);
00979 }
00980
00981 while(src < buf + buf_size){
00982 for(m=0; m<4; m++){
00983 for(i=0; i<=st; i++)
00984 *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
00985 for(i=0; i<=st; i++)
00986 *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4 , 3);
00987 src++;
00988 }
00989 src += 4*st;
00990 }
00991 break;
00992 case CODEC_ID_ADPCM_4XM:
00993 cs = &(c->status[0]);
00994 c->status[0].predictor= (int16_t)bytestream_get_le16(&src);
00995 if(st){
00996 c->status[1].predictor= (int16_t)bytestream_get_le16(&src);
00997 }
00998 c->status[0].step_index= (int16_t)bytestream_get_le16(&src);
00999 if(st){
01000 c->status[1].step_index= (int16_t)bytestream_get_le16(&src);
01001 }
01002 if (cs->step_index < 0) cs->step_index = 0;
01003 if (cs->step_index > 88) cs->step_index = 88;
01004
01005 m= (buf_size - (src - buf))>>st;
01006 for(i=0; i<m; i++) {
01007 *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
01008 if (st)
01009 *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
01010 *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
01011 if (st)
01012 *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
01013 }
01014
01015 src += m<<st;
01016
01017 break;
01018 case CODEC_ID_ADPCM_MS:
01019 if (avctx->block_align != 0 && buf_size > avctx->block_align)
01020 buf_size = avctx->block_align;
01021 n = buf_size - 7 * avctx->channels;
01022 if (n < 0)
01023 return -1;
01024 block_predictor[0] = av_clip(*src++, 0, 6);
01025 block_predictor[1] = 0;
01026 if (st)
01027 block_predictor[1] = av_clip(*src++, 0, 6);
01028 c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
01029 if (st){
01030 c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
01031 }
01032 c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
01033 c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
01034 c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
01035 c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];
01036
01037 c->status[0].sample1 = bytestream_get_le16(&src);
01038 if (st) c->status[1].sample1 = bytestream_get_le16(&src);
01039 c->status[0].sample2 = bytestream_get_le16(&src);
01040 if (st) c->status[1].sample2 = bytestream_get_le16(&src);
01041
01042 *samples++ = c->status[0].sample2;
01043 if (st) *samples++ = c->status[1].sample2;
01044 *samples++ = c->status[0].sample1;
01045 if (st) *samples++ = c->status[1].sample1;
01046 for(;n>0;n--) {
01047 *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4 );
01048 *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
01049 src ++;
01050 }
01051 break;
01052 case CODEC_ID_ADPCM_IMA_DK4:
01053 if (avctx->block_align != 0 && buf_size > avctx->block_align)
01054 buf_size = avctx->block_align;
01055
01056 c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
01057 c->status[0].step_index = *src++;
01058 src++;
01059 *samples++ = c->status[0].predictor;
01060 if (st) {
01061 c->status[1].predictor = (int16_t)bytestream_get_le16(&src);
01062 c->status[1].step_index = *src++;
01063 src++;
01064 *samples++ = c->status[1].predictor;
01065 }
01066 while (src < buf + buf_size) {
01067
01068
01069 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01070 src[0] >> 4, 3);
01071
01072
01073
01074 if (st)
01075 *samples++ = adpcm_ima_expand_nibble(&c->status[1],
01076 src[0] & 0x0F, 3);
01077 else
01078 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01079 src[0] & 0x0F, 3);
01080
01081 src++;
01082 }
01083 break;
01084 case CODEC_ID_ADPCM_IMA_DK3:
01085 if (avctx->block_align != 0 && buf_size > avctx->block_align)
01086 buf_size = avctx->block_align;
01087
01088 if(buf_size + 16 > (samples_end - samples)*3/8)
01089 return -1;
01090
01091 c->status[0].predictor = (int16_t)AV_RL16(src + 10);
01092 c->status[1].predictor = (int16_t)AV_RL16(src + 12);
01093 c->status[0].step_index = src[14];
01094 c->status[1].step_index = src[15];
01095
01096 src += 16;
01097 diff_channel = c->status[1].predictor;
01098
01099
01100
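        /* DK3 carries a "sum" and a "diff" channel; output samples are
           reconstructed as sum+diff and sum-diff */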
01101 while (1) {
01102
01103
01104
01105
01106
01107 DK3_GET_NEXT_NIBBLE();
01108 adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
01109
01110
01111 DK3_GET_NEXT_NIBBLE();
01112 adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
01113
01114
01115 diff_channel = (diff_channel + c->status[1].predictor) / 2;
01116 *samples++ = c->status[0].predictor + c->status[1].predictor;
01117 *samples++ = c->status[0].predictor - c->status[1].predictor;
01118
01119
01120 DK3_GET_NEXT_NIBBLE();
01121 adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
01122
01123
01124 diff_channel = (diff_channel + c->status[1].predictor) / 2;
01125 *samples++ = c->status[0].predictor + c->status[1].predictor;
01126 *samples++ = c->status[0].predictor - c->status[1].predictor;
01127 }
01128 break;
01129 case CODEC_ID_ADPCM_IMA_ISS:
01130 c->status[0].predictor = (int16_t)AV_RL16(src + 0);
01131 c->status[0].step_index = src[2];
01132 src += 4;
01133 if(st) {
01134 c->status[1].predictor = (int16_t)AV_RL16(src + 0);
01135 c->status[1].step_index = src[2];
01136 src += 4;
01137 }
01138
01139 while (src < buf + buf_size) {
01140
01141 if (st) {
01142 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01143 src[0] >> 4 , 3);
01144 *samples++ = adpcm_ima_expand_nibble(&c->status[1],
01145 src[0] & 0x0F, 3);
01146 } else {
01147 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01148 src[0] & 0x0F, 3);
01149 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01150 src[0] >> 4 , 3);
01151 }
01152
01153 src++;
01154 }
01155 break;
01156 case CODEC_ID_ADPCM_IMA_WS:
01157
01158 while (src < buf + buf_size) {
01159
01160 if (st) {
01161 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01162 src[0] >> 4 , 3);
01163 *samples++ = adpcm_ima_expand_nibble(&c->status[1],
01164 src[0] & 0x0F, 3);
01165 } else {
01166 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01167 src[0] >> 4 , 3);
01168 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01169 src[0] & 0x0F, 3);
01170 }
01171
01172 src++;
01173 }
01174 break;
01175 case CODEC_ID_ADPCM_XA:
01176 while (buf_size >= 128) {
01177 xa_decode(samples, src, &c->status[0], &c->status[1],
01178 avctx->channels);
01179 src += 128;
01180 samples += 28 * 8;
01181 buf_size -= 128;
01182 }
01183 break;
01184 case CODEC_ID_ADPCM_IMA_EA_EACS:
01185 samples_in_chunk = bytestream_get_le32(&src) >> (1-st);
01186
01187 if (samples_in_chunk > buf_size-4-(8<<st)) {
01188 src += buf_size - 4;
01189 break;
01190 }
01191
01192 for (i=0; i<=st; i++)
01193 c->status[i].step_index = bytestream_get_le32(&src);
01194 for (i=0; i<=st; i++)
01195 c->status[i].predictor = bytestream_get_le32(&src);
01196
01197 for (; samples_in_chunk; samples_in_chunk--, src++) {
01198 *samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3);
01199 *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
01200 }
01201 break;
01202 case CODEC_ID_ADPCM_IMA_EA_SEAD:
01203 for (; src < buf+buf_size; src++) {
01204 *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
01205 *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6);
01206 }
01207 break;
01208 case CODEC_ID_ADPCM_EA:
01209 samples_in_chunk = AV_RL32(src);
01210 if (samples_in_chunk >= ((buf_size - 12) * 2)) {
01211 src += buf_size;
01212 break;
01213 }
01214 src += 4;
01215 current_left_sample = (int16_t)bytestream_get_le16(&src);
01216 previous_left_sample = (int16_t)bytestream_get_le16(&src);
01217 current_right_sample = (int16_t)bytestream_get_le16(&src);
01218 previous_right_sample = (int16_t)bytestream_get_le16(&src);
01219
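        /* the payload is split into 28-sample runs, each preceded by a coefficient byte and a shift byte */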
01220 for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
01221 coeff1l = ea_adpcm_table[ *src >> 4 ];
01222 coeff2l = ea_adpcm_table[(*src >> 4 ) + 4];
01223 coeff1r = ea_adpcm_table[*src & 0x0F];
01224 coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
01225 src++;
01226
01227 shift_left = (*src >> 4 ) + 8;
01228 shift_right = (*src & 0x0F) + 8;
01229 src++;
01230
01231 for (count2 = 0; count2 < 28; count2++) {
01232 next_left_sample = (int32_t)((*src & 0xF0) << 24) >> shift_left;
01233 next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right;
01234 src++;
01235
01236 next_left_sample = (next_left_sample +
01237 (current_left_sample * coeff1l) +
01238 (previous_left_sample * coeff2l) + 0x80) >> 8;
01239 next_right_sample = (next_right_sample +
01240 (current_right_sample * coeff1r) +
01241 (previous_right_sample * coeff2r) + 0x80) >> 8;
01242
01243 previous_left_sample = current_left_sample;
01244 current_left_sample = av_clip_int16(next_left_sample);
01245 previous_right_sample = current_right_sample;
01246 current_right_sample = av_clip_int16(next_right_sample);
01247 *samples++ = (unsigned short)current_left_sample;
01248 *samples++ = (unsigned short)current_right_sample;
01249 }
01250 }
01251 break;
01252 case CODEC_ID_ADPCM_EA_MAXIS_XA:
01253 for(channel = 0; channel < avctx->channels; channel++) {
01254 for (i=0; i<2; i++)
01255 coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i];
01256 shift[channel] = (*src & 0x0F) + 8;
01257 src++;
01258 }
01259 for (count1 = 0; count1 < (buf_size - avctx->channels) / avctx->channels; count1++) {
01260 for(i = 4; i >= 0; i-=4) {
01261 for(channel = 0; channel < avctx->channels; channel++) {
01262 int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel];
01263 sample = (sample +
01264 c->status[channel].sample1 * coeff[channel][0] +
01265 c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
01266 c->status[channel].sample2 = c->status[channel].sample1;
01267 c->status[channel].sample1 = av_clip_int16(sample);
01268 *samples++ = c->status[channel].sample1;
01269 }
01270 }
01271 src+=avctx->channels;
01272 }
01273 break;
01274 case CODEC_ID_ADPCM_EA_R1:
01275 case CODEC_ID_ADPCM_EA_R2:
01276 case CODEC_ID_ADPCM_EA_R3: {
01277
01278
01279
01280
01281 const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
01282 int32_t previous_sample, current_sample, next_sample;
01283 int32_t coeff1, coeff2;
01284 uint8_t shift;
01285 unsigned int channel;
01286 uint16_t *samplesC;
01287 const uint8_t *srcC;
01288 const uint8_t *src_end = buf + buf_size;
01289
01290 samples_in_chunk = (big_endian ? bytestream_get_be32(&src)
01291 : bytestream_get_le32(&src)) / 28;
01292 if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) ||
01293 28*samples_in_chunk*avctx->channels > samples_end-samples) {
01294 src += buf_size - 4;
01295 break;
01296 }
01297
01298 for (channel=0; channel<avctx->channels; channel++) {
01299 int32_t offset = (big_endian ? bytestream_get_be32(&src)
01300 : bytestream_get_le32(&src))
01301 + (avctx->channels-channel-1) * 4;
01302
01303 if ((offset < 0) || (offset >= src_end - src - 4)) break;
01304 srcC = src + offset;
01305 samplesC = samples + channel;
01306
01307 if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
01308 current_sample = (int16_t)bytestream_get_le16(&srcC);
01309 previous_sample = (int16_t)bytestream_get_le16(&srcC);
01310 } else {
01311 current_sample = c->status[channel].predictor;
01312 previous_sample = c->status[channel].prev_sample;
01313 }
01314
01315 for (count1=0; count1<samples_in_chunk; count1++) {
01316 if (*srcC == 0xEE) {
01317 srcC++;
01318 if (srcC > src_end - 30*2) break;
01319 current_sample = (int16_t)bytestream_get_be16(&srcC);
01320 previous_sample = (int16_t)bytestream_get_be16(&srcC);
01321
01322 for (count2=0; count2<28; count2++) {
01323 *samplesC = (int16_t)bytestream_get_be16(&srcC);
01324 samplesC += avctx->channels;
01325 }
01326 } else {
01327 coeff1 = ea_adpcm_table[ *srcC>>4 ];
01328 coeff2 = ea_adpcm_table[(*srcC>>4) + 4];
01329 shift = (*srcC++ & 0x0F) + 8;
01330
01331 if (srcC > src_end - 14) break;
01332 for (count2=0; count2<28; count2++) {
01333 if (count2 & 1)
01334 next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift;
01335 else
01336 next_sample = (int32_t)((*srcC & 0xF0) << 24) >> shift;
01337
01338 next_sample += (current_sample * coeff1) +
01339 (previous_sample * coeff2);
01340 next_sample = av_clip_int16(next_sample >> 8);
01341
01342 previous_sample = current_sample;
01343 current_sample = next_sample;
01344 *samplesC = current_sample;
01345 samplesC += avctx->channels;
01346 }
01347 }
01348 }
01349
01350 if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
01351 c->status[channel].predictor = current_sample;
01352 c->status[channel].prev_sample = previous_sample;
01353 }
01354 }
01355
01356 src = src + buf_size - (4 + 4*avctx->channels);
01357 samples += 28 * samples_in_chunk * avctx->channels;
01358 break;
01359 }
01360 case CODEC_ID_ADPCM_EA_XAS:
01361 if (samples_end-samples < 32*4*avctx->channels
01362 || buf_size < (4+15)*4*avctx->channels) {
01363 src += buf_size;
01364 break;
01365 }
01366 for (channel=0; channel<avctx->channels; channel++) {
01367 int coeff[2][4], shift[4];
01368 short *s2, *s = &samples[channel];
01369 for (n=0; n<4; n++, s+=32*avctx->channels) {
01370 for (i=0; i<2; i++)
01371 coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i];
01372 shift[n] = (src[2]&0x0F) + 8;
01373 for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels)
01374 s2[0] = (src[0]&0xF0) + (src[1]<<8);
01375 }
01376
01377 for (m=2; m<32; m+=2) {
01378 s = &samples[m*avctx->channels + channel];
01379 for (n=0; n<4; n++, src++, s+=32*avctx->channels) {
01380 for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) {
01381 int level = (int32_t)((*src & (0xF0>>i)) << (24+i)) >> shift[n];
01382 int pred = s2[-1*avctx->channels] * coeff[0][n]
01383 + s2[-2*avctx->channels] * coeff[1][n];
01384 s2[0] = av_clip_int16((level + pred + 0x80) >> 8);
01385 }
01386 }
01387 }
01388 }
01389 samples += 32*4*avctx->channels;
01390 break;
01391 case CODEC_ID_ADPCM_IMA_AMV:
01392 case CODEC_ID_ADPCM_IMA_SMJPEG:
01393 c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
01394 c->status[0].step_index = bytestream_get_le16(&src);
01395
01396 if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
01397 src+=4;
01398
01399 while (src < buf + buf_size) {
01400 char hi, lo;
01401 lo = *src & 0x0F;
01402 hi = *src >> 4;
01403
01404 if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
01405 FFSWAP(char, hi, lo);
01406
01407 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01408 lo, 3);
01409 *samples++ = adpcm_ima_expand_nibble(&c->status[0],
01410 hi, 3);
01411 src++;
01412 }
01413 break;
01414 case CODEC_ID_ADPCM_CT:
01415 while (src < buf + buf_size) {
01416 if (st) {
01417 *samples++ = adpcm_ct_expand_nibble(&c->status[0],
01418 src[0] >> 4);
01419 *samples++ = adpcm_ct_expand_nibble(&c->status[1],
01420 src[0] & 0x0F);
01421 } else {
01422 *samples++ = adpcm_ct_expand_nibble(&c->status[0],
01423 src[0] >> 4);
01424 *samples++ = adpcm_ct_expand_nibble(&c->status[0],
01425 src[0] & 0x0F);
01426 }
01427 src++;
01428 }
01429 break;
01430 case CODEC_ID_ADPCM_SBPRO_4:
01431 case CODEC_ID_ADPCM_SBPRO_3:
01432 case CODEC_ID_ADPCM_SBPRO_2:
01433 if (!c->status[0].step_index) {
01434
01435 *samples++ = 128 * (*src++ - 0x80);
01436 if (st)
01437 *samples++ = 128 * (*src++ - 0x80);
01438 c->status[0].step_index = 1;
01439 }
01440 if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
01441 while (src < buf + buf_size) {
01442 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01443 src[0] >> 4, 4, 0);
01444 *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
01445 src[0] & 0x0F, 4, 0);
01446 src++;
01447 }
01448 } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
01449 while (src < buf + buf_size && samples + 2 < samples_end) {
01450 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01451 src[0] >> 5 , 3, 0);
01452 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01453 (src[0] >> 2) & 0x07, 3, 0);
01454 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01455 src[0] & 0x03, 2, 0);
01456 src++;
01457 }
01458 } else {
01459 while (src < buf + buf_size && samples + 3 < samples_end) {
01460 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01461 src[0] >> 6 , 2, 2);
01462 *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
01463 (src[0] >> 4) & 0x03, 2, 2);
01464 *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
01465 (src[0] >> 2) & 0x03, 2, 2);
01466 *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
01467 src[0] & 0x03, 2, 2);
01468 src++;
01469 }
01470 }
01471 break;
01472 case CODEC_ID_ADPCM_SWF:
01473 {
01474 GetBitContext gb;
01475 const int *table;
01476 int k0, signmask, nb_bits, count;
01477 int size = buf_size*8;
01478
01479 init_get_bits(&gb, buf, size);
01480
01481
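        /* the first 2 bits select the code size: 2 to 5 bits per sample */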
01482 nb_bits = get_bits(&gb, 2)+2;
01483
01484 table = swf_index_tables[nb_bits-2];
01485 k0 = 1 << (nb_bits-2);
01486 signmask = 1 << (nb_bits-1);
01487
01488 while (get_bits_count(&gb) <= size - 22*avctx->channels) {
01489 for (i = 0; i < avctx->channels; i++) {
01490 *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
01491 c->status[i].step_index = get_bits(&gb, 6);
01492 }
01493
01494 for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
01495 int i;
01496
01497 for (i = 0; i < avctx->channels; i++) {
01498
01499 int delta = get_bits(&gb, nb_bits);
01500 int step = step_table[c->status[i].step_index];
01501 long vpdiff = 0;
01502 int k = k0;
01503
01504 do {
01505 if (delta & k)
01506 vpdiff += step;
01507 step >>= 1;
01508 k >>= 1;
01509 } while(k);
01510 vpdiff += step;
01511
01512 if (delta & signmask)
01513 c->status[i].predictor -= vpdiff;
01514 else
01515 c->status[i].predictor += vpdiff;
01516
01517 c->status[i].step_index += table[delta & (~signmask)];
01518
01519 c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
01520 c->status[i].predictor = av_clip_int16(c->status[i].predictor);
01521
01522 *samples++ = c->status[i].predictor;
01523 if (samples >= samples_end) {
01524 av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
01525 return -1;
01526 }
01527 }
01528 }
01529 }
01530 src += buf_size;
01531 break;
01532 }
01533 case CODEC_ID_ADPCM_YAMAHA:
01534 while (src < buf + buf_size) {
01535 if (st) {
01536 *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
01537 src[0] & 0x0F);
01538 *samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
01539 src[0] >> 4 );
01540 } else {
01541 *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
01542 src[0] & 0x0F);
01543 *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
01544 src[0] >> 4 );
01545 }
01546 src++;
01547 }
01548 break;
01549 case CODEC_ID_ADPCM_THP:
01550 {
01551 int table[2][16];
01552 unsigned int samplecnt;
01553 int prev[2][2];
01554 int ch;
01555
01556 if (buf_size < 80) {
01557 av_log(avctx, AV_LOG_ERROR, "frame too small\n");
01558 return -1;
01559 }
01560
01561 src+=4;
01562 samplecnt = bytestream_get_be32(&src);
01563
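        /* both channels' 16-entry coefficient tables (and, below, the 2x2 previous
           samples) are read in one pass into the contiguous arrays */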
01564 for (i = 0; i < 32; i++)
01565 table[0][i] = (int16_t)bytestream_get_be16(&src);
01566
01567
01568 for (i = 0; i < 4; i++)
01569 prev[0][i] = (int16_t)bytestream_get_be16(&src);
01570
01571 if (samplecnt >= (samples_end - samples) / (st + 1)) {
01572 av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
01573 return -1;
01574 }
01575
01576 for (ch = 0; ch <= st; ch++) {
01577 samples = (unsigned short *) data + ch;
01578
01579
01580 for (i = 0; i < samplecnt / 14; i++) {
01581 int index = (*src >> 4) & 7;
01582 unsigned int exp = 28 - (*src++ & 15);
01583 int factor1 = table[ch][index * 2];
01584 int factor2 = table[ch][index * 2 + 1];
01585
01586
01587 for (n = 0; n < 14; n++) {
01588 int32_t sampledat;
01589 if(n&1) sampledat= *src++ <<28;
01590 else sampledat= (*src&0xF0)<<24;
01591
01592 sampledat = ((prev[ch][0]*factor1
01593 + prev[ch][1]*factor2) >> 11) + (sampledat>>exp);
01594 *samples = av_clip_int16(sampledat);
01595 prev[ch][1] = prev[ch][0];
01596 prev[ch][0] = *samples++;
01597
01598
01599
01600 samples += st;
01601 }
01602 }
01603 }
01604
01605
01606
01607 samples -= st;
01608 break;
01609 }
01610
01611 default:
01612 return -1;
01613 }
01614 *data_size = (uint8_t *)samples - (uint8_t *)data;
01615 return src - buf;
01616 }
01617
01618
01619
01620 #if CONFIG_ENCODERS
01621 #define ADPCM_ENCODER(id,name,long_name_) \
01622 AVCodec name ## _encoder = { \
01623 #name, \
01624 CODEC_TYPE_AUDIO, \
01625 id, \
01626 sizeof(ADPCMContext), \
01627 adpcm_encode_init, \
01628 adpcm_encode_frame, \
01629 adpcm_encode_close, \
01630 NULL, \
01631 .sample_fmts = (enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE}, \
01632 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
01633 };
01634 #else
01635 #define ADPCM_ENCODER(id,name,long_name_)
01636 #endif
01637
01638 #if CONFIG_DECODERS
01639 #define ADPCM_DECODER(id,name,long_name_) \
01640 AVCodec name ## _decoder = { \
01641 #name, \
01642 CODEC_TYPE_AUDIO, \
01643 id, \
01644 sizeof(ADPCMContext), \
01645 adpcm_decode_init, \
01646 NULL, \
01647 NULL, \
01648 adpcm_decode_frame, \
01649 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
01650 };
01651 #else
01652 #define ADPCM_DECODER(id,name,long_name_)
01653 #endif
01654
01655 #define ADPCM_CODEC(id,name,long_name_) \
01656 ADPCM_ENCODER(id,name,long_name_) ADPCM_DECODER(id,name,long_name_)
01657
01658
01659 ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
01660 ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology");
01661 ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts");
01662 ADPCM_DECODER(CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
01663 ADPCM_DECODER(CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1");
01664 ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2");
01665 ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3");
01666 ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
01667 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
01668 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
01669 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
01670 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
01671 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
01672 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
01673 ADPCM_CODEC (CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
01674 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
01675 ADPCM_CODEC (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
01676 ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
01677 ADPCM_CODEC (CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
01678 ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
01679 ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
01680 ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
01681 ADPCM_CODEC (CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
01682 ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP");
01683 ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
01684 ADPCM_CODEC (CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");