#define RC_VARIANCE 1 // use variance or ssd for fast rc

#include "libavutil/opt.h"
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "mpegvideo_common.h"
#include "dnxhdenc.h"

#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
#define DNX10BIT_QMAT_SHIFT 18 // The largest value that will not lead to overflow for 10bit samples.

static const AVOption options[]={
    {"nitris_compat", "encode with Avid Nitris compatibility", offsetof(DNXHDEncContext, nitris_compat), AV_OPT_TYPE_INT, {.dbl = 0}, 0, 1, VE},
    {NULL}
};
static const AVClass class = { "dnxhd", av_default_item_name, options, LIBAVUTIL_VERSION_INT };

#define LAMBDA_FRAC_BITS 10

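/* Grab the four available source rows of an 8x8 block and mirror them to fill
 * all eight rows, producing a vertically symmetric block.  Used by
 * dnxhd_get_blocks() for the bottom macroblock row of 1080-line content, where
 * only four rows of real pixels exist below dct_y_offset. */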
static void dnxhd_8bit_get_pixels_8x4_sym(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    for (i = 0; i < 4; i++) {
        block[0] = pixels[0]; block[1] = pixels[1];
        block[2] = pixels[2]; block[3] = pixels[3];
        block[4] = pixels[4]; block[5] = pixels[5];
        block[6] = pixels[6]; block[7] = pixels[7];
        pixels += line_size;
        block += 8;
    }
    memcpy(block,      block -  8, sizeof(*block) * 8);
    memcpy(block +  8, block - 16, sizeof(*block) * 8);
    memcpy(block + 16, block - 24, sizeof(*block) * 8);
    memcpy(block + 24, block - 32, sizeof(*block) * 8);
}

static av_always_inline void dnxhd_10bit_get_pixels_8x4_sym(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;

    block += 32;

    for (i = 0; i < 4; i++) {
        memcpy(block + i * 8,     pixels + i * line_size, 8 * sizeof(*block));
        memcpy(block - (i+1) * 8, pixels + i * line_size, 8 * sizeof(*block));
    }
}

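/* Quantization for 10-bit content: scale the DC coefficient down by 4 with
 * rounding, then multiply each AC coefficient by the fixed-point reciprocal
 * precomputed in dnxhd_init_qmat() and shift by DNX10BIT_QMAT_SHIFT.
 * Returns the index of the last non-zero coefficient in scan order. */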
static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, DCTELEM *block,
                                    int n, int qscale, int *overflow)
{
    const uint8_t *scantable = ctx->intra_scantable.scantable;
    const int *qmat = ctx->q_intra_matrix[qscale];
    int last_non_zero = 0;
    int i;

    ctx->dsp.fdct(block);

    // DC: divide by 4 with rounding
    block[0] = (block[0] + 2) >> 2;

    for (i = 1; i < 64; ++i) {
        int j = scantable[i];
        int sign = block[j] >> 31;
        int level = (block[j] ^ sign) - sign;
        level = level * qmat[j] >> DNX10BIT_QMAT_SHIFT;
        block[j] = (level ^ sign) - sign;
        if (level)
            last_non_zero = i;
    }

    return last_non_zero;
}

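/* Build the AC (level, run) VLC tables, indexed by (level << 1) | run_flag, and
 * the run-length code tables from the CID tables (ctx->cid_table). */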
static int dnxhd_init_vlc(DNXHDEncContext *ctx)
{
    int i, j, level, run;
    int max_level = 1<<(ctx->cid_table->bit_depth+2);

    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_codes, max_level*4*sizeof(*ctx->vlc_codes), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_bits,  max_level*4*sizeof(*ctx->vlc_bits),  fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_codes, 63*2, fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_bits,  63,   fail);

    ctx->vlc_codes += max_level*2;
    ctx->vlc_bits  += max_level*2;
    for (level = -max_level; level < max_level; level++) {
        for (run = 0; run < 2; run++) {
            int index = (level<<1)|run;
            int sign, offset = 0, alevel = level;

            MASK_ABS(sign, alevel);
            if (alevel > 64) {
                offset = (alevel-1)>>6;
                alevel -= offset<<6;
            }
            for (j = 0; j < 257; j++) {
                if (ctx->cid_table->ac_level[j] == alevel &&
                    (!offset || (ctx->cid_table->ac_index_flag[j] && offset)) &&
                    (!run    || (ctx->cid_table->ac_run_flag  [j] && run))) {
                    assert(!ctx->vlc_codes[index]);
                    if (alevel) {
                        ctx->vlc_codes[index] = (ctx->cid_table->ac_codes[j]<<1)|(sign&1);
                        ctx->vlc_bits [index] = ctx->cid_table->ac_bits[j]+1;
                    } else {
                        ctx->vlc_codes[index] = ctx->cid_table->ac_codes[j];
                        ctx->vlc_bits [index] = ctx->cid_table->ac_bits [j];
                    }
                    break;
                }
            }
            assert(!alevel || j < 257);
            if (offset) {
                ctx->vlc_codes[index] = (ctx->vlc_codes[index]<<ctx->cid_table->index_bits)|offset;
                ctx->vlc_bits [index]+= ctx->cid_table->index_bits;
            }
        }
    }
    for (i = 0; i < 62; i++) {
        int run = ctx->cid_table->run[i];
        assert(run < 63);
        ctx->run_codes[run] = ctx->cid_table->run_codes[i];
        ctx->run_bits [run] = ctx->cid_table->run_bits[i];
    }
    return 0;
fail:
    return -1;
}

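/* Precompute quantization matrices for every qscale up to qmax.  8-bit content
 * goes through the generic ff_convert_matrix() path; 10-bit content stores
 * fixed-point reciprocals of qscale * weight for dnxhd_10bit_dct_quantize(). */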
static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
{
    // element 0 is set to 1 to avoid a division by zero in ff_convert_matrix()
    uint16_t weight_matrix[64] = {1,};
    int qscale, i;
    const uint8_t *luma_weight_table   = ctx->cid_table->luma_weight;
    const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight;

    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l,   (ctx->m.avctx->qmax+1) * 64 *     sizeof(int),      fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c,   (ctx->m.avctx->qmax+1) * 64 *     sizeof(int),      fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t), fail);

    if (ctx->cid_table->bit_depth == 8) {
        for (i = 1; i < 64; i++) {
            int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
            weight_matrix[j] = ctx->cid_table->luma_weight[i];
        }
        ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_l, ctx->qmatrix_l16, weight_matrix,
                          ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);
        for (i = 1; i < 64; i++) {
            int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
            weight_matrix[j] = ctx->cid_table->chroma_weight[i];
        }
        ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_c, ctx->qmatrix_c16, weight_matrix,
                          ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);

        for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
            for (i = 0; i < 64; i++) {
                ctx->qmatrix_l  [qscale]   [i] <<= 2; ctx->qmatrix_c  [qscale]   [i] <<= 2;
                ctx->qmatrix_l16[qscale][0][i] <<= 2; ctx->qmatrix_l16[qscale][1][i] <<= 2;
                ctx->qmatrix_c16[qscale][0][i] <<= 2; ctx->qmatrix_c16[qscale][1][i] <<= 2;
            }
        }
    } else {
        // 10-bit
        for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
            for (i = 1; i < 64; i++) {
                int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];

                /* The entries are fixed-point reciprocals: dnxhd_10bit_dct_quantize()
                 * computes level * qmat[j] >> DNX10BIT_QMAT_SHIFT, which approximates
                 * level / (qscale * weight / 2). */
                ctx->qmatrix_l[qscale][j] = (1 << (DNX10BIT_QMAT_SHIFT + 1)) / (qscale * luma_weight_table[i]);
                ctx->qmatrix_c[qscale][j] = (1 << (DNX10BIT_QMAT_SHIFT + 1)) / (qscale * chroma_weight_table[i]);
            }
        }
    }

    return 0;
fail:
    return -1;
}

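/* Allocate the rate-control tables and derive the frame bit budget from the
 * coding unit size, minus the 640-byte header, the 4-byte trailer and any
 * Nitris-compatibility padding. */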
static int dnxhd_init_rc(DNXHDEncContext *ctx)
{
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*ctx->m.avctx->qmax*sizeof(RCEntry), fail);
    if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
        FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_cmp, ctx->m.mb_num*sizeof(RCCMPEntry), fail);

    ctx->frame_bits = (ctx->cid_table->coding_unit_size - 640 - 4 - ctx->min_padding) * 8;
    ctx->qscale = 1;
    ctx->lambda = 2<<LAMBDA_FRAC_BITS;
    return 0;
fail:
    return -1;
}

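/* Encoder init: validate the pixel format, pick a compression ID (CID) matching
 * the video parameters, then set up DSP, quantization, VLC and rate-control
 * tables plus one context copy per slice thread. */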
static int dnxhd_encode_init(AVCodecContext *avctx)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int i, index, bit_depth;

    switch (avctx->pix_fmt) {
    case PIX_FMT_YUV422P:
        bit_depth = 8;
        break;
    case PIX_FMT_YUV422P10:
        bit_depth = 10;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "pixel format is incompatible with DNxHD\n");
        return -1;
    }

    ctx->cid = ff_dnxhd_find_cid(avctx, bit_depth);
    if (!ctx->cid) {
        av_log(avctx, AV_LOG_ERROR, "video parameters incompatible with DNxHD\n");
        return -1;
    }
    av_log(avctx, AV_LOG_DEBUG, "cid %d\n", ctx->cid);

    index = ff_dnxhd_get_cid_table(ctx->cid);
    ctx->cid_table = &ff_dnxhd_cid_table[index];

    ctx->m.avctx    = avctx;
    ctx->m.mb_intra = 1;
    ctx->m.h263_aic = 1;

    avctx->bits_per_raw_sample = ctx->cid_table->bit_depth;

    dsputil_init(&ctx->m.dsp, avctx);
    ff_dct_common_init(&ctx->m);
    if (!ctx->m.dct_quantize)
        ctx->m.dct_quantize = dct_quantize_c;

    if (ctx->cid_table->bit_depth == 10) {
        ctx->m.dct_quantize     = dnxhd_10bit_dct_quantize;
        ctx->get_pixels_8x4_sym = dnxhd_10bit_get_pixels_8x4_sym;
        ctx->block_width_l2     = 4;
    } else {
        ctx->get_pixels_8x4_sym = dnxhd_8bit_get_pixels_8x4_sym;
        ctx->block_width_l2     = 3;
    }

#if HAVE_MMX
    ff_dnxhd_init_mmx(ctx);
#endif

    ctx->m.mb_height = (avctx->height + 15) / 16;
    ctx->m.mb_width  = (avctx->width  + 15) / 16;

    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
        // each field is coded as its own coding unit, half the frame height
        ctx->interlaced = 1;
        ctx->m.mb_height /= 2;
    }

    ctx->m.mb_num = ctx->m.mb_height * ctx->m.mb_width;

    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        ctx->m.intra_quant_bias = avctx->intra_quant_bias;
    if (dnxhd_init_qmat(ctx, ctx->m.intra_quant_bias, 0) < 0)
        return -1;

    // Avid Nitris hardware decoders expect a minimum amount of padding in the coding unit
    if (ctx->nitris_compat)
        ctx->min_padding = 1600;

    if (dnxhd_init_vlc(ctx) < 0)
        return -1;
    if (dnxhd_init_rc(ctx) < 0)
        return -1;

    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_size, ctx->m.mb_height*sizeof(uint32_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_offs, ctx->m.mb_height*sizeof(uint32_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_bits,    ctx->m.mb_num   *sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale,  ctx->m.mb_num   *sizeof(uint8_t),  fail);

    ctx->frame.key_frame = 1;
    ctx->frame.pict_type = AV_PICTURE_TYPE_I;
    ctx->m.avctx->coded_frame = &ctx->frame;

    if (avctx->thread_count > MAX_THREADS) {
        av_log(avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    ctx->thread[0] = ctx;
    for (i = 1; i < avctx->thread_count; i++) {
        ctx->thread[i] = av_malloc(sizeof(DNXHDEncContext));
        memcpy(ctx->thread[i], ctx, sizeof(DNXHDEncContext));
    }

    return 0;
fail: // for FF_ALLOCZ_OR_GOTO
    return -1;
}

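/* Write the 640-byte coding unit header.  ctx->msip is left pointing at the
 * table of per-row slice offsets, which dnxhd_encode_picture() fills in once
 * the slice sizes are known. */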
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    const uint8_t header_prefix[5] = { 0x00,0x00,0x02,0x80,0x01 };

    memset(buf, 0, 640);

    memcpy(buf, header_prefix, 5);
    buf[5] = ctx->interlaced ? ctx->cur_field+2 : 0x01;
    buf[6] = 0x80;
    buf[7] = 0xa0;
    AV_WB16(buf + 0x18, avctx->height>>ctx->interlaced);
    AV_WB16(buf + 0x1a, avctx->width);
    AV_WB16(buf + 0x1d, avctx->height>>ctx->interlaced);

    buf[0x21] = ctx->cid_table->bit_depth == 10 ? 0x58 : 0x38;
    buf[0x22] = 0x88 + (ctx->interlaced<<2);
    AV_WB32(buf + 0x28, ctx->cid);
    buf[0x2c] = ctx->interlaced ? 0 : 0x80;

    buf[0x5f] = 0x01;

    buf[0x167] = 0x02;
    AV_WB16(buf + 0x16a, ctx->m.mb_height * 4 + 4);
    buf[0x16d] = ctx->m.mb_height;
    buf[0x16f] = 0x10;

    ctx->msip = buf + 0x170;
    return 0;
}

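/* Write a DC difference: the size-category code from the CID DC tables followed
 * by nbits bits of the difference (negative values are stored decremented,
 * JPEG-style magnitude coding). */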
static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
{
    int nbits;
    if (diff < 0) {
        nbits = av_log2_16bit(-2*diff);
        diff--;
    } else {
        nbits = av_log2_16bit(2*diff);
    }
    put_bits(&ctx->m.pb, ctx->cid_table->dc_bits[nbits] + nbits,
             (ctx->cid_table->dc_codes[nbits]<<nbits) + (diff & ((1 << nbits) - 1)));
}

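/* Write one quantized block: DC difference first, then (run, level) pairs using
 * the tables from dnxhd_init_vlc(), terminated by the EOB code vlc_codes[0]. */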
static av_always_inline void dnxhd_encode_block(DNXHDEncContext *ctx, DCTELEM *block, int last_index, int n)
{
    int last_non_zero = 0;
    int slevel, i, j;

    dnxhd_encode_dc(ctx, block[0] - ctx->m.last_dc[n]);
    ctx->m.last_dc[n] = block[0];

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        slevel = block[j];
        if (slevel) {
            int run_level = i - last_non_zero - 1;
            int rlevel = (slevel<<1)|!!run_level;
            put_bits(&ctx->m.pb, ctx->vlc_bits[rlevel], ctx->vlc_codes[rlevel]);
            if (run_level)
                put_bits(&ctx->m.pb, ctx->run_bits[run_level], ctx->run_codes[run_level]);
            last_non_zero = i;
        }
    }
    put_bits(&ctx->m.pb, ctx->vlc_bits[0], ctx->vlc_codes[0]);
}

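/* Inverse-quantize a block in place.  Only used on the RD/SSD paths so the
 * reconstruction error of a candidate qscale can be measured. */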
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, DCTELEM *block, int n, int qscale, int last_index)
{
    const uint8_t *weight_matrix;
    int level;
    int i;

    weight_matrix = (n&2) ? ctx->cid_table->chroma_weight : ctx->cid_table->luma_weight;

    for (i = 1; i <= last_index; i++) {
        int j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = (1-2*level) * qscale * weight_matrix[i];
                if (ctx->cid_table->bit_depth == 10) {
                    if (weight_matrix[i] != 8)
                        level += 8;
                    level >>= 4;
                } else {
                    if (weight_matrix[i] != 32)
                        level += 32;
                    level >>= 6;
                }
                level = -level;
            } else {
                level = (2*level+1) * qscale * weight_matrix[i];
                if (ctx->cid_table->bit_depth == 10) {
                    if (weight_matrix[i] != 8)
                        level += 8;
                    level >>= 4;
                } else {
                    if (weight_matrix[i] != 32)
                        level += 32;
                    level >>= 6;
                }
            }
            block[j] = level;
        }
    }
}

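/* Sum of squared differences between the source block and its reconstruction. */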
static av_always_inline int dnxhd_ssd_block(DCTELEM *qblock, DCTELEM *block)
{
    int score = 0;
    int i;
    for (i = 0; i < 64; i++)
        score += (block[i] - qblock[i]) * (block[i] - qblock[i]);
    return score;
}

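/* Count the bits dnxhd_encode_block() would spend on the AC coefficients of a
 * block, without writing anything. */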
static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, DCTELEM *block, int last_index)
{
    int last_non_zero = 0;
    int bits = 0;
    int i, j, level;
    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            int run_level = i - last_non_zero - 1;
            bits += ctx->vlc_bits[(level<<1)|!!run_level]+ctx->run_bits[run_level];
            last_non_zero = i;
        }
    }
    return bits;
}

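/* Fetch the eight 8x8 blocks of one 4:2:2 macroblock (4 luma, 2 Cb, 2 Cr).  In
 * the last macroblock row of 1080-line content the bottom blocks are either
 * built with the symmetric 8x4 extension (interlaced) or cleared (progressive). */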
static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
{
    const int bs = ctx->block_width_l2;
    const int bw = 1 << bs;
    const uint8_t *ptr_y = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize)   + (mb_x << (bs + 1));
    const uint8_t *ptr_u = ctx->thread[0]->src[1] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs);
    const uint8_t *ptr_v = ctx->thread[0]->src[2] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs);
    DSPContext *dsp = &ctx->m.dsp;

    dsp->get_pixels(ctx->blocks[0], ptr_y,      ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[1], ptr_y + bw, ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[2], ptr_u,      ctx->m.uvlinesize);
    dsp->get_pixels(ctx->blocks[3], ptr_v,      ctx->m.uvlinesize);

    if (mb_y+1 == ctx->m.mb_height && ctx->m.avctx->height == 1080) {
        if (ctx->interlaced) {
            ctx->get_pixels_8x4_sym(ctx->blocks[4], ptr_y + ctx->dct_y_offset,      ctx->m.linesize);
            ctx->get_pixels_8x4_sym(ctx->blocks[5], ptr_y + ctx->dct_y_offset + bw, ctx->m.linesize);
            ctx->get_pixels_8x4_sym(ctx->blocks[6], ptr_u + ctx->dct_uv_offset,     ctx->m.uvlinesize);
            ctx->get_pixels_8x4_sym(ctx->blocks[7], ptr_v + ctx->dct_uv_offset,     ctx->m.uvlinesize);
        } else {
            dsp->clear_block(ctx->blocks[4]);
            dsp->clear_block(ctx->blocks[5]);
            dsp->clear_block(ctx->blocks[6]);
            dsp->clear_block(ctx->blocks[7]);
        }
    } else {
        dsp->get_pixels(ctx->blocks[4], ptr_y + ctx->dct_y_offset,      ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[5], ptr_y + ctx->dct_y_offset + bw, ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[6], ptr_u + ctx->dct_uv_offset,     ctx->m.uvlinesize);
        dsp->get_pixels(ctx->blocks[7], ptr_v + ctx->dct_uv_offset,     ctx->m.uvlinesize);
    }
}

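/* Select the luma or chroma quantization matrices for block i and return the
 * DC prediction index (0 = luma, 1 = Cb, 2 = Cr) used for last_dc[]. */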
static av_always_inline int dnxhd_switch_matrix(DNXHDEncContext *ctx, int i)
{
    if (i&2) {
        ctx->m.q_intra_matrix16 = ctx->qmatrix_c16;
        ctx->m.q_intra_matrix   = ctx->qmatrix_c;
        return 1 + (i&1);
    } else {
        ctx->m.q_intra_matrix16 = ctx->qmatrix_l16;
        ctx->m.q_intra_matrix   = ctx->qmatrix_l;
        return 0;
    }
}

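/* Slice worker: quantize every block of one macroblock row at the current
 * ctx->qscale and record the bit cost (and SSD in RD mode) in mb_rc. */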
static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    int qscale = ctx->qscale;
    LOCAL_ALIGNED_16(DCTELEM, block, [64]);
    ctx = ctx->thread[threadnr];

    ctx->m.last_dc[0] =
    ctx->m.last_dc[1] =
    ctx->m.last_dc[2] = 1 << (ctx->cid_table->bit_depth + 2);

    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.mb_width + mb_x;
        int ssd     = 0;
        int ac_bits = 0;
        int dc_bits = 0;
        int i;

        dnxhd_get_blocks(ctx, mb_x, mb_y);

        for (i = 0; i < 8; i++) {
            DCTELEM *src_block = ctx->blocks[i];
            int overflow, nbits, diff, last_index;
            int n = dnxhd_switch_matrix(ctx, i);

            memcpy(block, src_block, 64*sizeof(*block));
            last_index = ctx->m.dct_quantize(&ctx->m, block, i, qscale, &overflow);
            ac_bits += dnxhd_calc_ac_bits(ctx, block, last_index);

            diff = block[0] - ctx->m.last_dc[n];
            if (diff < 0) nbits = av_log2_16bit(-2*diff);
            else          nbits = av_log2_16bit( 2*diff);

            assert(nbits < ctx->cid_table->bit_depth + 4);
            dc_bits += ctx->cid_table->dc_bits[nbits] + nbits;

            ctx->m.last_dc[n] = block[0];

            if (avctx->mb_decision == FF_MB_DECISION_RD || !RC_VARIANCE) {
                dnxhd_unquantize_c(ctx, block, i, qscale, last_index);
                ctx->m.dsp.idct(block);
                ssd += dnxhd_ssd_block(block, src_block);
            }
        }
        ctx->mb_rc[qscale][mb].ssd = ssd;
        // 12 bits for the per-macroblock qscale field plus one EOB code per block
        ctx->mb_rc[qscale][mb].bits = ac_bits+dc_bits+12+8*ctx->vlc_bits[0];
    }
    return 0;
}

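/* Slice worker: entropy-code one macroblock row into its slice using the
 * per-macroblock qscale chosen by the rate control. */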
static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    ctx = ctx->thread[threadnr];
    init_put_bits(&ctx->m.pb, (uint8_t *)arg + 640 + ctx->slice_offs[jobnr], ctx->slice_size[jobnr]);

    ctx->m.last_dc[0] =
    ctx->m.last_dc[1] =
    ctx->m.last_dc[2] = 1 << (ctx->cid_table->bit_depth + 2);
    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.mb_width + mb_x;
        int qscale = ctx->mb_qscale[mb];
        int i;

        put_bits(&ctx->m.pb, 12, qscale<<1);

        dnxhd_get_blocks(ctx, mb_x, mb_y);

        for (i = 0; i < 8; i++) {
            DCTELEM *block = ctx->blocks[i];
            int overflow, n = dnxhd_switch_matrix(ctx, i);
            int last_index = ctx->m.dct_quantize(&ctx->m, block, i,
                                                 qscale, &overflow);
            dnxhd_encode_block(ctx, block, last_index, n);
        }
    }
    if (put_bits_count(&ctx->m.pb)&31)
        put_bits(&ctx->m.pb, 32-(put_bits_count(&ctx->m.pb)&31), 0);
    flush_put_bits(&ctx->m.pb);
    return 0;
}

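/* Turn the per-macroblock bit counts into per-row slice sizes and offsets;
 * every slice is padded up to a 32-bit boundary. */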
static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx)
{
    int mb_y, mb_x;
    int offset = 0;
    for (mb_y = 0; mb_y < ctx->m.mb_height; mb_y++) {
        int thread_size;
        ctx->slice_offs[mb_y] = offset;
        ctx->slice_size[mb_y] = 0;
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            ctx->slice_size[mb_y] += ctx->mb_bits[mb];
        }
        ctx->slice_size[mb_y] = (ctx->slice_size[mb_y]+31)&~31;
        ctx->slice_size[mb_y] >>= 3;
        thread_size = ctx->slice_size[mb_y];
        offset += thread_size;
    }
}

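/* Slice worker for the variance-based fast rate control: compute a spatial
 * activity score per macroblock, later used to decide which macroblocks are
 * bumped to qscale+1 first. */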
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    ctx = ctx->thread[threadnr];
    if (ctx->cid_table->bit_depth == 8) {
        uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y<<4) * ctx->m.linesize);
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize);
            int varc = (ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)sum*sum)>>8)+128)>>8;
            ctx->mb_cmp[mb].value = varc;
            ctx->mb_cmp[mb].mb = mb;
        }
    } else { // 10-bit
        int const linesize = ctx->m.linesize >> 1;
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x) {
            uint16_t *pix = (uint16_t*)ctx->thread[0]->src[0] + ((mb_y << 4) * linesize) + (mb_x << 4);
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int sum = 0;
            int sqsum = 0;
            int mean, sqmean;
            int i, j;

            for (i = 0; i < 16; ++i) {
                for (j = 0; j < 16; ++j) {
                    // drop the low 6 bits so the 16x16 sum of squares stays within an int
                    int const sample = (unsigned)pix[j] >> 6;
                    sum += sample;
                    sqsum += sample * sample;
                }
                pix += linesize;
            }
            mean = sum >> 8;   // 16*16 == 2^8 samples
            sqmean = sqsum >> 8;
            ctx->mb_cmp[mb].value = sqmean - mean * mean;
            ctx->mb_cmp[mb].mb = mb;
        }
    }
    return 0;
}

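/* Rate-distortion rate control: measure bits and SSD for every macroblock at
 * every qscale, then search for a lambda such that per-macroblock minimization
 * of bits*lambda + ssd stays within the frame bit budget. */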
static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int lambda, up_step, down_step;
    int last_lower = INT_MAX, last_higher = 0;
    int x, y, q;

    for (q = 1; q < avctx->qmax; q++) {
        ctx->qscale = q;
        avctx->execute2(avctx, dnxhd_calc_bits_thread, NULL, NULL, ctx->m.mb_height);
    }
    up_step = down_step = 2<<LAMBDA_FRAC_BITS;
    lambda = ctx->lambda;

    for (;;) {
        int bits = 0;
        int end = 0;
        if (lambda == last_higher) {
            lambda++;
            end = 1; // need to set final qscales/bits
        }
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++) {
                unsigned min = UINT_MAX;
                int qscale = 1;
                int mb = y*ctx->m.mb_width+x;
                for (q = 1; q < avctx->qmax; q++) {
                    unsigned score = ctx->mb_rc[q][mb].bits*lambda+
                        ((unsigned)ctx->mb_rc[q][mb].ssd<<LAMBDA_FRAC_BITS);
                    if (score < min) {
                        min = score;
                        qscale = q;
                    }
                }
                bits += ctx->mb_rc[qscale][mb].bits;
                ctx->mb_qscale[mb] = qscale;
                ctx->mb_bits[mb] = ctx->mb_rc[qscale][mb].bits;
            }
            bits = (bits+31)&~31; // padding to a 32-bit boundary per row
            if (bits > ctx->frame_bits)
                break;
        }

        if (end) {
            if (bits > ctx->frame_bits)
                return -1;
            break;
        }
        if (bits < ctx->frame_bits) {
            last_lower = FFMIN(lambda, last_lower);
            if (last_higher != 0)
                lambda = (lambda+last_higher)>>1;
            else
                lambda -= down_step;
            down_step = FFMIN((int64_t)down_step*5, INT_MAX);
            up_step = 1<<LAMBDA_FRAC_BITS;
            lambda = FFMAX(1, lambda);
            if (lambda == last_lower)
                break;
        } else {
            last_higher = FFMAX(lambda, last_higher);
            if (last_lower != INT_MAX)
                lambda = (lambda+last_lower)>>1;
            else if ((int64_t)lambda + up_step > INT_MAX)
                return -1;
            else
                lambda += up_step;
            up_step = FFMIN((int64_t)up_step*5, INT_MAX);
            down_step = 1<<LAMBDA_FRAC_BITS;
        }
    }

    ctx->lambda = lambda;
    return 0;
}

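/* Fast rate control, step 1: binary-search for the qscale at which the whole
 * frame still slightly overshoots the bit budget (qscale+1 would fit);
 * dnxhd_encode_fast() then raises individual macroblocks to qscale+1. */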
static int dnxhd_find_qscale(DNXHDEncContext *ctx)
{
    int bits = 0;
    int up_step = 1;
    int down_step = 1;
    int last_higher = 0;
    int last_lower = INT_MAX;
    int qscale;
    int x, y;

    qscale = ctx->qscale;
    for (;;) {
        bits = 0;
        ctx->qscale = qscale;

        ctx->m.avctx->execute2(ctx->m.avctx, dnxhd_calc_bits_thread, NULL, NULL, ctx->m.mb_height);
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++)
                bits += ctx->mb_rc[qscale][y*ctx->m.mb_width+x].bits;
            bits = (bits+31)&~31;
            if (bits > ctx->frame_bits)
                break;
        }

        if (bits < ctx->frame_bits) {
            if (qscale == 1)
                return 1;
            if (last_higher == qscale - 1) {
                qscale = last_higher;
                break;
            }
            last_lower = FFMIN(qscale, last_lower);
            if (last_higher != 0)
                qscale = (qscale+last_higher)>>1;
            else
                qscale -= down_step++;
            if (qscale < 1)
                qscale = 1;
            up_step = 1;
        } else {
            if (last_lower == qscale + 1)
                break;
            last_higher = FFMAX(qscale, last_higher);
            if (last_lower != INT_MAX)
                qscale = (qscale+last_lower)>>1;
            else
                qscale += up_step++;
            down_step = 1;
            if (qscale >= ctx->m.avctx->qmax)
                return -1;
        }
    }

    ctx->qscale = qscale;
    return 0;
}

#define BUCKET_BITS 8
#define RADIX_PASSES 4
#define NBUCKETS (1 << BUCKET_BITS)

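/* Radix sort of the mb_cmp array by descending value, BUCKET_BITS bits per
 * pass; the last two passes are skipped when no value needs more than 16 bits. */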
static inline int get_bucket(int value, int shift)
{
    value >>= shift;
    value  &= NBUCKETS - 1;
    return NBUCKETS - 1 - value;
}

static void radix_count(const RCCMPEntry *data, int size, int buckets[RADIX_PASSES][NBUCKETS])
{
    int i, j;
    memset(buckets, 0, sizeof(buckets[0][0]) * RADIX_PASSES * NBUCKETS);
    for (i = 0; i < size; i++) {
        int v = data[i].value;
        for (j = 0; j < RADIX_PASSES; j++) {
            buckets[j][get_bucket(v, 0)]++;
            v >>= BUCKET_BITS;
        }
        assert(!v);
    }
    for (j = 0; j < RADIX_PASSES; j++) {
        int offset = size;
        for (i = NBUCKETS - 1; i >= 0; i--)
            buckets[j][i] = offset -= buckets[j][i];
        assert(!buckets[j][0]);
    }
}

static void radix_sort_pass(RCCMPEntry *dst, const RCCMPEntry *data, int size, int buckets[NBUCKETS], int pass)
{
    int shift = pass * BUCKET_BITS;
    int i;
    for (i = 0; i < size; i++) {
        int v = get_bucket(data[i].value, shift);
        int pos = buckets[v]++;
        dst[pos] = data[i];
    }
}

static void radix_sort(RCCMPEntry *data, int size)
{
    int buckets[RADIX_PASSES][NBUCKETS];
    RCCMPEntry *tmp = av_malloc(sizeof(*tmp) * size);
    radix_count(data, size, buckets);
    radix_sort_pass(tmp, data, size, buckets[0], 0);
    radix_sort_pass(data, tmp, size, buckets[1], 1);
    if (buckets[2][NBUCKETS - 1] || buckets[3][NBUCKETS - 1]) {
        radix_sort_pass(tmp, data, size, buckets[2], 2);
        radix_sort_pass(data, tmp, size, buckets[3], 3);
    }
    av_free(tmp);
}

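/* Fast rate control, step 2: start every macroblock at the qscale found by
 * dnxhd_find_qscale(), then bump the macroblocks with the highest comparison
 * score (variance when RC_VARIANCE is set) to qscale+1 until the frame fits. */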
static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int max_bits = 0;
    int ret, x, y;
    if ((ret = dnxhd_find_qscale(ctx)) < 0)
        return -1;
    for (y = 0; y < ctx->m.mb_height; y++) {
        for (x = 0; x < ctx->m.mb_width; x++) {
            int mb = y*ctx->m.mb_width+x;
            int delta_bits;
            ctx->mb_qscale[mb] = ctx->qscale;
            ctx->mb_bits[mb] = ctx->mb_rc[ctx->qscale][mb].bits;
            max_bits += ctx->mb_rc[ctx->qscale][mb].bits;
            if (!RC_VARIANCE) {
                delta_bits = ctx->mb_rc[ctx->qscale][mb].bits-ctx->mb_rc[ctx->qscale+1][mb].bits;
                ctx->mb_cmp[mb].mb = mb;
                ctx->mb_cmp[mb].value = delta_bits ?
                    ((ctx->mb_rc[ctx->qscale][mb].ssd-ctx->mb_rc[ctx->qscale+1][mb].ssd)*100)/delta_bits
                    : INT_MIN;
            }
        }
        max_bits += 31; // worst-case padding to the next 32-bit boundary
    }
    if (!ret) {
        if (RC_VARIANCE)
            avctx->execute2(avctx, dnxhd_mb_var_thread, NULL, NULL, ctx->m.mb_height);
        radix_sort(ctx->mb_cmp, ctx->m.mb_num);
        for (x = 0; x < ctx->m.mb_num && max_bits > ctx->frame_bits; x++) {
            int mb = ctx->mb_cmp[x].mb;
            max_bits -= ctx->mb_rc[ctx->qscale][mb].bits - ctx->mb_rc[ctx->qscale+1][mb].bits;
            ctx->mb_qscale[mb] = ctx->qscale+1;
            ctx->mb_bits[mb] = ctx->mb_rc[ctx->qscale+1][mb].bits;
        }
    }
    return 0;
}

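/* Point the per-thread contexts at the new source frame.  For interlaced input
 * the line sizes are doubled so each coding unit reads a single field. */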
static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
{
    int i;

    for (i = 0; i < 3; i++) {
        ctx->frame.data[i]     = frame->data[i];
        ctx->frame.linesize[i] = frame->linesize[i];
    }

    for (i = 0; i < ctx->m.avctx->thread_count; i++) {
        ctx->thread[i]->m.linesize    = ctx->frame.linesize[0]<<ctx->interlaced;
        ctx->thread[i]->m.uvlinesize  = ctx->frame.linesize[1]<<ctx->interlaced;
        ctx->thread[i]->dct_y_offset  = ctx->m.linesize  *8;
        ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize*8;
    }

    ctx->frame.interlaced_frame = frame->interlaced_frame;
    ctx->cur_field = frame->interlaced_frame && !frame->top_field_first;
}

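/* Encode one frame (two coding units when interlaced): write the header, run
 * the selected rate control, lay out the slices, encode them with the slice
 * threads and terminate each coding unit with the 0x600DC0DE marker. */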
static int dnxhd_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int first_field = 1;
    int offset, i, ret;

    if (buf_size < ctx->cid_table->frame_size) {
        av_log(avctx, AV_LOG_ERROR, "output buffer is too small to compress picture\n");
        return -1;
    }

    dnxhd_load_picture(ctx, data);

encode_coding_unit:
    for (i = 0; i < 3; i++) {
        ctx->src[i] = ctx->frame.data[i];
        if (ctx->interlaced && ctx->cur_field)
            ctx->src[i] += ctx->frame.linesize[i];
    }

    dnxhd_write_header(avctx, buf);

    if (avctx->mb_decision == FF_MB_DECISION_RD)
        ret = dnxhd_encode_rdo(avctx, ctx);
    else
        ret = dnxhd_encode_fast(avctx, ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "picture could not fit ratecontrol constraints, increase qmax\n");
        return -1;
    }

    dnxhd_setup_threads_slices(ctx);

    offset = 0;
    for (i = 0; i < ctx->m.mb_height; i++) {
        AV_WB32(ctx->msip + i * 4, offset);
        offset += ctx->slice_size[i];
        assert(!(ctx->slice_size[i] & 3));
    }

    avctx->execute2(avctx, dnxhd_encode_thread, buf, NULL, ctx->m.mb_height);

    assert(640 + offset + 4 <= ctx->cid_table->coding_unit_size);
    memset(buf + 640 + offset, 0, ctx->cid_table->coding_unit_size - 4 - offset - 640);

    AV_WB32(buf + ctx->cid_table->coding_unit_size - 4, 0x600DC0DE);

    if (ctx->interlaced && first_field) {
        first_field = 0;
        ctx->cur_field ^= 1;
        buf      += ctx->cid_table->coding_unit_size;
        buf_size -= ctx->cid_table->coding_unit_size;
        goto encode_coding_unit;
    }

    ctx->frame.quality = ctx->qscale*FF_QP2LAMBDA;

    return ctx->cid_table->frame_size;
}

static int dnxhd_encode_end(AVCodecContext *avctx)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int max_level = 1<<(ctx->cid_table->bit_depth+2);
    int i;

    av_free(ctx->vlc_codes-max_level*2);
    av_free(ctx->vlc_bits -max_level*2);
    av_freep(&ctx->run_codes);
    av_freep(&ctx->run_bits);

    av_freep(&ctx->mb_bits);
    av_freep(&ctx->mb_qscale);
    av_freep(&ctx->mb_rc);
    av_freep(&ctx->mb_cmp);
    av_freep(&ctx->slice_size);
    av_freep(&ctx->slice_offs);

    av_freep(&ctx->qmatrix_c);
    av_freep(&ctx->qmatrix_l);
    av_freep(&ctx->qmatrix_c16);
    av_freep(&ctx->qmatrix_l16);

    for (i = 1; i < avctx->thread_count; i++)
        av_freep(&ctx->thread[i]);

    return 0;
}

AVCodec ff_dnxhd_encoder = {
    .name           = "dnxhd",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_DNXHD,
    .priv_data_size = sizeof(DNXHDEncContext),
    .init           = dnxhd_encode_init,
    .encode         = dnxhd_encode_picture,
    .close          = dnxhd_encode_end,
    .capabilities   = CODEC_CAP_SLICE_THREADS,
    .pix_fmts       = (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_YUV422P10, PIX_FMT_NONE},
    .long_name      = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
    .priv_class     = &class,
};