/*
 * various utility functions for use within FFmpeg
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avformat.h"
#include "internal.h"
#include "libavcodec/opt.h"
#include "metadata.h"
#include "libavutil/avstring.h"
#include "riff.h"
#include "audiointerleave.h"
#include <sys/time.h>
#include <time.h>
#include <strings.h>
#include <stdarg.h>
#if CONFIG_NETWORK
#include "network.h"
#endif

#undef NDEBUG
#include <assert.h>

unsigned avformat_version(void)
{
    return LIBAVFORMAT_VERSION_INT;
}

const char *avformat_configuration(void)
{
    return FFMPEG_CONFIGURATION;
}

const char *avformat_license(void)
{
#define LICENSE_PREFIX "libavformat license: "
    return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}

/* fraction handling */

static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
{
    num += (den >> 1);
    if (num >= den) {
        val += num / den;
        num = num % den;
    }
    f->val = val;
    f->num = num;
    f->den = den;
}

static void av_frac_add(AVFrac *f, int64_t incr)
{
    int64_t num, den;

    num = f->num + incr;
    den = f->den;
    if (num < 0) {
        f->val += num / den;
        num = num % den;
        if (num < 0) {
            num += den;
            f->val--;
        }
    } else if (num >= den) {
        f->val += num / den;
        num = num % den;
    }
    f->num = num;
}

AVInputFormat *first_iformat = NULL;
AVOutputFormat *first_oformat = NULL;

AVInputFormat *av_iformat_next(AVInputFormat *f)
{
    if(f) return f->next;
    else  return first_iformat;
}

AVOutputFormat *av_oformat_next(AVOutputFormat *f)
{
    if(f) return f->next;
    else  return first_oformat;
}

void av_register_input_format(AVInputFormat *format)
{
    AVInputFormat **p;
    p = &first_iformat;
    while (*p != NULL) p = &(*p)->next;
    *p = format;
    format->next = NULL;
}

void av_register_output_format(AVOutputFormat *format)
{
    AVOutputFormat **p;
    p = &first_oformat;
    while (*p != NULL) p = &(*p)->next;
    *p = format;
    format->next = NULL;
}

int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if(!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
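        /* compare the extension against each comma-separated candidate, case-insensitively */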
        ext++;
        p = extensions;
        for(;;) {
            q = ext1;
            while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
                *q++ = *p++;
            *q = '\0';
            if (!strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}

static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        len = FFMAX(p - names, namelen);
        if (!strncasecmp(name, names, len))
            return 1;
        names = p+1;
    }
    return !strcasecmp(name, names);
}

#if LIBAVFORMAT_VERSION_MAJOR < 53
AVOutputFormat *guess_format(const char *short_name, const char *filename,
                             const char *mime_type)
{
    return av_guess_format(short_name, filename, mime_type);
}
#endif

AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
                                const char *mime_type)
{
    AVOutputFormat *fmt, *fmt_found;
    int score_max, score;

    /* specific test for image sequences */
#if CONFIG_IMAGE2_MUXER
    if (!short_name && filename &&
        av_filename_number_test(filename) &&
        av_guess_image2_codec(filename) != CODEC_ID_NONE) {
        return av_guess_format("image2", NULL, NULL);
    }
#endif
    /* Find the proper file type. */
    fmt_found = NULL;
    score_max = 0;
    fmt = first_oformat;
    while (fmt != NULL) {
        score = 0;
        if (fmt->name && short_name && !strcmp(fmt->name, short_name))
            score += 100;
        if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
            score += 10;
        if (filename && fmt->extensions &&
            av_match_ext(filename, fmt->extensions)) {
            score += 5;
        }
        if (score > score_max) {
            score_max = score;
            fmt_found = fmt;
        }
        fmt = fmt->next;
    }
    return fmt_found;
}

#if LIBAVFORMAT_VERSION_MAJOR < 53
AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
                                    const char *mime_type)
{
    AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);

    if (fmt) {
        AVOutputFormat *stream_fmt;
        char stream_format_name[64];

        snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
        stream_fmt = av_guess_format(stream_format_name, NULL, NULL);

        if (stream_fmt)
            fmt = stream_fmt;
    }

    return fmt;
}
#endif

enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
                            const char *filename, const char *mime_type, enum AVMediaType type){
    if(type == AVMEDIA_TYPE_VIDEO){
        enum CodecID codec_id= CODEC_ID_NONE;

#if CONFIG_IMAGE2_MUXER
        if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
            codec_id= av_guess_image2_codec(filename);
        }
#endif
        if(codec_id == CODEC_ID_NONE)
            codec_id= fmt->video_codec;
        return codec_id;
    }else if(type == AVMEDIA_TYPE_AUDIO)
        return fmt->audio_codec;
    else
        return CODEC_ID_NONE;
}

AVInputFormat *av_find_input_format(const char *short_name)
{
    AVInputFormat *fmt;
    for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
        if (match_format(short_name, fmt->name))
            return fmt;
    }
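    /* no registered demuxer matched the requested short name */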
    return NULL;
}

#if LIBAVFORMAT_VERSION_MAJOR < 53 && CONFIG_SHARED && HAVE_SYMVER
FF_SYMVER(void, av_destruct_packet_nofree, (AVPacket *pkt), "LIBAVFORMAT_52")
{
    av_destruct_packet_nofree(pkt);
}

FF_SYMVER(void, av_destruct_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
{
    av_destruct_packet(pkt);
}

FF_SYMVER(int, av_new_packet, (AVPacket *pkt, int size), "LIBAVFORMAT_52")
{
    return av_new_packet(pkt, size);
}

FF_SYMVER(int, av_dup_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
{
    return av_dup_packet(pkt);
}

FF_SYMVER(void, av_free_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
{
    av_free_packet(pkt);
}

FF_SYMVER(void, av_init_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
{
    av_log(NULL, AV_LOG_WARNING, "Diverting av_*_packet function calls to libavcodec. Recompile to improve performance\n");
    av_init_packet(pkt);
}
#endif

int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
{
    int ret= av_new_packet(pkt, size);

    if(ret<0)
        return ret;

    pkt->pos= url_ftell(s);

    ret= get_buffer(s, pkt->data, size);
    if(ret<=0)
        av_free_packet(pkt);
    else
        av_shrink_packet(pkt, ret);

    return ret;
}


int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}

AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
{
    AVInputFormat *fmt1, *fmt;
    int score;

    fmt = NULL;
    for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
            continue;
        score = 0;
        if (fmt1->read_probe) {
            score = fmt1->read_probe(pd);
        } else if (fmt1->extensions) {
            if (av_match_ext(pd->filename, fmt1->extensions)) {
                score = 50;
            }
        }
        if (score > *score_max) {
            *score_max = score;
            fmt = fmt1;
        }else if (score == *score_max)
            fmt = NULL;
    }
    return fmt;
}

AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
    int score=0;
    return av_probe_input_format2(pd, is_opened, &score);
}

static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
{
    AVInputFormat *fmt;
    fmt = av_probe_input_format2(pd, 1, &score);

    if (fmt) {
        av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
               pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
        if (!strcmp(fmt->name, "mp3")) {
            st->codec->codec_id = CODEC_ID_MP3;
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        } else if (!strcmp(fmt->name, "ac3")) {
            st->codec->codec_id = CODEC_ID_AC3;
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        } else if (!strcmp(fmt->name, "eac3")) {
            st->codec->codec_id = CODEC_ID_EAC3;
            st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        } else if (!strcmp(fmt->name, "mpegvideo")) {
            st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        } else if (!strcmp(fmt->name, "m4v")) {
            st->codec->codec_id = CODEC_ID_MPEG4;
            st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        } else if
(!strcmp(fmt->name, "h264")) { 00398 st->codec->codec_id = CODEC_ID_H264; 00399 st->codec->codec_type = AVMEDIA_TYPE_VIDEO; 00400 } else if (!strcmp(fmt->name, "dts")) { 00401 st->codec->codec_id = CODEC_ID_DTS; 00402 st->codec->codec_type = AVMEDIA_TYPE_AUDIO; 00403 } else if (!strcmp(fmt->name, "aac")) { 00404 st->codec->codec_id = CODEC_ID_AAC; 00405 st->codec->codec_type = AVMEDIA_TYPE_AUDIO; 00406 } 00407 } 00408 return !!fmt; 00409 } 00410 00411 /************************************************************/ 00412 /* input media file */ 00413 00417 int av_open_input_stream(AVFormatContext **ic_ptr, 00418 ByteIOContext *pb, const char *filename, 00419 AVInputFormat *fmt, AVFormatParameters *ap) 00420 { 00421 int err; 00422 AVFormatContext *ic; 00423 AVFormatParameters default_ap; 00424 00425 if(!ap){ 00426 ap=&default_ap; 00427 memset(ap, 0, sizeof(default_ap)); 00428 } 00429 00430 if(!ap->prealloced_context) 00431 ic = avformat_alloc_context(); 00432 else 00433 ic = *ic_ptr; 00434 if (!ic) { 00435 err = AVERROR(ENOMEM); 00436 goto fail; 00437 } 00438 ic->iformat = fmt; 00439 ic->pb = pb; 00440 ic->duration = AV_NOPTS_VALUE; 00441 ic->start_time = AV_NOPTS_VALUE; 00442 av_strlcpy(ic->filename, filename, sizeof(ic->filename)); 00443 00444 /* allocate private data */ 00445 if (fmt->priv_data_size > 0) { 00446 ic->priv_data = av_mallocz(fmt->priv_data_size); 00447 if (!ic->priv_data) { 00448 err = AVERROR(ENOMEM); 00449 goto fail; 00450 } 00451 } else { 00452 ic->priv_data = NULL; 00453 } 00454 00455 if (ic->iformat->read_header) { 00456 err = ic->iformat->read_header(ic, ap); 00457 if (err < 0) 00458 goto fail; 00459 } 00460 00461 if (pb && !ic->data_offset) 00462 ic->data_offset = url_ftell(ic->pb); 00463 00464 #if LIBAVFORMAT_VERSION_MAJOR < 53 00465 ff_metadata_demux_compat(ic); 00466 #endif 00467 00468 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE; 00469 00470 *ic_ptr = ic; 00471 return 0; 00472 fail: 00473 if (ic) { 00474 int i; 00475 av_freep(&ic->priv_data); 00476 for(i=0;i<ic->nb_streams;i++) { 00477 AVStream *st = ic->streams[i]; 00478 if (st) { 00479 av_free(st->priv_data); 00480 av_free(st->codec->extradata); 00481 } 00482 av_free(st); 00483 } 00484 } 00485 av_free(ic); 00486 *ic_ptr = NULL; 00487 return err; 00488 } 00489 00491 #define PROBE_BUF_MIN 2048 00492 #define PROBE_BUF_MAX (1<<20) 00493 00494 int ff_probe_input_buffer(ByteIOContext **pb, AVInputFormat **fmt, 00495 const char *filename, void *logctx, 00496 unsigned int offset, unsigned int max_probe_size) 00497 { 00498 AVProbeData pd = { filename ? filename : "", NULL, -offset }; 00499 unsigned char *buf = NULL; 00500 int ret = 0, probe_size; 00501 00502 if (!max_probe_size) { 00503 max_probe_size = PROBE_BUF_MAX; 00504 } else if (max_probe_size > PROBE_BUF_MAX) { 00505 max_probe_size = PROBE_BUF_MAX; 00506 } else if (max_probe_size < PROBE_BUF_MIN) { 00507 return AVERROR(EINVAL); 00508 } 00509 00510 if (offset >= max_probe_size) { 00511 return AVERROR(EINVAL); 00512 } 00513 00514 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0; 00515 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) { 00516 int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0; 00517 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 
0 : probe_size>>1; 00518 00519 if (probe_size < offset) { 00520 continue; 00521 } 00522 00523 /* read probe data */ 00524 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE); 00525 if ((ret = get_buffer(*pb, buf + buf_offset, probe_size - buf_offset)) < 0) { 00526 /* fail if error was not end of file, otherwise, lower score */ 00527 if (ret != AVERROR_EOF) { 00528 av_free(buf); 00529 return ret; 00530 } 00531 score = 0; 00532 ret = 0; /* error was end of file, nothing read */ 00533 } 00534 pd.buf_size += ret; 00535 pd.buf = &buf[offset]; 00536 00537 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE); 00538 00539 /* guess file format */ 00540 *fmt = av_probe_input_format2(&pd, 1, &score); 00541 if(*fmt){ 00542 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration 00543 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score); 00544 }else 00545 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score); 00546 } 00547 } 00548 00549 if (!*fmt) { 00550 av_free(buf); 00551 return AVERROR_INVALIDDATA; 00552 } 00553 00554 /* rewind. reuse probe buffer to avoid seeking */ 00555 if ((ret = ff_rewind_with_probe_data(*pb, buf, pd.buf_size)) < 0) 00556 av_free(buf); 00557 00558 return ret; 00559 } 00560 00561 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename, 00562 AVInputFormat *fmt, 00563 int buf_size, 00564 AVFormatParameters *ap) 00565 { 00566 int err; 00567 AVProbeData probe_data, *pd = &probe_data; 00568 ByteIOContext *pb = NULL; 00569 void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL; 00570 00571 pd->filename = ""; 00572 if (filename) 00573 pd->filename = filename; 00574 pd->buf = NULL; 00575 pd->buf_size = 0; 00576 00577 if (!fmt) { 00578 /* guess format if no file can be opened */ 00579 fmt = av_probe_input_format(pd, 0); 00580 } 00581 00582 /* Do not open file if the format does not need it. XXX: specific 00583 hack needed to handle RTSP/TCP */ 00584 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) { 00585 /* if no file needed do not try to open one */ 00586 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) { 00587 goto fail; 00588 } 00589 if (buf_size > 0) { 00590 url_setbufsize(pb, buf_size); 00591 } 00592 if (!fmt && (err = ff_probe_input_buffer(&pb, &fmt, filename, logctx, 0, logctx ? 
(*ic_ptr)->probesize : 0)) < 0) { 00593 goto fail; 00594 } 00595 } 00596 00597 /* if still no format found, error */ 00598 if (!fmt) { 00599 err = AVERROR_INVALIDDATA; 00600 goto fail; 00601 } 00602 00603 /* check filename in case an image number is expected */ 00604 if (fmt->flags & AVFMT_NEEDNUMBER) { 00605 if (!av_filename_number_test(filename)) { 00606 err = AVERROR_NUMEXPECTED; 00607 goto fail; 00608 } 00609 } 00610 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap); 00611 if (err) 00612 goto fail; 00613 return 0; 00614 fail: 00615 av_freep(&pd->buf); 00616 if (pb) 00617 url_fclose(pb); 00618 if (ap && ap->prealloced_context) 00619 av_free(*ic_ptr); 00620 *ic_ptr = NULL; 00621 return err; 00622 00623 } 00624 00625 /*******************************************************/ 00626 00627 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt, 00628 AVPacketList **plast_pktl){ 00629 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList)); 00630 if (!pktl) 00631 return NULL; 00632 00633 if (*packet_buffer) 00634 (*plast_pktl)->next = pktl; 00635 else 00636 *packet_buffer = pktl; 00637 00638 /* add the packet in the buffered packet list */ 00639 *plast_pktl = pktl; 00640 pktl->pkt= *pkt; 00641 return &pktl->pkt; 00642 } 00643 00644 int av_read_packet(AVFormatContext *s, AVPacket *pkt) 00645 { 00646 int ret, i; 00647 AVStream *st; 00648 00649 for(;;){ 00650 AVPacketList *pktl = s->raw_packet_buffer; 00651 00652 if (pktl) { 00653 *pkt = pktl->pkt; 00654 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE || 00655 !s->streams[pkt->stream_index]->probe_packets || 00656 s->raw_packet_buffer_remaining_size < pkt->size){ 00657 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data; 00658 av_freep(&pd->buf); 00659 pd->buf_size = 0; 00660 s->raw_packet_buffer = pktl->next; 00661 s->raw_packet_buffer_remaining_size += pkt->size; 00662 av_free(pktl); 00663 return 0; 00664 } 00665 } 00666 00667 av_init_packet(pkt); 00668 ret= s->iformat->read_packet(s, pkt); 00669 if (ret < 0) { 00670 if (!pktl || ret == AVERROR(EAGAIN)) 00671 return ret; 00672 for (i = 0; i < s->nb_streams; i++) 00673 s->streams[i]->probe_packets = 0; 00674 continue; 00675 } 00676 st= s->streams[pkt->stream_index]; 00677 00678 switch(st->codec->codec_type){ 00679 case AVMEDIA_TYPE_VIDEO: 00680 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id; 00681 break; 00682 case AVMEDIA_TYPE_AUDIO: 00683 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id; 00684 break; 00685 case AVMEDIA_TYPE_SUBTITLE: 00686 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id; 00687 break; 00688 } 00689 00690 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE || 00691 !st->probe_packets)) 00692 return ret; 00693 00694 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end); 00695 s->raw_packet_buffer_remaining_size -= pkt->size; 00696 00697 if(st->codec->codec_id == CODEC_ID_PROBE){ 00698 AVProbeData *pd = &st->probe_data; 00699 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index); 00700 --st->probe_packets; 00701 00702 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE); 00703 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size); 00704 pd->buf_size += pkt->size; 00705 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE); 00706 00707 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){ 00708 //FIXME we dont reduce score to 0 for the case of running out of buffer space in bytes 00709 set_codec_from_probe_data(s, st, pd, 
st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0); 00710 if(st->codec->codec_id != CODEC_ID_PROBE){ 00711 pd->buf_size=0; 00712 av_freep(&pd->buf); 00713 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index); 00714 } 00715 } 00716 } 00717 } 00718 } 00719 00720 /**********************************************************/ 00721 00725 static int get_audio_frame_size(AVCodecContext *enc, int size) 00726 { 00727 int frame_size; 00728 00729 if(enc->codec_id == CODEC_ID_VORBIS) 00730 return -1; 00731 00732 if (enc->frame_size <= 1) { 00733 int bits_per_sample = av_get_bits_per_sample(enc->codec_id); 00734 00735 if (bits_per_sample) { 00736 if (enc->channels == 0) 00737 return -1; 00738 frame_size = (size << 3) / (bits_per_sample * enc->channels); 00739 } else { 00740 /* used for example by ADPCM codecs */ 00741 if (enc->bit_rate == 0) 00742 return -1; 00743 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate; 00744 } 00745 } else { 00746 frame_size = enc->frame_size; 00747 } 00748 return frame_size; 00749 } 00750 00751 00755 static void compute_frame_duration(int *pnum, int *pden, AVStream *st, 00756 AVCodecParserContext *pc, AVPacket *pkt) 00757 { 00758 int frame_size; 00759 00760 *pnum = 0; 00761 *pden = 0; 00762 switch(st->codec->codec_type) { 00763 case AVMEDIA_TYPE_VIDEO: 00764 if(st->time_base.num*1000LL > st->time_base.den){ 00765 *pnum = st->time_base.num; 00766 *pden = st->time_base.den; 00767 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){ 00768 *pnum = st->codec->time_base.num; 00769 *pden = st->codec->time_base.den; 00770 if (pc && pc->repeat_pict) { 00771 *pnum = (*pnum) * (1 + pc->repeat_pict); 00772 } 00773 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet 00774 //Thus if we have no parser in such case leave duration undefined. 
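            //(such codecs are flagged with ticks_per_frame > 1, tested just below)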
00775 if(st->codec->ticks_per_frame>1 && !pc){ 00776 *pnum = *pden = 0; 00777 } 00778 } 00779 break; 00780 case AVMEDIA_TYPE_AUDIO: 00781 frame_size = get_audio_frame_size(st->codec, pkt->size); 00782 if (frame_size < 0) 00783 break; 00784 *pnum = frame_size; 00785 *pden = st->codec->sample_rate; 00786 break; 00787 default: 00788 break; 00789 } 00790 } 00791 00792 static int is_intra_only(AVCodecContext *enc){ 00793 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){ 00794 return 1; 00795 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){ 00796 switch(enc->codec_id){ 00797 case CODEC_ID_MJPEG: 00798 case CODEC_ID_MJPEGB: 00799 case CODEC_ID_LJPEG: 00800 case CODEC_ID_RAWVIDEO: 00801 case CODEC_ID_DVVIDEO: 00802 case CODEC_ID_HUFFYUV: 00803 case CODEC_ID_FFVHUFF: 00804 case CODEC_ID_ASV1: 00805 case CODEC_ID_ASV2: 00806 case CODEC_ID_VCR1: 00807 case CODEC_ID_DNXHD: 00808 case CODEC_ID_JPEG2000: 00809 return 1; 00810 default: break; 00811 } 00812 } 00813 return 0; 00814 } 00815 00816 static void update_initial_timestamps(AVFormatContext *s, int stream_index, 00817 int64_t dts, int64_t pts) 00818 { 00819 AVStream *st= s->streams[stream_index]; 00820 AVPacketList *pktl= s->packet_buffer; 00821 00822 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE) 00823 return; 00824 00825 st->first_dts= dts - st->cur_dts; 00826 st->cur_dts= dts; 00827 00828 for(; pktl; pktl= pktl->next){ 00829 if(pktl->pkt.stream_index != stream_index) 00830 continue; 00831 //FIXME think more about this check 00832 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts) 00833 pktl->pkt.pts += st->first_dts; 00834 00835 if(pktl->pkt.dts != AV_NOPTS_VALUE) 00836 pktl->pkt.dts += st->first_dts; 00837 00838 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE) 00839 st->start_time= pktl->pkt.pts; 00840 } 00841 if (st->start_time == AV_NOPTS_VALUE) 00842 st->start_time = pts; 00843 } 00844 00845 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt) 00846 { 00847 AVPacketList *pktl= s->packet_buffer; 00848 int64_t cur_dts= 0; 00849 00850 if(st->first_dts != AV_NOPTS_VALUE){ 00851 cur_dts= st->first_dts; 00852 for(; pktl; pktl= pktl->next){ 00853 if(pktl->pkt.stream_index == pkt->stream_index){ 00854 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration) 00855 break; 00856 cur_dts -= pkt->duration; 00857 } 00858 } 00859 pktl= s->packet_buffer; 00860 st->first_dts = cur_dts; 00861 }else if(st->cur_dts) 00862 return; 00863 00864 for(; pktl; pktl= pktl->next){ 00865 if(pktl->pkt.stream_index != pkt->stream_index) 00866 continue; 00867 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE 00868 && !pktl->pkt.duration){ 00869 pktl->pkt.dts= cur_dts; 00870 if(!st->codec->has_b_frames) 00871 pktl->pkt.pts= cur_dts; 00872 cur_dts += pkt->duration; 00873 pktl->pkt.duration= pkt->duration; 00874 }else 00875 break; 00876 } 00877 if(st->first_dts == AV_NOPTS_VALUE) 00878 st->cur_dts= cur_dts; 00879 } 00880 00881 static void compute_pkt_fields(AVFormatContext *s, AVStream *st, 00882 AVCodecParserContext *pc, AVPacket *pkt) 00883 { 00884 int num, den, presentation_delayed, delay, i; 00885 int64_t offset; 00886 00887 if (s->flags & AVFMT_FLAG_NOFILLIN) 00888 return; 00889 00890 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE) 00891 pkt->dts= AV_NOPTS_VALUE; 00892 00893 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE) 00894 //FIXME Set low_delay = 0 when 
has_b_frames = 1 00895 st->codec->has_b_frames = 1; 00896 00897 /* do we have a video B-frame ? */ 00898 delay= st->codec->has_b_frames; 00899 presentation_delayed = 0; 00900 /* XXX: need has_b_frame, but cannot get it if the codec is 00901 not initialized */ 00902 if (delay && 00903 pc && pc->pict_type != FF_B_TYPE) 00904 presentation_delayed = 1; 00905 00906 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63 00907 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){ 00908 pkt->dts -= 1LL<<st->pts_wrap_bits; 00909 } 00910 00911 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg) 00912 // we take the conservative approach and discard both 00913 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly. 00914 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){ 00915 av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n"); 00916 pkt->dts= pkt->pts= AV_NOPTS_VALUE; 00917 } 00918 00919 if (pkt->duration == 0) { 00920 compute_frame_duration(&num, &den, st, pc, pkt); 00921 if (den && num) { 00922 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN); 00923 00924 if(pkt->duration != 0 && s->packet_buffer) 00925 update_initial_durations(s, st, pkt); 00926 } 00927 } 00928 00929 /* correct timestamps with byte offset if demuxers only have timestamps 00930 on packet boundaries */ 00931 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){ 00932 /* this will estimate bitrate based on this frame's duration and size */ 00933 offset = av_rescale(pc->offset, pkt->duration, pkt->size); 00934 if(pkt->pts != AV_NOPTS_VALUE) 00935 pkt->pts += offset; 00936 if(pkt->dts != AV_NOPTS_VALUE) 00937 pkt->dts += offset; 00938 } 00939 00940 if (pc && pc->dts_sync_point >= 0) { 00941 // we have synchronization info from the parser 00942 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num; 00943 if (den > 0) { 00944 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den; 00945 if (pkt->dts != AV_NOPTS_VALUE) { 00946 // got DTS from the stream, update reference timestamp 00947 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den; 00948 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den; 00949 } else if (st->reference_dts != AV_NOPTS_VALUE) { 00950 // compute DTS based on reference timestamp 00951 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den; 00952 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den; 00953 } 00954 if (pc->dts_sync_point > 0) 00955 st->reference_dts = pkt->dts; // new reference 00956 } 00957 } 00958 00959 /* This may be redundant, but it should not hurt. 
*/ 00960 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts) 00961 presentation_delayed = 1; 00962 00963 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc); 00964 /* interpolate PTS and DTS if they are not present */ 00965 //We skip H264 currently because delay and has_b_frames are not reliably set 00966 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){ 00967 if (presentation_delayed) { 00968 /* DTS = decompression timestamp */ 00969 /* PTS = presentation timestamp */ 00970 if (pkt->dts == AV_NOPTS_VALUE) 00971 pkt->dts = st->last_IP_pts; 00972 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); 00973 if (pkt->dts == AV_NOPTS_VALUE) 00974 pkt->dts = st->cur_dts; 00975 00976 /* this is tricky: the dts must be incremented by the duration 00977 of the frame we are displaying, i.e. the last I- or P-frame */ 00978 if (st->last_IP_duration == 0) 00979 st->last_IP_duration = pkt->duration; 00980 if(pkt->dts != AV_NOPTS_VALUE) 00981 st->cur_dts = pkt->dts + st->last_IP_duration; 00982 st->last_IP_duration = pkt->duration; 00983 st->last_IP_pts= pkt->pts; 00984 /* cannot compute PTS if not present (we can compute it only 00985 by knowing the future */ 00986 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){ 00987 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){ 00988 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts); 00989 int64_t new_diff= FFABS(st->cur_dts - pkt->pts); 00990 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){ 00991 pkt->pts += pkt->duration; 00992 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size); 00993 } 00994 } 00995 00996 /* presentation is not delayed : PTS and DTS are the same */ 00997 if(pkt->pts == AV_NOPTS_VALUE) 00998 pkt->pts = pkt->dts; 00999 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts); 01000 if(pkt->pts == AV_NOPTS_VALUE) 01001 pkt->pts = st->cur_dts; 01002 pkt->dts = pkt->pts; 01003 if(pkt->pts != AV_NOPTS_VALUE) 01004 st->cur_dts = pkt->pts + pkt->duration; 01005 } 01006 } 01007 01008 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){ 01009 st->pts_buffer[0]= pkt->pts; 01010 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) 01011 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); 01012 if(pkt->dts == AV_NOPTS_VALUE) 01013 pkt->dts= st->pts_buffer[0]; 01014 if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here 01015 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet 01016 } 01017 if(pkt->dts > st->cur_dts) 01018 st->cur_dts = pkt->dts; 01019 } 01020 01021 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts); 01022 01023 /* update flags */ 01024 if(is_intra_only(st->codec)) 01025 pkt->flags |= AV_PKT_FLAG_KEY; 01026 else if (pc) { 01027 pkt->flags = 0; 01028 /* keyframe computation */ 01029 if (pc->key_frame == 1) 01030 pkt->flags |= AV_PKT_FLAG_KEY; 01031 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE) 01032 pkt->flags |= AV_PKT_FLAG_KEY; 01033 } 01034 if (pc) 01035 pkt->convergence_duration = pc->convergence_duration; 01036 } 
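/*
 * Caller-side sketch of the demuxing path implemented above (illustrative
 * only; "input.mpg" is a placeholder and error handling is kept minimal):
 *
 *     AVFormatContext *ic = NULL;
 *     AVPacket pkt;
 *
 *     av_register_all();
 *     if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0)
 *         return -1;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.pts, pkt.dts and pkt.duration were filled in by
 *         // compute_pkt_fields() above
 *         av_free_packet(&pkt);
 *     }
 *     av_close_input_file(ic);
 */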
01037 01038 01039 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt) 01040 { 01041 AVStream *st; 01042 int len, ret, i; 01043 01044 av_init_packet(pkt); 01045 01046 for(;;) { 01047 /* select current input stream component */ 01048 st = s->cur_st; 01049 if (st) { 01050 if (!st->need_parsing || !st->parser) { 01051 /* no parsing needed: we just output the packet as is */ 01052 /* raw data support */ 01053 *pkt = st->cur_pkt; st->cur_pkt.data= NULL; 01054 compute_pkt_fields(s, st, NULL, pkt); 01055 s->cur_st = NULL; 01056 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && 01057 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) { 01058 ff_reduce_index(s, st->index); 01059 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME); 01060 } 01061 break; 01062 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) { 01063 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size, 01064 st->cur_ptr, st->cur_len, 01065 st->cur_pkt.pts, st->cur_pkt.dts, 01066 st->cur_pkt.pos); 01067 st->cur_pkt.pts = AV_NOPTS_VALUE; 01068 st->cur_pkt.dts = AV_NOPTS_VALUE; 01069 /* increment read pointer */ 01070 st->cur_ptr += len; 01071 st->cur_len -= len; 01072 01073 /* return packet if any */ 01074 if (pkt->size) { 01075 got_packet: 01076 pkt->duration = 0; 01077 pkt->stream_index = st->index; 01078 pkt->pts = st->parser->pts; 01079 pkt->dts = st->parser->dts; 01080 pkt->pos = st->parser->pos; 01081 pkt->destruct = NULL; 01082 compute_pkt_fields(s, st, st->parser, pkt); 01083 01084 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){ 01085 ff_reduce_index(s, st->index); 01086 av_add_index_entry(st, st->parser->frame_offset, pkt->dts, 01087 0, 0, AVINDEX_KEYFRAME); 01088 } 01089 01090 break; 01091 } 01092 } else { 01093 /* free packet */ 01094 av_free_packet(&st->cur_pkt); 01095 s->cur_st = NULL; 01096 } 01097 } else { 01098 AVPacket cur_pkt; 01099 /* read next packet */ 01100 ret = av_read_packet(s, &cur_pkt); 01101 if (ret < 0) { 01102 if (ret == AVERROR(EAGAIN)) 01103 return ret; 01104 /* return the last frames, if any */ 01105 for(i = 0; i < s->nb_streams; i++) { 01106 st = s->streams[i]; 01107 if (st->parser && st->need_parsing) { 01108 av_parser_parse2(st->parser, st->codec, 01109 &pkt->data, &pkt->size, 01110 NULL, 0, 01111 AV_NOPTS_VALUE, AV_NOPTS_VALUE, 01112 AV_NOPTS_VALUE); 01113 if (pkt->size) 01114 goto got_packet; 01115 } 01116 } 01117 /* no more packets: really terminate parsing */ 01118 return ret; 01119 } 01120 st = s->streams[cur_pkt.stream_index]; 01121 st->cur_pkt= cur_pkt; 01122 01123 if(st->cur_pkt.pts != AV_NOPTS_VALUE && 01124 st->cur_pkt.dts != AV_NOPTS_VALUE && 01125 st->cur_pkt.pts < st->cur_pkt.dts){ 01126 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n", 01127 st->cur_pkt.stream_index, 01128 st->cur_pkt.pts, 01129 st->cur_pkt.dts, 01130 st->cur_pkt.size); 01131 // av_free_packet(&st->cur_pkt); 01132 // return -1; 01133 } 01134 01135 if(s->debug & FF_FDEBUG_TS) 01136 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n", 01137 st->cur_pkt.stream_index, 01138 st->cur_pkt.pts, 01139 st->cur_pkt.dts, 01140 st->cur_pkt.size, 01141 st->cur_pkt.duration, 01142 st->cur_pkt.flags); 01143 01144 s->cur_st = st; 01145 st->cur_ptr = st->cur_pkt.data; 01146 st->cur_len = st->cur_pkt.size; 01147 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) { 01148 st->parser = 
av_parser_init(st->codec->codec_id); 01149 if (!st->parser) { 01150 /* no parser available: just output the raw packets */ 01151 st->need_parsing = AVSTREAM_PARSE_NONE; 01152 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){ 01153 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; 01154 } 01155 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){ 01156 st->parser->next_frame_offset= 01157 st->parser->cur_offset= st->cur_pkt.pos; 01158 } 01159 } 01160 } 01161 } 01162 if(s->debug & FF_FDEBUG_TS) 01163 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n", 01164 pkt->stream_index, 01165 pkt->pts, 01166 pkt->dts, 01167 pkt->size, 01168 pkt->duration, 01169 pkt->flags); 01170 01171 return 0; 01172 } 01173 01174 int av_read_frame(AVFormatContext *s, AVPacket *pkt) 01175 { 01176 AVPacketList *pktl; 01177 int eof=0; 01178 const int genpts= s->flags & AVFMT_FLAG_GENPTS; 01179 01180 for(;;){ 01181 pktl = s->packet_buffer; 01182 if (pktl) { 01183 AVPacket *next_pkt= &pktl->pkt; 01184 01185 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){ 01186 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){ 01187 if( pktl->pkt.stream_index == next_pkt->stream_index 01188 && next_pkt->dts < pktl->pkt.dts 01189 && pktl->pkt.pts != pktl->pkt.dts //not b frame 01190 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){ 01191 next_pkt->pts= pktl->pkt.dts; 01192 } 01193 pktl= pktl->next; 01194 } 01195 pktl = s->packet_buffer; 01196 } 01197 01198 if( next_pkt->pts != AV_NOPTS_VALUE 01199 || next_pkt->dts == AV_NOPTS_VALUE 01200 || !genpts || eof){ 01201 /* read packet from packet buffer, if there is data */ 01202 *pkt = *next_pkt; 01203 s->packet_buffer = pktl->next; 01204 av_free(pktl); 01205 return 0; 01206 } 01207 } 01208 if(genpts){ 01209 int ret= av_read_frame_internal(s, pkt); 01210 if(ret<0){ 01211 if(pktl && ret != AVERROR(EAGAIN)){ 01212 eof=1; 01213 continue; 01214 }else 01215 return ret; 01216 } 01217 01218 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt, 01219 &s->packet_buffer_end)) < 0) 01220 return AVERROR(ENOMEM); 01221 }else{ 01222 assert(!s->packet_buffer); 01223 return av_read_frame_internal(s, pkt); 01224 } 01225 } 01226 } 01227 01228 /* XXX: suppress the packet queue */ 01229 static void flush_packet_queue(AVFormatContext *s) 01230 { 01231 AVPacketList *pktl; 01232 01233 for(;;) { 01234 pktl = s->packet_buffer; 01235 if (!pktl) 01236 break; 01237 s->packet_buffer = pktl->next; 01238 av_free_packet(&pktl->pkt); 01239 av_free(pktl); 01240 } 01241 while(s->raw_packet_buffer){ 01242 pktl = s->raw_packet_buffer; 01243 s->raw_packet_buffer = pktl->next; 01244 av_free_packet(&pktl->pkt); 01245 av_free(pktl); 01246 } 01247 s->packet_buffer_end= 01248 s->raw_packet_buffer_end= NULL; 01249 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE; 01250 } 01251 01252 /*******************************************************/ 01253 /* seek support */ 01254 01255 int av_find_default_stream_index(AVFormatContext *s) 01256 { 01257 int first_audio_index = -1; 01258 int i; 01259 AVStream *st; 01260 01261 if (s->nb_streams <= 0) 01262 return -1; 01263 for(i = 0; i < s->nb_streams; i++) { 01264 st = s->streams[i]; 01265 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 01266 return i; 01267 } 01268 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) 01269 first_audio_index = i; 01270 } 01271 return first_audio_index >= 0 ? 
first_audio_index : 0; 01272 } 01273 01277 void ff_read_frame_flush(AVFormatContext *s) 01278 { 01279 AVStream *st; 01280 int i, j; 01281 01282 flush_packet_queue(s); 01283 01284 s->cur_st = NULL; 01285 01286 /* for each stream, reset read state */ 01287 for(i = 0; i < s->nb_streams; i++) { 01288 st = s->streams[i]; 01289 01290 if (st->parser) { 01291 av_parser_close(st->parser); 01292 st->parser = NULL; 01293 av_free_packet(&st->cur_pkt); 01294 } 01295 st->last_IP_pts = AV_NOPTS_VALUE; 01296 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */ 01297 st->reference_dts = AV_NOPTS_VALUE; 01298 /* fail safe */ 01299 st->cur_ptr = NULL; 01300 st->cur_len = 0; 01301 01302 st->probe_packets = MAX_PROBE_PACKETS; 01303 01304 for(j=0; j<MAX_REORDER_DELAY+1; j++) 01305 st->pts_buffer[j]= AV_NOPTS_VALUE; 01306 } 01307 } 01308 01309 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){ 01310 int i; 01311 01312 for(i = 0; i < s->nb_streams; i++) { 01313 AVStream *st = s->streams[i]; 01314 01315 st->cur_dts = av_rescale(timestamp, 01316 st->time_base.den * (int64_t)ref_st->time_base.num, 01317 st->time_base.num * (int64_t)ref_st->time_base.den); 01318 } 01319 } 01320 01321 void ff_reduce_index(AVFormatContext *s, int stream_index) 01322 { 01323 AVStream *st= s->streams[stream_index]; 01324 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry); 01325 01326 if((unsigned)st->nb_index_entries >= max_entries){ 01327 int i; 01328 for(i=0; 2*i<st->nb_index_entries; i++) 01329 st->index_entries[i]= st->index_entries[2*i]; 01330 st->nb_index_entries= i; 01331 } 01332 } 01333 01334 int av_add_index_entry(AVStream *st, 01335 int64_t pos, int64_t timestamp, int size, int distance, int flags) 01336 { 01337 AVIndexEntry *entries, *ie; 01338 int index; 01339 01340 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry)) 01341 return -1; 01342 01343 entries = av_fast_realloc(st->index_entries, 01344 &st->index_entries_allocated_size, 01345 (st->nb_index_entries + 1) * 01346 sizeof(AVIndexEntry)); 01347 if(!entries) 01348 return -1; 01349 01350 st->index_entries= entries; 01351 01352 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY); 01353 01354 if(index<0){ 01355 index= st->nb_index_entries++; 01356 ie= &entries[index]; 01357 assert(index==0 || ie[-1].timestamp < timestamp); 01358 }else{ 01359 ie= &entries[index]; 01360 if(ie->timestamp != timestamp){ 01361 if(ie->timestamp <= timestamp) 01362 return -1; 01363 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index)); 01364 st->nb_index_entries++; 01365 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance 01366 distance= ie->min_distance; 01367 } 01368 01369 ie->pos = pos; 01370 ie->timestamp = timestamp; 01371 ie->min_distance= distance; 01372 ie->size= size; 01373 ie->flags = flags; 01374 01375 return index; 01376 } 01377 01378 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, 01379 int flags) 01380 { 01381 AVIndexEntry *entries= st->index_entries; 01382 int nb_entries= st->nb_index_entries; 01383 int a, b, m; 01384 int64_t timestamp; 01385 01386 a = - 1; 01387 b = nb_entries; 01388 01389 //optimize appending index entries at the end 01390 if(b && entries[b-1].timestamp < wanted_timestamp) 01391 a= b-1; 01392 01393 while (b - a > 1) { 01394 m = (a + b) >> 1; 01395 timestamp = entries[m].timestamp; 01396 if(timestamp >= wanted_timestamp) 01397 b = m; 01398 if(timestamp <= 
wanted_timestamp) 01399 a = m; 01400 } 01401 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b; 01402 01403 if(!(flags & AVSEEK_FLAG_ANY)){ 01404 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){ 01405 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1; 01406 } 01407 } 01408 01409 if(m == nb_entries) 01410 return -1; 01411 return m; 01412 } 01413 01414 #define DEBUG_SEEK 01415 01416 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){ 01417 AVInputFormat *avif= s->iformat; 01418 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit; 01419 int64_t ts_min, ts_max, ts; 01420 int index; 01421 int64_t ret; 01422 AVStream *st; 01423 01424 if (stream_index < 0) 01425 return -1; 01426 01427 #ifdef DEBUG_SEEK 01428 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts); 01429 #endif 01430 01431 ts_max= 01432 ts_min= AV_NOPTS_VALUE; 01433 pos_limit= -1; //gcc falsely says it may be uninitialized 01434 01435 st= s->streams[stream_index]; 01436 if(st->index_entries){ 01437 AVIndexEntry *e; 01438 01439 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp() 01440 index= FFMAX(index, 0); 01441 e= &st->index_entries[index]; 01442 01443 if(e->timestamp <= target_ts || e->pos == e->min_distance){ 01444 pos_min= e->pos; 01445 ts_min= e->timestamp; 01446 #ifdef DEBUG_SEEK 01447 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n", 01448 pos_min,ts_min); 01449 #endif 01450 }else{ 01451 assert(index==0); 01452 } 01453 01454 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD); 01455 assert(index < st->nb_index_entries); 01456 if(index >= 0){ 01457 e= &st->index_entries[index]; 01458 assert(e->timestamp >= target_ts); 01459 pos_max= e->pos; 01460 ts_max= e->timestamp; 01461 pos_limit= pos_max - e->min_distance; 01462 #ifdef DEBUG_SEEK 01463 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n", 01464 pos_max,pos_limit, ts_max); 01465 #endif 01466 } 01467 } 01468 01469 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp); 01470 if(pos<0) 01471 return -1; 01472 01473 /* do the seek */ 01474 if ((ret = url_fseek(s->pb, pos, SEEK_SET)) < 0) 01475 return ret; 01476 01477 av_update_cur_dts(s, st, ts); 01478 01479 return 0; 01480 } 01481 01482 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){ 01483 int64_t pos, ts; 01484 int64_t start_pos, filesize; 01485 int no_change; 01486 01487 #ifdef DEBUG_SEEK 01488 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts); 01489 #endif 01490 01491 if(ts_min == AV_NOPTS_VALUE){ 01492 pos_min = s->data_offset; 01493 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX); 01494 if (ts_min == AV_NOPTS_VALUE) 01495 return -1; 01496 } 01497 01498 if(ts_max == AV_NOPTS_VALUE){ 01499 int step= 1024; 01500 filesize = url_fsize(s->pb); 01501 pos_max = filesize - 1; 01502 do{ 01503 pos_max -= step; 01504 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step); 01505 step += step; 01506 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step); 01507 if (ts_max == 
AV_NOPTS_VALUE) 01508 return -1; 01509 01510 for(;;){ 01511 int64_t tmp_pos= pos_max + 1; 01512 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX); 01513 if(tmp_ts == AV_NOPTS_VALUE) 01514 break; 01515 ts_max= tmp_ts; 01516 pos_max= tmp_pos; 01517 if(tmp_pos >= filesize) 01518 break; 01519 } 01520 pos_limit= pos_max; 01521 } 01522 01523 if(ts_min > ts_max){ 01524 return -1; 01525 }else if(ts_min == ts_max){ 01526 pos_limit= pos_min; 01527 } 01528 01529 no_change=0; 01530 while (pos_min < pos_limit) { 01531 #ifdef DEBUG_SEEK 01532 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n", 01533 pos_min, pos_max, 01534 ts_min, ts_max); 01535 #endif 01536 assert(pos_limit <= pos_max); 01537 01538 if(no_change==0){ 01539 int64_t approximate_keyframe_distance= pos_max - pos_limit; 01540 // interpolate position (better than dichotomy) 01541 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min) 01542 + pos_min - approximate_keyframe_distance; 01543 }else if(no_change==1){ 01544 // bisection, if interpolation failed to change min or max pos last time 01545 pos = (pos_min + pos_limit)>>1; 01546 }else{ 01547 /* linear search if bisection failed, can only happen if there 01548 are very few or no keyframes between min/max */ 01549 pos=pos_min; 01550 } 01551 if(pos <= pos_min) 01552 pos= pos_min + 1; 01553 else if(pos > pos_limit) 01554 pos= pos_limit; 01555 start_pos= pos; 01556 01557 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1 01558 if(pos == pos_max) 01559 no_change++; 01560 else 01561 no_change=0; 01562 #ifdef DEBUG_SEEK 01563 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", 01564 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, 01565 start_pos, no_change); 01566 #endif 01567 if(ts == AV_NOPTS_VALUE){ 01568 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n"); 01569 return -1; 01570 } 01571 assert(ts != AV_NOPTS_VALUE); 01572 if (target_ts <= ts) { 01573 pos_limit = start_pos - 1; 01574 pos_max = pos; 01575 ts_max = ts; 01576 } 01577 if (target_ts >= ts) { 01578 pos_min = pos; 01579 ts_min = ts; 01580 } 01581 } 01582 01583 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max; 01584 ts = (flags & AVSEEK_FLAG_BACKWARD) ? 
ts_min : ts_max; 01585 #ifdef DEBUG_SEEK 01586 pos_min = pos; 01587 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX); 01588 pos_min++; 01589 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX); 01590 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n", 01591 pos, ts_min, target_ts, ts_max); 01592 #endif 01593 *ts_ret= ts; 01594 return pos; 01595 } 01596 01597 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){ 01598 int64_t pos_min, pos_max; 01599 #if 0 01600 AVStream *st; 01601 01602 if (stream_index < 0) 01603 return -1; 01604 01605 st= s->streams[stream_index]; 01606 #endif 01607 01608 pos_min = s->data_offset; 01609 pos_max = url_fsize(s->pb) - 1; 01610 01611 if (pos < pos_min) pos= pos_min; 01612 else if(pos > pos_max) pos= pos_max; 01613 01614 url_fseek(s->pb, pos, SEEK_SET); 01615 01616 #if 0 01617 av_update_cur_dts(s, st, ts); 01618 #endif 01619 return 0; 01620 } 01621 01622 static int av_seek_frame_generic(AVFormatContext *s, 01623 int stream_index, int64_t timestamp, int flags) 01624 { 01625 int index; 01626 int64_t ret; 01627 AVStream *st; 01628 AVIndexEntry *ie; 01629 01630 st = s->streams[stream_index]; 01631 01632 index = av_index_search_timestamp(st, timestamp, flags); 01633 01634 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp) 01635 return -1; 01636 01637 if(index < 0 || index==st->nb_index_entries-1){ 01638 int i; 01639 AVPacket pkt; 01640 01641 if(st->nb_index_entries){ 01642 assert(st->index_entries); 01643 ie= &st->index_entries[st->nb_index_entries-1]; 01644 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0) 01645 return ret; 01646 av_update_cur_dts(s, st, ie->timestamp); 01647 }else{ 01648 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0) 01649 return ret; 01650 } 01651 for(i=0;; i++) { 01652 int ret; 01653 do{ 01654 ret = av_read_frame(s, &pkt); 01655 }while(ret == AVERROR(EAGAIN)); 01656 if(ret<0) 01657 break; 01658 av_free_packet(&pkt); 01659 if(stream_index == pkt.stream_index){ 01660 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp) 01661 break; 01662 } 01663 } 01664 index = av_index_search_timestamp(st, timestamp, flags); 01665 } 01666 if (index < 0) 01667 return -1; 01668 01669 ff_read_frame_flush(s); 01670 if (s->iformat->read_seek){ 01671 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0) 01672 return 0; 01673 } 01674 ie = &st->index_entries[index]; 01675 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0) 01676 return ret; 01677 av_update_cur_dts(s, st, ie->timestamp); 01678 01679 return 0; 01680 } 01681 01682 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) 01683 { 01684 int ret; 01685 AVStream *st; 01686 01687 ff_read_frame_flush(s); 01688 01689 if(flags & AVSEEK_FLAG_BYTE) 01690 return av_seek_frame_byte(s, stream_index, timestamp, flags); 01691 01692 if(stream_index < 0){ 01693 stream_index= av_find_default_stream_index(s); 01694 if(stream_index < 0) 01695 return -1; 01696 01697 st= s->streams[stream_index]; 01698 /* timestamp for default must be expressed in AV_TIME_BASE units */ 01699 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num); 01700 } 01701 01702 /* first, we try the format specific seek */ 01703 if (s->iformat->read_seek) 01704 ret = s->iformat->read_seek(s, stream_index, timestamp, flags); 01705 else 01706 ret = -1; 01707 if (ret >= 0) { 01708 return 0; 01709 } 01710 01711 if(s->iformat->read_timestamp) 
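        //the demuxer provides read_timestamp(), so binary-search the target timestamp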
01712 return av_seek_frame_binary(s, stream_index, timestamp, flags); 01713 else 01714 return av_seek_frame_generic(s, stream_index, timestamp, flags); 01715 } 01716 01717 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags) 01718 { 01719 if(min_ts > ts || max_ts < ts) 01720 return -1; 01721 01722 ff_read_frame_flush(s); 01723 01724 if (s->iformat->read_seek2) 01725 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags); 01726 01727 if(s->iformat->read_timestamp){ 01728 //try to seek via read_timestamp() 01729 } 01730 01731 //Fallback to old API if new is not implemented but old is 01732 //Note the old has somewat different sematics 01733 if(s->iformat->read_seek || 1) 01734 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0)); 01735 01736 // try some generic seek like av_seek_frame_generic() but with new ts semantics 01737 } 01738 01739 /*******************************************************/ 01740 01746 static int av_has_duration(AVFormatContext *ic) 01747 { 01748 int i; 01749 AVStream *st; 01750 01751 for(i = 0;i < ic->nb_streams; i++) { 01752 st = ic->streams[i]; 01753 if (st->duration != AV_NOPTS_VALUE) 01754 return 1; 01755 } 01756 return 0; 01757 } 01758 01764 static void av_update_stream_timings(AVFormatContext *ic) 01765 { 01766 int64_t start_time, start_time1, end_time, end_time1; 01767 int64_t duration, duration1; 01768 int i; 01769 AVStream *st; 01770 01771 start_time = INT64_MAX; 01772 end_time = INT64_MIN; 01773 duration = INT64_MIN; 01774 for(i = 0;i < ic->nb_streams; i++) { 01775 st = ic->streams[i]; 01776 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) { 01777 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q); 01778 if (start_time1 < start_time) 01779 start_time = start_time1; 01780 if (st->duration != AV_NOPTS_VALUE) { 01781 end_time1 = start_time1 01782 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q); 01783 if (end_time1 > end_time) 01784 end_time = end_time1; 01785 } 01786 } 01787 if (st->duration != AV_NOPTS_VALUE) { 01788 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q); 01789 if (duration1 > duration) 01790 duration = duration1; 01791 } 01792 } 01793 if (start_time != INT64_MAX) { 01794 ic->start_time = start_time; 01795 if (end_time != INT64_MIN) { 01796 if (end_time - start_time > duration) 01797 duration = end_time - start_time; 01798 } 01799 } 01800 if (duration != INT64_MIN) { 01801 ic->duration = duration; 01802 if (ic->file_size > 0) { 01803 /* compute the bitrate */ 01804 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE / 01805 (double)ic->duration; 01806 } 01807 } 01808 } 01809 01810 static void fill_all_stream_timings(AVFormatContext *ic) 01811 { 01812 int i; 01813 AVStream *st; 01814 01815 av_update_stream_timings(ic); 01816 for(i = 0;i < ic->nb_streams; i++) { 01817 st = ic->streams[i]; 01818 if (st->start_time == AV_NOPTS_VALUE) { 01819 if(ic->start_time != AV_NOPTS_VALUE) 01820 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base); 01821 if(ic->duration != AV_NOPTS_VALUE) 01822 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base); 01823 } 01824 } 01825 } 01826 01827 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic) 01828 { 01829 int64_t filesize, duration; 01830 int bit_rate, i; 01831 AVStream *st; 01832 01833 /* if bit_rate is already set, we believe it */ 01834 if 
(ic->bit_rate == 0) { 01835 bit_rate = 0; 01836 for(i=0;i<ic->nb_streams;i++) { 01837 st = ic->streams[i]; 01838 bit_rate += st->codec->bit_rate; 01839 } 01840 ic->bit_rate = bit_rate; 01841 } 01842 01843 /* if duration is already set, we believe it */ 01844 if (ic->duration == AV_NOPTS_VALUE && 01845 ic->bit_rate != 0 && 01846 ic->file_size != 0) { 01847 filesize = ic->file_size; 01848 if (filesize > 0) { 01849 for(i = 0; i < ic->nb_streams; i++) { 01850 st = ic->streams[i]; 01851 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num); 01852 if (st->duration == AV_NOPTS_VALUE) 01853 st->duration = duration; 01854 } 01855 } 01856 } 01857 } 01858 01859 #define DURATION_MAX_READ_SIZE 250000 01860 #define DURATION_MAX_RETRY 3 01861 01862 /* only usable for MPEG-PS streams */ 01863 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset) 01864 { 01865 AVPacket pkt1, *pkt = &pkt1; 01866 AVStream *st; 01867 int read_size, i, ret; 01868 int64_t end_time, start_time[MAX_STREAMS]; 01869 int64_t filesize, offset, duration; 01870 int retry=0; 01871 01872 ic->cur_st = NULL; 01873 01874 /* flush packet queue */ 01875 flush_packet_queue(ic); 01876 01877 for(i=0;i<ic->nb_streams;i++) { 01878 st = ic->streams[i]; 01879 if(st->start_time != AV_NOPTS_VALUE){ 01880 start_time[i]= st->start_time; 01881 }else if(st->first_dts != AV_NOPTS_VALUE){ 01882 start_time[i]= st->first_dts; 01883 }else 01884 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n"); 01885 01886 if (st->parser) { 01887 av_parser_close(st->parser); 01888 st->parser= NULL; 01889 av_free_packet(&st->cur_pkt); 01890 } 01891 } 01892 01893 /* estimate the end time (duration) */ 01894 /* XXX: may need to support wrapping */ 01895 filesize = ic->file_size; 01896 end_time = AV_NOPTS_VALUE; 01897 do{ 01898 offset = filesize - (DURATION_MAX_READ_SIZE<<retry); 01899 if (offset < 0) 01900 offset = 0; 01901 01902 url_fseek(ic->pb, offset, SEEK_SET); 01903 read_size = 0; 01904 for(;;) { 01905 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0))) 01906 break; 01907 01908 do{ 01909 ret = av_read_packet(ic, pkt); 01910 }while(ret == AVERROR(EAGAIN)); 01911 if (ret != 0) 01912 break; 01913 read_size += pkt->size; 01914 st = ic->streams[pkt->stream_index]; 01915 if (pkt->pts != AV_NOPTS_VALUE && 01916 start_time[pkt->stream_index] != AV_NOPTS_VALUE) { 01917 end_time = pkt->pts; 01918 duration = end_time - start_time[pkt->stream_index]; 01919 if (duration < 0) 01920 duration += 1LL<<st->pts_wrap_bits; 01921 if (duration > 0) { 01922 if (st->duration == AV_NOPTS_VALUE || 01923 st->duration < duration) 01924 st->duration = duration; 01925 } 01926 } 01927 av_free_packet(pkt); 01928 } 01929 }while( end_time==AV_NOPTS_VALUE 01930 && filesize > (DURATION_MAX_READ_SIZE<<retry) 01931 && ++retry <= DURATION_MAX_RETRY); 01932 01933 fill_all_stream_timings(ic); 01934 01935 url_fseek(ic->pb, old_offset, SEEK_SET); 01936 for(i=0; i<ic->nb_streams; i++){ 01937 st= ic->streams[i]; 01938 st->cur_dts= st->first_dts; 01939 st->last_IP_pts = AV_NOPTS_VALUE; 01940 } 01941 } 01942 01943 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset) 01944 { 01945 int64_t file_size; 01946 01947 /* get the file size, if possible */ 01948 if (ic->iformat->flags & AVFMT_NOFILE) { 01949 file_size = 0; 01950 } else { 01951 file_size = url_fsize(ic->pb); 01952 if (file_size < 0) 01953 file_size = 0; 01954 } 01955 ic->file_size = file_size; 01956 01957 if 
((!strcmp(ic->iformat->name, "mpeg") || 01958 !strcmp(ic->iformat->name, "mpegts")) && 01959 file_size && !url_is_streamed(ic->pb)) { 01960 /* get accurate estimate from the PTSes */ 01961 av_estimate_timings_from_pts(ic, old_offset); 01962 } else if (av_has_duration(ic)) { 01963 /* at least one component has timings - we use them for all 01964 the components */ 01965 fill_all_stream_timings(ic); 01966 } else { 01967 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n"); 01968 /* less precise: use bitrate info */ 01969 av_estimate_timings_from_bit_rate(ic); 01970 } 01971 av_update_stream_timings(ic); 01972 01973 #if 0 01974 { 01975 int i; 01976 AVStream *st; 01977 for(i = 0;i < ic->nb_streams; i++) { 01978 st = ic->streams[i]; 01979 printf("%d: start_time: %0.3f duration: %0.3f\n", 01980 i, (double)st->start_time / AV_TIME_BASE, 01981 (double)st->duration / AV_TIME_BASE); 01982 } 01983 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n", 01984 (double)ic->start_time / AV_TIME_BASE, 01985 (double)ic->duration / AV_TIME_BASE, 01986 ic->bit_rate / 1000); 01987 } 01988 #endif 01989 } 01990 01991 static int has_codec_parameters(AVCodecContext *enc) 01992 { 01993 int val; 01994 switch(enc->codec_type) { 01995 case AVMEDIA_TYPE_AUDIO: 01996 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE; 01997 if(!enc->frame_size && 01998 (enc->codec_id == CODEC_ID_VORBIS || 01999 enc->codec_id == CODEC_ID_AAC || 02000 enc->codec_id == CODEC_ID_MP1 || 02001 enc->codec_id == CODEC_ID_MP2 || 02002 enc->codec_id == CODEC_ID_MP3 || 02003 enc->codec_id == CODEC_ID_SPEEX)) 02004 return 0; 02005 break; 02006 case AVMEDIA_TYPE_VIDEO: 02007 val = enc->width && enc->pix_fmt != PIX_FMT_NONE; 02008 break; 02009 default: 02010 val = 1; 02011 break; 02012 } 02013 return enc->codec_id != CODEC_ID_NONE && val != 0; 02014 } 02015 02016 static int try_decode_frame(AVStream *st, AVPacket *avpkt) 02017 { 02018 int16_t *samples; 02019 AVCodec *codec; 02020 int got_picture, data_size, ret=0; 02021 AVFrame picture; 02022 02023 if(!st->codec->codec){ 02024 codec = avcodec_find_decoder(st->codec->codec_id); 02025 if (!codec) 02026 return -1; 02027 ret = avcodec_open(st->codec, codec); 02028 if (ret < 0) 02029 return ret; 02030 } 02031 02032 if(!has_codec_parameters(st->codec)){ 02033 switch(st->codec->codec_type) { 02034 case AVMEDIA_TYPE_VIDEO: 02035 avcodec_get_frame_defaults(&picture); 02036 ret = avcodec_decode_video2(st->codec, &picture, 02037 &got_picture, avpkt); 02038 break; 02039 case AVMEDIA_TYPE_AUDIO: 02040 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE); 02041 samples = av_malloc(data_size); 02042 if (!samples) 02043 goto fail; 02044 ret = avcodec_decode_audio3(st->codec, samples, 02045 &data_size, avpkt); 02046 av_free(samples); 02047 break; 02048 default: 02049 break; 02050 } 02051 } 02052 fail: 02053 return ret; 02054 } 02055 02056 unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id) 02057 { 02058 while (tags->id != CODEC_ID_NONE) { 02059 if (tags->id == id) 02060 return tags->tag; 02061 tags++; 02062 } 02063 return 0; 02064 } 02065 02066 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag) 02067 { 02068 int i; 02069 for(i=0; tags[i].id != CODEC_ID_NONE;i++) { 02070 if(tag == tags[i].tag) 02071 return tags[i].id; 02072 } 02073 for(i=0; tags[i].id != CODEC_ID_NONE; i++) { 02074 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF) 02075 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 
8)&0xFF) 02076 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF) 02077 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF)) 02078 return tags[i].id; 02079 } 02080 return CODEC_ID_NONE; 02081 } 02082 02083 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id) 02084 { 02085 int i; 02086 for(i=0; tags && tags[i]; i++){ 02087 int tag= ff_codec_get_tag(tags[i], id); 02088 if(tag) return tag; 02089 } 02090 return 0; 02091 } 02092 02093 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag) 02094 { 02095 int i; 02096 for(i=0; tags && tags[i]; i++){ 02097 enum CodecID id= ff_codec_get_id(tags[i], tag); 02098 if(id!=CODEC_ID_NONE) return id; 02099 } 02100 return CODEC_ID_NONE; 02101 } 02102 02103 static void compute_chapters_end(AVFormatContext *s) 02104 { 02105 unsigned int i; 02106 02107 for (i=0; i+1<s->nb_chapters; i++) 02108 if (s->chapters[i]->end == AV_NOPTS_VALUE) { 02109 assert(s->chapters[i]->start <= s->chapters[i+1]->start); 02110 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base)); 02111 s->chapters[i]->end = s->chapters[i+1]->start; 02112 } 02113 02114 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) { 02115 assert(s->start_time != AV_NOPTS_VALUE); 02116 assert(s->duration > 0); 02117 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration, 02118 AV_TIME_BASE_Q, 02119 s->chapters[i]->time_base); 02120 } 02121 } 02122 02123 #define MAX_STD_TIMEBASES (60*12+5) 02124 static int get_std_framerate(int i){ 02125 if(i<60*12) return i*1001; 02126 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12; 02127 } 02128 02129 /* 02130 * Is the time base unreliable. 02131 * This is a heuristic to balance between quick acceptance of the values in 02132 * the headers vs. some extra checks. 02133 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps. 02134 * MPEG-2 commonly misuses field repeat flags to store different framerates. 02135 * And there are "variable" fps files this needs to detect as well. 
02136 */ 02137 static int tb_unreliable(AVCodecContext *c){ 02138 if( c->time_base.den >= 101L*c->time_base.num 02139 || c->time_base.den < 5L*c->time_base.num 02140 /* || c->codec_tag == AV_RL32("DIVX") 02141 || c->codec_tag == AV_RL32("XVID")*/ 02142 || c->codec_id == CODEC_ID_MPEG2VIDEO 02143 || c->codec_id == CODEC_ID_H264 02144 ) 02145 return 1; 02146 return 0; 02147 } 02148 02149 int av_find_stream_info(AVFormatContext *ic) 02150 { 02151 int i, count, ret, read_size, j; 02152 AVStream *st; 02153 AVPacket pkt1, *pkt; 02154 int64_t last_dts[MAX_STREAMS]; 02155 int64_t duration_gcd[MAX_STREAMS]={0}; 02156 int duration_count[MAX_STREAMS]={0}; 02157 double (*duration_error)[MAX_STD_TIMEBASES]; 02158 int64_t old_offset = url_ftell(ic->pb); 02159 int64_t codec_info_duration[MAX_STREAMS]={0}; 02160 02161 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error)); 02162 if (!duration_error) return AVERROR(ENOMEM); 02163 02164 for(i=0;i<ic->nb_streams;i++) { 02165 st = ic->streams[i]; 02166 if (st->codec->codec_id == CODEC_ID_AAC) { 02167 st->codec->sample_rate = 0; 02168 st->codec->frame_size = 0; 02169 st->codec->channels = 0; 02170 } 02171 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){ 02172 /* if(!st->time_base.num) 02173 st->time_base= */ 02174 if(!st->codec->time_base.num) 02175 st->codec->time_base= st->time_base; 02176 } 02177 //only for the split stuff 02178 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) { 02179 st->parser = av_parser_init(st->codec->codec_id); 02180 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){ 02181 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; 02182 } 02183 } 02184 assert(!st->codec->codec); 02185 //try to just open decoders, in case this is enough to get parameters 02186 if(!has_codec_parameters(st->codec)){ 02187 AVCodec *codec = avcodec_find_decoder(st->codec->codec_id); 02188 if (codec) 02189 avcodec_open(st->codec, codec); 02190 } 02191 } 02192 02193 for(i=0;i<MAX_STREAMS;i++){ 02194 last_dts[i]= AV_NOPTS_VALUE; 02195 } 02196 02197 count = 0; 02198 read_size = 0; 02199 for(;;) { 02200 if(url_interrupt_cb()){ 02201 ret= AVERROR(EINTR); 02202 av_log(ic, AV_LOG_DEBUG, "interrupted\n"); 02203 break; 02204 } 02205 02206 /* check if one codec still needs to be handled */ 02207 for(i=0;i<ic->nb_streams;i++) { 02208 int fps_analyze_framecount = 20; 02209 02210 st = ic->streams[i]; 02211 if (!has_codec_parameters(st->codec)) 02212 break; 02213 /* if the timebase is coarse (like the usual millisecond precision 02214 of mkv), we need to analyze more frames to reliably arrive at 02215 the correct fps */ 02216 if (av_q2d(st->time_base) > 0.0005) 02217 fps_analyze_framecount *= 2; 02218 /* variable fps and no guess at the real fps */ 02219 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num) 02220 && duration_count[i] < fps_analyze_framecount 02221 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO) 02222 break; 02223 if(st->parser && st->parser->parser->split && !st->codec->extradata) 02224 break; 02225 if(st->first_dts == AV_NOPTS_VALUE) 02226 break; 02227 } 02228 if (i == ic->nb_streams) { 02229 /* NOTE: if the format has no header, then we need to read 02230 some packets to get most of the streams, so we cannot 02231 stop here */ 02232 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) { 02233 /* if we found the info for all the codecs, we can stop */ 02234 ret = count; 02235 av_log(ic, AV_LOG_DEBUG, "All info found\n"); 02236 break; 02237 } 02238 } 02239 /* we did not get all the codec info, but we read too much data 
*/ 02240 if (read_size >= ic->probesize) { 02241 ret = count; 02242 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize); 02243 break; 02244 } 02245 02246 /* NOTE: a new stream can be added there if no header in file 02247 (AVFMTCTX_NOHEADER) */ 02248 ret = av_read_frame_internal(ic, &pkt1); 02249 if(ret == AVERROR(EAGAIN)) 02250 continue; 02251 if (ret < 0) { 02252 /* EOF or error */ 02253 ret = -1; /* we could not have all the codec parameters before EOF */ 02254 for(i=0;i<ic->nb_streams;i++) { 02255 st = ic->streams[i]; 02256 if (!has_codec_parameters(st->codec)){ 02257 char buf[256]; 02258 avcodec_string(buf, sizeof(buf), st->codec, 0); 02259 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf); 02260 } else { 02261 ret = 0; 02262 } 02263 } 02264 break; 02265 } 02266 02267 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end); 02268 if(av_dup_packet(pkt) < 0) { 02269 av_free(duration_error); 02270 return AVERROR(ENOMEM); 02271 } 02272 02273 read_size += pkt->size; 02274 02275 st = ic->streams[pkt->stream_index]; 02276 if(st->codec_info_nb_frames>1) { 02277 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){ 02278 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n"); 02279 break; 02280 } 02281 codec_info_duration[st->index] += pkt->duration; 02282 } 02283 st->codec_info_nb_frames++; 02284 02285 { 02286 int index= pkt->stream_index; 02287 int64_t last= last_dts[index]; 02288 int64_t duration= pkt->dts - last; 02289 02290 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){ 02291 double dur= duration * av_q2d(st->time_base); 02292 02293 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) 02294 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur); 02295 if(duration_count[index] < 2) 02296 memset(duration_error[index], 0, sizeof(*duration_error)); 02297 for(i=1; i<MAX_STD_TIMEBASES; i++){ 02298 int framerate= get_std_framerate(i); 02299 int ticks= lrintf(dur*framerate/(1001*12)); 02300 double error= dur - ticks*1001*12/(double)framerate; 02301 duration_error[index][i] += error*error; 02302 } 02303 duration_count[index]++; 02304 // ignore the first 4 values, they might have some random jitter 02305 if (duration_count[index] > 3) 02306 duration_gcd[index] = av_gcd(duration_gcd[index], duration); 02307 } 02308 if(last == AV_NOPTS_VALUE || duration_count[index]<=1) 02309 last_dts[pkt->stream_index]= pkt->dts; 02310 } 02311 if(st->parser && st->parser->parser->split && !st->codec->extradata){ 02312 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size); 02313 if(i){ 02314 st->codec->extradata_size= i; 02315 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); 02316 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size); 02317 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE); 02318 } 02319 } 02320 02321 /* if still no information, we try to open the codec and to 02322 decompress the frame. We try to avoid that in most cases as 02323 it takes longer and uses more memory. For MPEG-4, we need to 02324 decompress for QuickTime. 
*/ 02325 if (!has_codec_parameters(st->codec)) 02326 try_decode_frame(st, pkt); 02327 02328 count++; 02329 } 02330 02331 // close codecs which were opened in try_decode_frame() 02332 for(i=0;i<ic->nb_streams;i++) { 02333 st = ic->streams[i]; 02334 if(st->codec->codec) 02335 avcodec_close(st->codec); 02336 } 02337 for(i=0;i<ic->nb_streams;i++) { 02338 st = ic->streams[i]; 02339 if(st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && codec_info_duration[i]) 02340 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, 02341 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den, 02342 codec_info_duration[i] *(int64_t)st->time_base.num, 60000); 02343 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 02344 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample) 02345 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt); 02346 02347 // the check for tb_unreliable() is not completely correct, since this is not about handling 02348 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g. 02349 // ipmovie.c produces. 02350 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1 && !st->r_frame_rate.num) 02351 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX); 02352 if(duration_count[i] && !st->r_frame_rate.num 02353 && tb_unreliable(st->codec) /*&& 02354 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ... 02355 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){ 02356 int num = 0; 02357 double best_error= 2*av_q2d(st->time_base); 02358 best_error= best_error*best_error*duration_count[i]*1000*12*30; 02359 02360 for(j=1; j<MAX_STD_TIMEBASES; j++){ 02361 double error= duration_error[i][j] * get_std_framerate(j); 02362 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) 02363 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error); 02364 if(error < best_error){ 02365 best_error= error; 02366 num = get_std_framerate(j); 02367 } 02368 } 02369 // do not increase frame rate by more than 1 % in order to match a standard rate. 
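/* Editorial note (not part of the original source): get_std_framerate() above
 * enumerates the candidate rates n/12 fps for n = 1..719 plus the NTSC-style
 * rates 24000/1001, 30000/1001, 60000/1001, 12000/1001 and 15000/1001, and
 * duration_error[] accumulates each candidate's squared timing error over the
 * probed packets.  Worked example: if the measured packet durations correspond
 * to roughly 29.96 fps, the candidate 30000/1001 (about 29.97) ends up with
 * the smallest accumulated error and, being within the 1% bound checked just
 * below, is written into r_frame_rate via av_reduce(). */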
02370 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate))) 02371 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX); 02372 } 02373 02374 if (!st->r_frame_rate.num){ 02375 if( st->codec->time_base.den * (int64_t)st->time_base.num 02376 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){ 02377 st->r_frame_rate.num = st->codec->time_base.den; 02378 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame; 02379 }else{ 02380 st->r_frame_rate.num = st->time_base.den; 02381 st->r_frame_rate.den = st->time_base.num; 02382 } 02383 } 02384 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { 02385 if(!st->codec->bits_per_coded_sample) 02386 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id); 02387 } 02388 } 02389 02390 av_estimate_timings(ic, old_offset); 02391 02392 compute_chapters_end(ic); 02393 02394 #if 0 02395 /* correct DTS for B-frame streams with no timestamps */ 02396 for(i=0;i<ic->nb_streams;i++) { 02397 st = ic->streams[i]; 02398 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 02399 if(b-frames){ 02400 ppktl = &ic->packet_buffer; 02401 while(ppkt1){ 02402 if(ppkt1->stream_index != i) 02403 continue; 02404 if(ppkt1->pkt->dts < 0) 02405 break; 02406 if(ppkt1->pkt->pts != AV_NOPTS_VALUE) 02407 break; 02408 ppkt1->pkt->dts -= delta; 02409 ppkt1= ppkt1->next; 02410 } 02411 if(ppkt1) 02412 continue; 02413 st->cur_dts -= delta; 02414 } 02415 } 02416 } 02417 #endif 02418 02419 av_free(duration_error); 02420 02421 return ret; 02422 } 02423 02424 /*******************************************************/ 02425 02426 int av_read_play(AVFormatContext *s) 02427 { 02428 if (s->iformat->read_play) 02429 return s->iformat->read_play(s); 02430 if (s->pb) 02431 return av_url_read_fpause(s->pb, 0); 02432 return AVERROR(ENOSYS); 02433 } 02434 02435 int av_read_pause(AVFormatContext *s) 02436 { 02437 if (s->iformat->read_pause) 02438 return s->iformat->read_pause(s); 02439 if (s->pb) 02440 return av_url_read_fpause(s->pb, 1); 02441 return AVERROR(ENOSYS); 02442 } 02443 02444 void av_close_input_stream(AVFormatContext *s) 02445 { 02446 int i; 02447 AVStream *st; 02448 02449 if (s->iformat->read_close) 02450 s->iformat->read_close(s); 02451 for(i=0;i<s->nb_streams;i++) { 02452 /* free all data in a stream component */ 02453 st = s->streams[i]; 02454 if (st->parser) { 02455 av_parser_close(st->parser); 02456 av_free_packet(&st->cur_pkt); 02457 } 02458 av_metadata_free(&st->metadata); 02459 av_free(st->index_entries); 02460 av_free(st->codec->extradata); 02461 av_free(st->codec); 02462 #if LIBAVFORMAT_VERSION_INT < (53<<16) 02463 av_free(st->filename); 02464 #endif 02465 av_free(st->priv_data); 02466 av_free(st); 02467 } 02468 for(i=s->nb_programs-1; i>=0; i--) { 02469 #if LIBAVFORMAT_VERSION_INT < (53<<16) 02470 av_freep(&s->programs[i]->provider_name); 02471 av_freep(&s->programs[i]->name); 02472 #endif 02473 av_metadata_free(&s->programs[i]->metadata); 02474 av_freep(&s->programs[i]->stream_index); 02475 av_freep(&s->programs[i]); 02476 } 02477 av_freep(&s->programs); 02478 flush_packet_queue(s); 02479 av_freep(&s->priv_data); 02480 while(s->nb_chapters--) { 02481 #if LIBAVFORMAT_VERSION_INT < (53<<16) 02482 av_free(s->chapters[s->nb_chapters]->title); 02483 #endif 02484 av_metadata_free(&s->chapters[s->nb_chapters]->metadata); 02485 av_free(s->chapters[s->nb_chapters]); 02486 } 02487 av_freep(&s->chapters); 02488 av_metadata_free(&s->metadata); 02489 
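/* Editorial note (not part of the original source): the AVFormatContext itself
 * is released just below, so the caller must not touch s after this function
 * returns; av_close_input_file() below reads s->pb before calling
 * av_close_input_stream() for exactly that reason. */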
av_free(s); 02490 } 02491 02492 void av_close_input_file(AVFormatContext *s) 02493 { 02494 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb; 02495 av_close_input_stream(s); 02496 if (pb) 02497 url_fclose(pb); 02498 } 02499 02500 AVStream *av_new_stream(AVFormatContext *s, int id) 02501 { 02502 AVStream *st; 02503 int i; 02504 02505 if (s->nb_streams >= MAX_STREAMS) 02506 return NULL; 02507 02508 st = av_mallocz(sizeof(AVStream)); 02509 if (!st) 02510 return NULL; 02511 02512 st->codec= avcodec_alloc_context(); 02513 if (s->iformat) { 02514 /* no default bitrate if decoding */ 02515 st->codec->bit_rate = 0; 02516 } 02517 st->index = s->nb_streams; 02518 st->id = id; 02519 st->start_time = AV_NOPTS_VALUE; 02520 st->duration = AV_NOPTS_VALUE; 02521 /* we set the current DTS to 0 so that formats without any timestamps 02522 but durations get some timestamps, formats with some unknown 02523 timestamps have their first few packets buffered and the 02524 timestamps corrected before they are returned to the user */ 02525 st->cur_dts = 0; 02526 st->first_dts = AV_NOPTS_VALUE; 02527 st->probe_packets = MAX_PROBE_PACKETS; 02528 02529 /* default pts setting is MPEG-like */ 02530 av_set_pts_info(st, 33, 1, 90000); 02531 st->last_IP_pts = AV_NOPTS_VALUE; 02532 for(i=0; i<MAX_REORDER_DELAY+1; i++) 02533 st->pts_buffer[i]= AV_NOPTS_VALUE; 02534 st->reference_dts = AV_NOPTS_VALUE; 02535 02536 st->sample_aspect_ratio = (AVRational){0,1}; 02537 02538 s->streams[s->nb_streams++] = st; 02539 return st; 02540 } 02541 02542 AVProgram *av_new_program(AVFormatContext *ac, int id) 02543 { 02544 AVProgram *program=NULL; 02545 int i; 02546 02547 #ifdef DEBUG_SI 02548 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id); 02549 #endif 02550 02551 for(i=0; i<ac->nb_programs; i++) 02552 if(ac->programs[i]->id == id) 02553 program = ac->programs[i]; 02554 02555 if(!program){ 02556 program = av_mallocz(sizeof(AVProgram)); 02557 if (!program) 02558 return NULL; 02559 dynarray_add(&ac->programs, &ac->nb_programs, program); 02560 program->discard = AVDISCARD_NONE; 02561 } 02562 program->id = id; 02563 02564 return program; 02565 } 02566 02567 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title) 02568 { 02569 AVChapter *chapter = NULL; 02570 int i; 02571 02572 for(i=0; i<s->nb_chapters; i++) 02573 if(s->chapters[i]->id == id) 02574 chapter = s->chapters[i]; 02575 02576 if(!chapter){ 02577 chapter= av_mallocz(sizeof(AVChapter)); 02578 if(!chapter) 02579 return NULL; 02580 dynarray_add(&s->chapters, &s->nb_chapters, chapter); 02581 } 02582 #if LIBAVFORMAT_VERSION_INT < (53<<16) 02583 av_free(chapter->title); 02584 #endif 02585 av_metadata_set2(&chapter->metadata, "title", title, 0); 02586 chapter->id = id; 02587 chapter->time_base= time_base; 02588 chapter->start = start; 02589 chapter->end = end; 02590 02591 return chapter; 02592 } 02593 02594 /************************************************************/ 02595 /* output media file */ 02596 02597 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap) 02598 { 02599 int ret; 02600 02601 if (s->oformat->priv_data_size > 0) { 02602 s->priv_data = av_mallocz(s->oformat->priv_data_size); 02603 if (!s->priv_data) 02604 return AVERROR(ENOMEM); 02605 } else 02606 s->priv_data = NULL; 02607 02608 if (s->oformat->set_parameters) { 02609 ret = s->oformat->set_parameters(s, ap); 02610 if (ret < 0) 02611 return ret; 02612 } 02613 return 0; 02614 } 02615 02616 int 
av_write_header(AVFormatContext *s) 02617 { 02618 int ret, i; 02619 AVStream *st; 02620 02621 // some sanity checks 02622 if (s->nb_streams == 0) { 02623 av_log(s, AV_LOG_ERROR, "no streams\n"); 02624 return -1; 02625 } 02626 02627 for(i=0;i<s->nb_streams;i++) { 02628 st = s->streams[i]; 02629 02630 switch (st->codec->codec_type) { 02631 case AVMEDIA_TYPE_AUDIO: 02632 if(st->codec->sample_rate<=0){ 02633 av_log(s, AV_LOG_ERROR, "sample rate not set\n"); 02634 return -1; 02635 } 02636 if(!st->codec->block_align) 02637 st->codec->block_align = st->codec->channels * 02638 av_get_bits_per_sample(st->codec->codec_id) >> 3; 02639 break; 02640 case AVMEDIA_TYPE_VIDEO: 02641 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too? 02642 av_log(s, AV_LOG_ERROR, "time base not set\n"); 02643 return -1; 02644 } 02645 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){ 02646 av_log(s, AV_LOG_ERROR, "dimensions not set\n"); 02647 return -1; 02648 } 02649 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){ 02650 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n"); 02651 return -1; 02652 } 02653 break; 02654 } 02655 02656 if(s->oformat->codec_tag){ 02657 if(st->codec->codec_tag){ 02658 //FIXME 02659 //check that tag + id is in the table 02660 //if neither is in the table -> OK 02661 //if tag is in the table with another id -> FAIL 02662 //if id is in the table with another tag -> FAIL unless strict < ? 02663 }else 02664 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id); 02665 } 02666 02667 if(s->oformat->flags & AVFMT_GLOBALHEADER && 02668 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER)) 02669 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i); 02670 } 02671 02672 if (!s->priv_data && s->oformat->priv_data_size > 0) { 02673 s->priv_data = av_mallocz(s->oformat->priv_data_size); 02674 if (!s->priv_data) 02675 return AVERROR(ENOMEM); 02676 } 02677 02678 #if LIBAVFORMAT_VERSION_MAJOR < 53 02679 ff_metadata_mux_compat(s); 02680 #endif 02681 02682 /* set muxer identification string */ 02683 if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) { 02684 AVMetadata *m; 02685 AVMetadataTag *t; 02686 02687 if (!(m = av_mallocz(sizeof(AVMetadata)))) 02688 return AVERROR(ENOMEM); 02689 av_metadata_set2(&m, "encoder", LIBAVFORMAT_IDENT, 0); 02690 metadata_conv(&m, s->oformat->metadata_conv, NULL); 02691 if ((t = av_metadata_get(m, "", NULL, AV_METADATA_IGNORE_SUFFIX))) 02692 av_metadata_set2(&s->metadata, t->key, t->value, 0); 02693 av_metadata_free(&m); 02694 } 02695 02696 if(s->oformat->write_header){ 02697 ret = s->oformat->write_header(s); 02698 if (ret < 0) 02699 return ret; 02700 } 02701 02702 /* init PTS generation */ 02703 for(i=0;i<s->nb_streams;i++) { 02704 int64_t den = AV_NOPTS_VALUE; 02705 st = s->streams[i]; 02706 02707 switch (st->codec->codec_type) { 02708 case AVMEDIA_TYPE_AUDIO: 02709 den = (int64_t)st->time_base.num * st->codec->sample_rate; 02710 break; 02711 case AVMEDIA_TYPE_VIDEO: 02712 den = (int64_t)st->time_base.num * st->codec->time_base.den; 02713 break; 02714 default: 02715 break; 02716 } 02717 if (den != AV_NOPTS_VALUE) { 02718 if (den <= 0) 02719 return AVERROR_INVALIDDATA; 02720 av_frac_init(&st->pts, 0, 0, den); 02721 } 02722 } 02723 return 0; 02724 } 02725 02726 //FIXME merge with compute_pkt_fields 02727 static int compute_pkt_fields2(AVFormatContext *s, 
AVStream *st, AVPacket *pkt){ 02728 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames); 02729 int num, den, frame_size, i; 02730 02731 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index); 02732 02733 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE) 02734 return -1;*/ 02735 02736 /* duration field */ 02737 if (pkt->duration == 0) { 02738 compute_frame_duration(&num, &den, st, NULL, pkt); 02739 if (den && num) { 02740 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num); 02741 } 02742 } 02743 02744 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0) 02745 pkt->pts= pkt->dts; 02746 02747 //XXX/FIXME this is a temporary hack until all encoders output pts 02748 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){ 02749 pkt->dts= 02750 // pkt->pts= st->cur_dts; 02751 pkt->pts= st->pts.val; 02752 } 02753 02754 //calculate dts from pts 02755 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){ 02756 st->pts_buffer[0]= pkt->pts; 02757 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++) 02758 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration; 02759 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) 02760 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); 02761 02762 pkt->dts= st->pts_buffer[0]; 02763 } 02764 02765 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){ 02766 av_log(s, AV_LOG_ERROR, 02767 "st:%d error, non monotone timestamps %"PRId64" >= %"PRId64"\n", 02768 st->index, st->cur_dts, pkt->dts); 02769 return -1; 02770 } 02771 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){ 02772 av_log(s, AV_LOG_ERROR, "st:%d error, pts < dts\n", st->index); 02773 return -1; 02774 } 02775 02776 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts); 02777 st->cur_dts= pkt->dts; 02778 st->pts.val= pkt->dts; 02779 02780 /* update pts */ 02781 switch (st->codec->codec_type) { 02782 case AVMEDIA_TYPE_AUDIO: 02783 frame_size = get_audio_frame_size(st->codec, pkt->size); 02784 02785 /* HACK/FIXME, we skip the initial 0 size packets as they are most 02786 likely equal to the encoder delay, but it would be better if we 02787 had the real timestamps from the encoder */ 02788 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) { 02789 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size); 02790 } 02791 break; 02792 case AVMEDIA_TYPE_VIDEO: 02793 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num); 02794 break; 02795 default: 02796 break; 02797 } 02798 return 0; 02799 } 02800 02801 int av_write_frame(AVFormatContext *s, AVPacket *pkt) 02802 { 02803 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt); 02804 02805 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 02806 return ret; 02807 02808 ret= s->oformat->write_packet(s, pkt); 02809 if(!ret) 02810 ret= url_ferror(s->pb); 02811 return ret; 02812 } 02813 02814 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt, 02815 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *)) 02816 { 02817 AVPacketList **next_point, *this_pktl; 02818 02819 this_pktl = av_mallocz(sizeof(AVPacketList)); 02820 
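/* Editorial note (not part of the original source): the av_mallocz() result
 * above is used without a NULL check.  What follows copies *pkt into the new
 * node (clearing the original's destruct callback so only the copy is freed,
 * and duplicating non-allocated payloads with av_dup_packet()), then searches
 * for the insertion point starting at this stream's last_in_packet_buffer
 * entry, keeping the buffer ordered by the compare() callback (DTS order when
 * ff_interleave_compare_dts() is used). */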
this_pktl->pkt= *pkt; 02821 pkt->destruct= NULL; // do not free original but only the copy 02822 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory 02823 02824 if(s->streams[pkt->stream_index]->last_in_packet_buffer){ 02825 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next); 02826 }else 02827 next_point = &s->packet_buffer; 02828 02829 if(*next_point){ 02830 if(compare(s, &s->packet_buffer_end->pkt, pkt)){ 02831 while(!compare(s, &(*next_point)->pkt, pkt)){ 02832 next_point= &(*next_point)->next; 02833 } 02834 goto next_non_null; 02835 }else{ 02836 next_point = &(s->packet_buffer_end->next); 02837 } 02838 } 02839 assert(!*next_point); 02840 02841 s->packet_buffer_end= this_pktl; 02842 next_non_null: 02843 02844 this_pktl->next= *next_point; 02845 02846 s->streams[pkt->stream_index]->last_in_packet_buffer= 02847 *next_point= this_pktl; 02848 } 02849 02850 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt) 02851 { 02852 AVStream *st = s->streams[ pkt ->stream_index]; 02853 AVStream *st2= s->streams[ next->stream_index]; 02854 int64_t a= st2->time_base.num * (int64_t)st ->time_base.den; 02855 int64_t b= st ->time_base.num * (int64_t)st2->time_base.den; 02856 return av_rescale_rnd(pkt->dts, b, a, AV_ROUND_DOWN) < next->dts; 02857 } 02858 02859 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){ 02860 AVPacketList *pktl; 02861 int stream_count=0; 02862 int i; 02863 02864 if(pkt){ 02865 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts); 02866 } 02867 02868 for(i=0; i < s->nb_streams; i++) 02869 stream_count+= !!s->streams[i]->last_in_packet_buffer; 02870 02871 if(stream_count && (s->nb_streams == stream_count || flush)){ 02872 pktl= s->packet_buffer; 02873 *out= pktl->pkt; 02874 02875 s->packet_buffer= pktl->next; 02876 if(!s->packet_buffer) 02877 s->packet_buffer_end= NULL; 02878 02879 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl) 02880 s->streams[out->stream_index]->last_in_packet_buffer= NULL; 02881 av_freep(&pktl); 02882 return 1; 02883 }else{ 02884 av_init_packet(out); 02885 return 0; 02886 } 02887 } 02888 02898 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){ 02899 if(s->oformat->interleave_packet) 02900 return s->oformat->interleave_packet(s, out, in, flush); 02901 else 02902 return av_interleave_packet_per_dts(s, out, in, flush); 02903 } 02904 02905 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){ 02906 AVStream *st= s->streams[ pkt->stream_index]; 02907 02908 //FIXME/XXX/HACK drop zero sized packets 02909 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0) 02910 return 0; 02911 02912 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts); 02913 if(compute_pkt_fields2(s, st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 02914 return -1; 02915 02916 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 02917 return -1; 02918 02919 for(;;){ 02920 AVPacket opkt; 02921 int ret= av_interleave_packet(s, &opkt, pkt, 0); 02922 if(ret<=0) //FIXME cleanup needed for ret<0 ? 
02923 return ret; 02924 02925 ret= s->oformat->write_packet(s, &opkt); 02926 02927 av_free_packet(&opkt); 02928 pkt= NULL; 02929 02930 if(ret<0) 02931 return ret; 02932 if(url_ferror(s->pb)) 02933 return url_ferror(s->pb); 02934 } 02935 } 02936 02937 int av_write_trailer(AVFormatContext *s) 02938 { 02939 int ret, i; 02940 02941 for(;;){ 02942 AVPacket pkt; 02943 ret= av_interleave_packet(s, &pkt, NULL, 1); 02944 if(ret<0) //FIXME cleanup needed for ret<0 ? 02945 goto fail; 02946 if(!ret) 02947 break; 02948 02949 ret= s->oformat->write_packet(s, &pkt); 02950 02951 av_free_packet(&pkt); 02952 02953 if(ret<0) 02954 goto fail; 02955 if(url_ferror(s->pb)) 02956 goto fail; 02957 } 02958 02959 if(s->oformat->write_trailer) 02960 ret = s->oformat->write_trailer(s); 02961 fail: 02962 if(ret == 0) 02963 ret=url_ferror(s->pb); 02964 for(i=0;i<s->nb_streams;i++) { 02965 av_freep(&s->streams[i]->priv_data); 02966 av_freep(&s->streams[i]->index_entries); 02967 } 02968 av_freep(&s->priv_data); 02969 return ret; 02970 } 02971 02972 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx) 02973 { 02974 int i, j; 02975 AVProgram *program=NULL; 02976 void *tmp; 02977 02978 if (idx >= ac->nb_streams) { 02979 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx); 02980 return; 02981 } 02982 02983 for(i=0; i<ac->nb_programs; i++){ 02984 if(ac->programs[i]->id != progid) 02985 continue; 02986 program = ac->programs[i]; 02987 for(j=0; j<program->nb_stream_indexes; j++) 02988 if(program->stream_index[j] == idx) 02989 return; 02990 02991 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1)); 02992 if(!tmp) 02993 return; 02994 program->stream_index = tmp; 02995 program->stream_index[program->nb_stream_indexes++] = idx; 02996 return; 02997 } 02998 } 02999 03000 static void print_fps(double d, const char *postfix){ 03001 uint64_t v= lrintf(d*100); 03002 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix); 03003 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix); 03004 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix); 03005 } 03006 03007 static void dump_metadata(void *ctx, AVMetadata *m, const char *indent) 03008 { 03009 if(m && !(m->count == 1 && av_metadata_get(m, "language", NULL, 0))){ 03010 AVMetadataTag *tag=NULL; 03011 03012 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent); 03013 while((tag=av_metadata_get(m, "", tag, AV_METADATA_IGNORE_SUFFIX))) { 03014 if(strcmp("language", tag->key)) 03015 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value); 03016 } 03017 } 03018 } 03019 03020 /* "user interface" functions */ 03021 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) 03022 { 03023 char buf[256]; 03024 int flags = (is_output ? 
ic->oformat->flags : ic->iformat->flags); 03025 AVStream *st = ic->streams[i]; 03026 int g = av_gcd(st->time_base.num, st->time_base.den); 03027 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0); 03028 avcodec_string(buf, sizeof(buf), st->codec, is_output); 03029 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i); 03030 /* the pid is an important information, so we display it */ 03031 /* XXX: add a generic system */ 03032 if (flags & AVFMT_SHOW_IDS) 03033 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id); 03034 if (lang) 03035 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value); 03036 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g); 03037 av_log(NULL, AV_LOG_INFO, ": %s", buf); 03038 if (st->sample_aspect_ratio.num && // default 03039 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) { 03040 AVRational display_aspect_ratio; 03041 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, 03042 st->codec->width*st->sample_aspect_ratio.num, 03043 st->codec->height*st->sample_aspect_ratio.den, 03044 1024*1024); 03045 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d", 03046 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, 03047 display_aspect_ratio.num, display_aspect_ratio.den); 03048 } 03049 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){ 03050 if(st->avg_frame_rate.den && st->avg_frame_rate.num) 03051 print_fps(av_q2d(st->avg_frame_rate), "fps"); 03052 if(st->r_frame_rate.den && st->r_frame_rate.num) 03053 print_fps(av_q2d(st->r_frame_rate), "tbr"); 03054 if(st->time_base.den && st->time_base.num) 03055 print_fps(1/av_q2d(st->time_base), "tbn"); 03056 if(st->codec->time_base.den && st->codec->time_base.num) 03057 print_fps(1/av_q2d(st->codec->time_base), "tbc"); 03058 } 03059 av_log(NULL, AV_LOG_INFO, "\n"); 03060 dump_metadata(NULL, st->metadata, " "); 03061 } 03062 03063 void dump_format(AVFormatContext *ic, 03064 int index, 03065 const char *url, 03066 int is_output) 03067 { 03068 int i; 03069 uint8_t *printed = av_mallocz(ic->nb_streams); 03070 if (ic->nb_streams && !printed) 03071 return; 03072 03073 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n", 03074 is_output ? "Output" : "Input", 03075 index, 03076 is_output ? ic->oformat->name : ic->iformat->name, 03077 is_output ? 
"to" : "from", url); 03078 dump_metadata(NULL, ic->metadata, " "); 03079 if (!is_output) { 03080 av_log(NULL, AV_LOG_INFO, " Duration: "); 03081 if (ic->duration != AV_NOPTS_VALUE) { 03082 int hours, mins, secs, us; 03083 secs = ic->duration / AV_TIME_BASE; 03084 us = ic->duration % AV_TIME_BASE; 03085 mins = secs / 60; 03086 secs %= 60; 03087 hours = mins / 60; 03088 mins %= 60; 03089 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs, 03090 (100 * us) / AV_TIME_BASE); 03091 } else { 03092 av_log(NULL, AV_LOG_INFO, "N/A"); 03093 } 03094 if (ic->start_time != AV_NOPTS_VALUE) { 03095 int secs, us; 03096 av_log(NULL, AV_LOG_INFO, ", start: "); 03097 secs = ic->start_time / AV_TIME_BASE; 03098 us = ic->start_time % AV_TIME_BASE; 03099 av_log(NULL, AV_LOG_INFO, "%d.%06d", 03100 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE)); 03101 } 03102 av_log(NULL, AV_LOG_INFO, ", bitrate: "); 03103 if (ic->bit_rate) { 03104 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000); 03105 } else { 03106 av_log(NULL, AV_LOG_INFO, "N/A"); 03107 } 03108 av_log(NULL, AV_LOG_INFO, "\n"); 03109 } 03110 for (i = 0; i < ic->nb_chapters; i++) { 03111 AVChapter *ch = ic->chapters[i]; 03112 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i); 03113 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base)); 03114 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base)); 03115 03116 dump_metadata(NULL, ch->metadata, " "); 03117 } 03118 if(ic->nb_programs) { 03119 int j, k, total = 0; 03120 for(j=0; j<ic->nb_programs; j++) { 03121 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata, 03122 "name", NULL, 0); 03123 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id, 03124 name ? name->value : ""); 03125 dump_metadata(NULL, ic->programs[j]->metadata, " "); 03126 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) { 03127 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output); 03128 printed[ic->programs[j]->stream_index[k]] = 1; 03129 } 03130 total += ic->programs[j]->nb_stream_indexes; 03131 } 03132 if (total < ic->nb_streams) 03133 av_log(NULL, AV_LOG_INFO, " No Program\n"); 03134 } 03135 for(i=0;i<ic->nb_streams;i++) 03136 if (!printed[i]) 03137 dump_stream_format(ic, i, index, is_output); 03138 03139 av_free(printed); 03140 } 03141 03142 #if LIBAVFORMAT_VERSION_MAJOR < 53 03143 int parse_image_size(int *width_ptr, int *height_ptr, const char *str) 03144 { 03145 return av_parse_video_frame_size(width_ptr, height_ptr, str); 03146 } 03147 03148 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg) 03149 { 03150 AVRational frame_rate; 03151 int ret = av_parse_video_frame_rate(&frame_rate, arg); 03152 *frame_rate_num= frame_rate.num; 03153 *frame_rate_den= frame_rate.den; 03154 return ret; 03155 } 03156 #endif 03157 03158 int64_t av_gettime(void) 03159 { 03160 struct timeval tv; 03161 gettimeofday(&tv,NULL); 03162 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec; 03163 } 03164 03165 uint64_t ff_ntp_time(void) 03166 { 03167 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US; 03168 } 03169 03170 int64_t parse_date(const char *datestr, int duration) 03171 { 03172 const char *p; 03173 int64_t t; 03174 struct tm dt; 03175 int i; 03176 static const char * const date_fmt[] = { 03177 "%Y-%m-%d", 03178 "%Y%m%d", 03179 }; 03180 static const char * const time_fmt[] = { 03181 "%H:%M:%S", 03182 "%H%M%S", 03183 }; 03184 const char *q; 03185 int is_utc, len; 03186 char lastch; 03187 int negative = 0; 03188 03189 
#undef time 03190 time_t now = time(0); 03191 03192 len = strlen(datestr); 03193 if (len > 0) 03194 lastch = datestr[len - 1]; 03195 else 03196 lastch = '\0'; 03197 is_utc = (lastch == 'z' || lastch == 'Z'); 03198 03199 memset(&dt, 0, sizeof(dt)); 03200 03201 p = datestr; 03202 q = NULL; 03203 if (!duration) { 03204 if (!strncasecmp(datestr, "now", len)) 03205 return (int64_t) now * 1000000; 03206 03207 /* parse the year-month-day part */ 03208 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) { 03209 q = small_strptime(p, date_fmt[i], &dt); 03210 if (q) { 03211 break; 03212 } 03213 } 03214 03215 /* if the year-month-day part is missing, then take the 03216 * current year-month-day time */ 03217 if (!q) { 03218 if (is_utc) { 03219 dt = *gmtime(&now); 03220 } else { 03221 dt = *localtime(&now); 03222 } 03223 dt.tm_hour = dt.tm_min = dt.tm_sec = 0; 03224 } else { 03225 p = q; 03226 } 03227 03228 if (*p == 'T' || *p == 't' || *p == ' ') 03229 p++; 03230 03231 /* parse the hour-minute-second part */ 03232 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) { 03233 q = small_strptime(p, time_fmt[i], &dt); 03234 if (q) { 03235 break; 03236 } 03237 } 03238 } else { 03239 /* parse datestr as a duration */ 03240 if (p[0] == '-') { 03241 negative = 1; 03242 ++p; 03243 } 03244 /* parse datestr as HH:MM:SS */ 03245 q = small_strptime(p, time_fmt[0], &dt); 03246 if (!q) { 03247 /* parse datestr as S+ */ 03248 dt.tm_sec = strtol(p, (char **)&q, 10); 03249 if (q == p) 03250 /* the parsing didn't succeed */ 03251 return INT64_MIN; 03252 dt.tm_min = 0; 03253 dt.tm_hour = 0; 03254 } 03255 } 03256 03257 /* Now we have all the fields that we can get */ 03258 if (!q) { 03259 return INT64_MIN; 03260 } 03261 03262 if (duration) { 03263 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec; 03264 } else { 03265 dt.tm_isdst = -1; /* unknown */ 03266 if (is_utc) { 03267 t = mktimegm(&dt); 03268 } else { 03269 t = mktime(&dt); 03270 } 03271 } 03272 03273 t *= 1000000; 03274 03275 /* parse the .m... part */ 03276 if (*q == '.') { 03277 int val, n; 03278 q++; 03279 for (val = 0, n = 100000; n >= 1; n /= 10, q++) { 03280 if (!isdigit(*q)) 03281 break; 03282 val += n * (*q - '0'); 03283 } 03284 t += val; 03285 } 03286 return negative ? 
-t : t; 03287 } 03288 03289 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info) 03290 { 03291 const char *p; 03292 char tag[128], *q; 03293 03294 p = info; 03295 if (*p == '?') 03296 p++; 03297 for(;;) { 03298 q = tag; 03299 while (*p != '\0' && *p != '=' && *p != '&') { 03300 if ((q - tag) < sizeof(tag) - 1) 03301 *q++ = *p; 03302 p++; 03303 } 03304 *q = '\0'; 03305 q = arg; 03306 if (*p == '=') { 03307 p++; 03308 while (*p != '&' && *p != '\0') { 03309 if ((q - arg) < arg_size - 1) { 03310 if (*p == '+') 03311 *q++ = ' '; 03312 else 03313 *q++ = *p; 03314 } 03315 p++; 03316 } 03317 *q = '\0'; 03318 } 03319 if (!strcmp(tag, tag1)) 03320 return 1; 03321 if (*p != '&') 03322 break; 03323 p++; 03324 } 03325 return 0; 03326 } 03327 03328 int av_get_frame_filename(char *buf, int buf_size, 03329 const char *path, int number) 03330 { 03331 const char *p; 03332 char *q, buf1[20], c; 03333 int nd, len, percentd_found; 03334 03335 q = buf; 03336 p = path; 03337 percentd_found = 0; 03338 for(;;) { 03339 c = *p++; 03340 if (c == '\0') 03341 break; 03342 if (c == '%') { 03343 do { 03344 nd = 0; 03345 while (isdigit(*p)) { 03346 nd = nd * 10 + *p++ - '0'; 03347 } 03348 c = *p++; 03349 } while (isdigit(c)); 03350 03351 switch(c) { 03352 case '%': 03353 goto addchar; 03354 case 'd': 03355 if (percentd_found) 03356 goto fail; 03357 percentd_found = 1; 03358 snprintf(buf1, sizeof(buf1), "%0*d", nd, number); 03359 len = strlen(buf1); 03360 if ((q - buf + len) > buf_size - 1) 03361 goto fail; 03362 memcpy(q, buf1, len); 03363 q += len; 03364 break; 03365 default: 03366 goto fail; 03367 } 03368 } else { 03369 addchar: 03370 if ((q - buf) < buf_size - 1) 03371 *q++ = c; 03372 } 03373 } 03374 if (!percentd_found) 03375 goto fail; 03376 *q = '\0'; 03377 return 0; 03378 fail: 03379 *q = '\0'; 03380 return -1; 03381 } 03382 03383 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size) 03384 { 03385 int len, i, j, c; 03386 #undef fprintf 03387 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0) 03388 03389 for(i=0;i<size;i+=16) { 03390 len = size - i; 03391 if (len > 16) 03392 len = 16; 03393 PRINT("%08x ", i); 03394 for(j=0;j<16;j++) { 03395 if (j < len) 03396 PRINT(" %02x", buf[i+j]); 03397 else 03398 PRINT(" "); 03399 } 03400 PRINT(" "); 03401 for(j=0;j<len;j++) { 03402 c = buf[i+j]; 03403 if (c < ' ' || c > '~') 03404 c = '.'; 03405 PRINT("%c", c); 03406 } 03407 PRINT("\n"); 03408 } 03409 #undef PRINT 03410 } 03411 03412 void av_hex_dump(FILE *f, uint8_t *buf, int size) 03413 { 03414 hex_dump_internal(NULL, f, 0, buf, size); 03415 } 03416 03417 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size) 03418 { 03419 hex_dump_internal(avcl, NULL, level, buf, size); 03420 } 03421 03422 //FIXME needs to know the time_base 03423 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload) 03424 { 03425 #undef fprintf 03426 #define PRINT(...) 
do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0) 03427 PRINT("stream #%d:\n", pkt->stream_index); 03428 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0)); 03429 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE); 03430 /* DTS is _always_ valid after av_read_frame() */ 03431 PRINT(" dts="); 03432 if (pkt->dts == AV_NOPTS_VALUE) 03433 PRINT("N/A"); 03434 else 03435 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE); 03436 /* PTS may not be known if B-frames are present. */ 03437 PRINT(" pts="); 03438 if (pkt->pts == AV_NOPTS_VALUE) 03439 PRINT("N/A"); 03440 else 03441 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE); 03442 PRINT("\n"); 03443 PRINT(" size=%d\n", pkt->size); 03444 #undef PRINT 03445 if (dump_payload) 03446 av_hex_dump(f, pkt->data, pkt->size); 03447 } 03448 03449 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload) 03450 { 03451 pkt_dump_internal(NULL, f, 0, pkt, dump_payload); 03452 } 03453 03454 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload) 03455 { 03456 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload); 03457 } 03458 03459 void ff_url_split(char *proto, int proto_size, 03460 char *authorization, int authorization_size, 03461 char *hostname, int hostname_size, 03462 int *port_ptr, 03463 char *path, int path_size, 03464 const char *url) 03465 { 03466 const char *p, *ls, *at, *col, *brk; 03467 03468 if (port_ptr) *port_ptr = -1; 03469 if (proto_size > 0) proto[0] = 0; 03470 if (authorization_size > 0) authorization[0] = 0; 03471 if (hostname_size > 0) hostname[0] = 0; 03472 if (path_size > 0) path[0] = 0; 03473 03474 /* parse protocol */ 03475 if ((p = strchr(url, ':'))) { 03476 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url)); 03477 p++; /* skip ':' */ 03478 if (*p == '/') p++; 03479 if (*p == '/') p++; 03480 } else { 03481 /* no protocol means plain filename */ 03482 av_strlcpy(path, url, path_size); 03483 return; 03484 } 03485 03486 /* separate path from hostname */ 03487 ls = strchr(p, '/'); 03488 if(!ls) 03489 ls = strchr(p, '?'); 03490 if(ls) 03491 av_strlcpy(path, ls, path_size); 03492 else 03493 ls = &p[strlen(p)]; // XXX 03494 03495 /* the rest is hostname, use that to parse auth/port */ 03496 if (ls != p) { 03497 /* authorization (user[:pass]@hostname) */ 03498 if ((at = strchr(p, '@')) && at < ls) { 03499 av_strlcpy(authorization, p, 03500 FFMIN(authorization_size, at + 1 - p)); 03501 p = at + 1; /* skip '@' */ 03502 } 03503 03504 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) { 03505 /* [host]:port */ 03506 av_strlcpy(hostname, p + 1, 03507 FFMIN(hostname_size, brk - p)); 03508 if (brk[1] == ':' && port_ptr) 03509 *port_ptr = atoi(brk + 2); 03510 } else if ((col = strchr(p, ':')) && col < ls) { 03511 av_strlcpy(hostname, p, 03512 FFMIN(col + 1 - p, hostname_size)); 03513 if (port_ptr) *port_ptr = atoi(col + 1); 03514 } else 03515 av_strlcpy(hostname, p, 03516 FFMIN(ls + 1 - p, hostname_size)); 03517 } 03518 } 03519 03520 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase) 03521 { 03522 int i; 03523 static const char hex_table_uc[16] = { '0', '1', '2', '3', 03524 '4', '5', '6', '7', 03525 '8', '9', 'A', 'B', 03526 'C', 'D', 'E', 'F' }; 03527 static const char hex_table_lc[16] = { '0', '1', '2', '3', 03528 '4', '5', '6', '7', 03529 '8', '9', 'a', 'b', 03530 'c', 'd', 'e', 'f' }; 03531 const char *hex_table = lowercase ? 
hex_table_lc : hex_table_uc; 03532 03533 for(i = 0; i < s; i++) { 03534 buff[i * 2] = hex_table[src[i] >> 4]; 03535 buff[i * 2 + 1] = hex_table[src[i] & 0xF]; 03536 } 03537 03538 return buff; 03539 } 03540 03541 void av_set_pts_info(AVStream *s, int pts_wrap_bits, 03542 unsigned int pts_num, unsigned int pts_den) 03543 { 03544 s->pts_wrap_bits = pts_wrap_bits; 03545 03546 if(av_reduce(&s->time_base.num, &s->time_base.den, pts_num, pts_den, INT_MAX)){ 03547 if(s->time_base.num != pts_num) 03548 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/s->time_base.num); 03549 }else 03550 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index); 03551 03552 if(!s->time_base.num || !s->time_base.den) 03553 s->time_base.num= s->time_base.den= 0; 03554 } 03555 03556 int ff_url_join(char *str, int size, const char *proto, 03557 const char *authorization, const char *hostname, 03558 int port, const char *fmt, ...) 03559 { 03560 #if CONFIG_NETWORK 03561 struct addrinfo hints, *ai; 03562 #endif 03563 03564 str[0] = '\0'; 03565 if (proto) 03566 av_strlcatf(str, size, "%s://", proto); 03567 if (authorization) 03568 av_strlcatf(str, size, "%s@", authorization); 03569 #if CONFIG_NETWORK && defined(AF_INET6) 03570 /* Determine if hostname is a numerical IPv6 address, 03571 * properly escape it within [] in that case. */ 03572 memset(&hints, 0, sizeof(hints)); 03573 hints.ai_flags = AI_NUMERICHOST; 03574 if (!getaddrinfo(hostname, NULL, &hints, &ai)) { 03575 if (ai->ai_family == AF_INET6) { 03576 av_strlcat(str, "[", size); 03577 av_strlcat(str, hostname, size); 03578 av_strlcat(str, "]", size); 03579 } else { 03580 av_strlcat(str, hostname, size); 03581 } 03582 freeaddrinfo(ai); 03583 } else 03584 #endif 03585 /* Not an IPv6 address, just output the plain string. */ 03586 av_strlcat(str, hostname, size); 03587 03588 if (port >= 0) 03589 av_strlcatf(str, size, ":%d", port); 03590 if (fmt) { 03591 va_list vl; 03592 int len = strlen(str); 03593 03594 va_start(vl, fmt); 03595 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl); 03596 va_end(vl); 03597 } 03598 return strlen(str); 03599 }
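/*
 * Editorial usage sketch (not part of the original utils.c): how the internal
 * helpers ff_url_split() and ff_url_join() defined above fit together.  The
 * URL below is invented purely for illustration.
 *
 * @code
 * char proto[16], auth[64], host[128], path[256], rebuilt[512];
 * int port;
 *
 * ff_url_split(proto, sizeof(proto), auth, sizeof(auth),
 *              host,  sizeof(host),  &port, path, sizeof(path),
 *              "rtsp://user:pass@example.com:554/stream?cam=1");
 * // proto="rtsp", auth="user:pass", host="example.com",
 * // port=554, path="/stream?cam=1"
 *
 * ff_url_join(rebuilt, sizeof(rebuilt), proto, auth, host, port, "%s", path);
 * // rebuilt now holds the same URL again
 * @endcode
 */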