00022 #include "config.h"
00023 #include <inttypes.h>
00024 #include <math.h>
00025 #include <limits.h>
00026 #include "libavutil/avstring.h"
00027 #include "libavutil/colorspace.h"
00028 #include "libavutil/mathematics.h"
00029 #include "libavutil/pixdesc.h"
00030 #include "libavutil/imgutils.h"
00031 #include "libavutil/dict.h"
00032 #include "libavutil/parseutils.h"
00033 #include "libavutil/samplefmt.h"
00034 #include "libavformat/avformat.h"
00035 #include "libavdevice/avdevice.h"
00036 #include "libswscale/swscale.h"
00037 #include "libavcodec/audioconvert.h"
00038 #include "libavutil/opt.h"
00039 #include "libavcodec/avfft.h"
00040
00041 #if CONFIG_AVFILTER
00042 # include "libavfilter/avfilter.h"
00043 # include "libavfilter/avfiltergraph.h"
00044 #endif
00045
00046 #include "cmdutils.h"
00047
00048 #include <SDL.h>
00049 #include <SDL_thread.h>
00050
00051 #ifdef __MINGW32__
00052 #undef main
00053 #endif
00054
00055 #include <unistd.h>
00056 #include <assert.h>
00057
00058 const char program_name[] = "avplay";
00059 const int program_birth_year = 2003;
00060
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples; kept small so A/V sync stays precise */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if the clock error is below this threshold */
#define AV_SYNC_THRESHOLD 0.01

/* no AV correction is done at all if the clock error is above this threshold */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change applied to get correct sync, in percent */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* number of measurements averaged to estimate the audio clock difference */
#define AUDIO_DIFF_AVG_NB 20

/* must be large enough to compensate for the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2 * 65536)

static int sws_flags = SWS_BICUBIC;
00086
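/* FIFO of demuxed packets, shared between the demuxing thread and the
   decoder threads and protected by an SDL mutex/condition pair */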
00087 typedef struct PacketQueue {
00088 AVPacketList *first_pkt, *last_pkt;
00089 int nb_packets;
00090 int size;
00091 int abort_request;
00092 SDL_mutex *mutex;
00093 SDL_cond *cond;
00094 } PacketQueue;
00095
00096 #define VIDEO_PICTURE_QUEUE_SIZE 2
00097 #define SUBPICTURE_QUEUE_SIZE 4
00098
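/* a decoded picture queued for display: its presentation timestamp, the
   position of its packet in the file and the SDL overlay holding the pixels */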
00099 typedef struct VideoPicture {
00100 double pts;
00101 double target_clock;
00102 int64_t pos;
00103 SDL_Overlay *bmp;
00104 int width, height;
00105 int allocated;
00106 int reallocate;
00107 enum PixelFormat pix_fmt;
00108
00109 #if CONFIG_AVFILTER
00110 AVFilterBufferRef *picref;
00111 #endif
00112 } VideoPicture;
00113
00114 typedef struct SubPicture {
00115 double pts;
00116 AVSubtitle sub;
00117 } SubPicture;
00118
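/* which clock the other streams are synchronized to */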
00119 enum {
00120 AV_SYNC_AUDIO_MASTER,
00121 AV_SYNC_VIDEO_MASTER,
00122 AV_SYNC_EXTERNAL_CLOCK,
00123 };
00124
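/* global state for one opened file: demuxer context, per-stream decoding
   state, clocks and the picture/subtitle display queues */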
00125 typedef struct VideoState {
00126 SDL_Thread *parse_tid;
00127 SDL_Thread *video_tid;
00128 SDL_Thread *refresh_tid;
00129 AVInputFormat *iformat;
00130 int no_background;
00131 int abort_request;
00132 int paused;
00133 int last_paused;
00134 int seek_req;
00135 int seek_flags;
00136 int64_t seek_pos;
00137 int64_t seek_rel;
00138 int read_pause_return;
00139 AVFormatContext *ic;
00140 int dtg_active_format;
00141
00142 int audio_stream;
00143
00144 int av_sync_type;
00145 double external_clock;
00146 int64_t external_clock_time;
00147
00148 double audio_clock;
00149 double audio_diff_cum;
00150 double audio_diff_avg_coef;
00151 double audio_diff_threshold;
00152 int audio_diff_avg_count;
00153 AVStream *audio_st;
00154 PacketQueue audioq;
00155 int audio_hw_buf_size;
00156 uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
00157 uint8_t *audio_buf;
00158 uint8_t *audio_buf1;
00159 unsigned int audio_buf_size;
00160 int audio_buf_index;
00161 AVPacket audio_pkt_temp;
00162 AVPacket audio_pkt;
00163 enum AVSampleFormat audio_src_fmt;
00164 AVAudioConvert *reformat_ctx;
00165 AVFrame *frame;
00166
00167 int show_audio;
00168 int16_t sample_array[SAMPLE_ARRAY_SIZE];
00169 int sample_array_index;
00170 int last_i_start;
00171 RDFTContext *rdft;
00172 int rdft_bits;
00173 FFTSample *rdft_data;
00174 int xpos;
00175
00176 SDL_Thread *subtitle_tid;
00177 int subtitle_stream;
00178 int subtitle_stream_changed;
00179 AVStream *subtitle_st;
00180 PacketQueue subtitleq;
00181 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
00182 int subpq_size, subpq_rindex, subpq_windex;
00183 SDL_mutex *subpq_mutex;
00184 SDL_cond *subpq_cond;
00185
00186 double frame_timer;
00187 double frame_last_pts;
00188 double frame_last_delay;
00189 double video_clock;
00190 int video_stream;
00191 AVStream *video_st;
00192 PacketQueue videoq;
00193 double video_current_pts;
00194 double video_current_pts_drift;
00195 int64_t video_current_pos;
00196 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
00197 int pictq_size, pictq_rindex, pictq_windex;
00198 SDL_mutex *pictq_mutex;
00199 SDL_cond *pictq_cond;
00200 #if !CONFIG_AVFILTER
00201 struct SwsContext *img_convert_ctx;
00202 #endif
00203
00204
00205 char filename[1024];
00206 int width, height, xleft, ytop;
00207
00208 PtsCorrectionContext pts_ctx;
00209
00210 #if CONFIG_AVFILTER
00211 AVFilterContext *out_video_filter;
00212 #endif
00213
00214 float skip_frames;
00215 float skip_frames_index;
00216 int refresh;
00217 } VideoState;
00218
00219 static void show_help(void);
00220
00221
00222 static AVInputFormat *file_iformat;
00223 static const char *input_filename;
00224 static const char *window_title;
00225 static int fs_screen_width;
00226 static int fs_screen_height;
00227 static int screen_width = 0;
00228 static int screen_height = 0;
00229 static int audio_disable;
00230 static int video_disable;
00231 static int wanted_stream[AVMEDIA_TYPE_NB] = {
00232 [AVMEDIA_TYPE_AUDIO] = -1,
00233 [AVMEDIA_TYPE_VIDEO] = -1,
00234 [AVMEDIA_TYPE_SUBTITLE] = -1,
00235 };
00236 static int seek_by_bytes = -1;
00237 static int display_disable;
00238 static int show_status = 1;
00239 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
00240 static int64_t start_time = AV_NOPTS_VALUE;
00241 static int64_t duration = AV_NOPTS_VALUE;
00242 static int debug = 0;
00243 static int debug_mv = 0;
00244 static int step = 0;
00245 static int workaround_bugs = 1;
00246 static int fast = 0;
00247 static int genpts = 0;
00248 static int lowres = 0;
00249 static int idct = FF_IDCT_AUTO;
00250 static enum AVDiscard skip_frame = AVDISCARD_DEFAULT;
00251 static enum AVDiscard skip_idct = AVDISCARD_DEFAULT;
00252 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
00253 static int error_recognition = FF_ER_CAREFUL;
00254 static int error_concealment = 3;
00255 static int decoder_reorder_pts = -1;
00256 static int autoexit;
00257 static int exit_on_keydown;
00258 static int exit_on_mousedown;
00259 static int loop = 1;
00260 static int framedrop = 1;
00261
00262 static int rdftspeed = 20;
00263 #if CONFIG_AVFILTER
00264 static char *vfilters = NULL;
00265 #endif
00266
00267
00268 static int is_full_screen;
00269 static VideoState *cur_stream;
00270 static int64_t audio_callback_time;
00271
00272 static AVPacket flush_pkt;
00273
00274 #define FF_ALLOC_EVENT (SDL_USEREVENT)
00275 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
00276 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
00277
00278 static SDL_Surface *screen;
00279
00280 void exit_program(int ret)
00281 {
00282 exit(ret);
00283 }
00284
00285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
00286
00287
00288 static void packet_queue_init(PacketQueue *q)
00289 {
00290 memset(q, 0, sizeof(PacketQueue));
00291 q->mutex = SDL_CreateMutex();
00292 q->cond = SDL_CreateCond();
00293 packet_queue_put(q, &flush_pkt);
00294 }
00295
00296 static void packet_queue_flush(PacketQueue *q)
00297 {
00298 AVPacketList *pkt, *pkt1;
00299
00300 SDL_LockMutex(q->mutex);
00301 for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
00302 pkt1 = pkt->next;
00303 av_free_packet(&pkt->pkt);
00304 av_freep(&pkt);
00305 }
00306 q->last_pkt = NULL;
00307 q->first_pkt = NULL;
00308 q->nb_packets = 0;
00309 q->size = 0;
00310 SDL_UnlockMutex(q->mutex);
00311 }
00312
00313 static void packet_queue_end(PacketQueue *q)
00314 {
00315 packet_queue_flush(q);
00316 SDL_DestroyMutex(q->mutex);
00317 SDL_DestroyCond(q->cond);
00318 }
00319
00320 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
00321 {
00322 AVPacketList *pkt1;
00323
00324
00325 if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
00326 return -1;
00327
00328 pkt1 = av_malloc(sizeof(AVPacketList));
00329 if (!pkt1)
00330 return -1;
00331 pkt1->pkt = *pkt;
00332 pkt1->next = NULL;
00333
00334
00335 SDL_LockMutex(q->mutex);
00336
00337 if (!q->last_pkt)
00338
00339 q->first_pkt = pkt1;
00340 else
00341 q->last_pkt->next = pkt1;
00342 q->last_pkt = pkt1;
00343 q->nb_packets++;
00344 q->size += pkt1->pkt.size + sizeof(*pkt1);
00345
00346 SDL_CondSignal(q->cond);
00347
00348 SDL_UnlockMutex(q->mutex);
00349 return 0;
00350 }
00351
00352 static void packet_queue_abort(PacketQueue *q)
00353 {
00354 SDL_LockMutex(q->mutex);
00355
00356 q->abort_request = 1;
00357
00358 SDL_CondSignal(q->cond);
00359
00360 SDL_UnlockMutex(q->mutex);
00361 }
00362
00363
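/* dequeue a packet; returns < 0 if the queue was aborted, 0 if no packet is
   available in non-blocking mode, and > 0 if a packet was returned */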
00364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
00365 {
00366 AVPacketList *pkt1;
00367 int ret;
00368
00369 SDL_LockMutex(q->mutex);
00370
00371 for (;;) {
00372 if (q->abort_request) {
00373 ret = -1;
00374 break;
00375 }
00376
00377 pkt1 = q->first_pkt;
00378 if (pkt1) {
00379 q->first_pkt = pkt1->next;
00380 if (!q->first_pkt)
00381 q->last_pkt = NULL;
00382 q->nb_packets--;
00383 q->size -= pkt1->pkt.size + sizeof(*pkt1);
00384 *pkt = pkt1->pkt;
00385 av_free(pkt1);
00386 ret = 1;
00387 break;
00388 } else if (!block) {
00389 ret = 0;
00390 break;
00391 } else {
00392 SDL_CondWait(q->cond, q->mutex);
00393 }
00394 }
00395 SDL_UnlockMutex(q->mutex);
00396 return ret;
00397 }
00398
00399 static inline void fill_rectangle(SDL_Surface *screen,
00400 int x, int y, int w, int h, int color)
00401 {
00402 SDL_Rect rect;
00403 rect.x = x;
00404 rect.y = y;
00405 rect.w = w;
00406 rect.h = h;
00407 SDL_FillRect(screen, &rect, color);
00408 }
00409
00410 #define ALPHA_BLEND(a, oldp, newp, s)\
00411 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
00412
00413 #define RGBA_IN(r, g, b, a, s)\
00414 {\
00415 unsigned int v = ((const uint32_t *)(s))[0];\
00416 a = (v >> 24) & 0xff;\
00417 r = (v >> 16) & 0xff;\
00418 g = (v >> 8) & 0xff;\
00419 b = v & 0xff;\
00420 }
00421
00422 #define YUVA_IN(y, u, v, a, s, pal)\
00423 {\
00424 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
00425 a = (val >> 24) & 0xff;\
00426 y = (val >> 16) & 0xff;\
00427 u = (val >> 8) & 0xff;\
00428 v = val & 0xff;\
00429 }
00430
00431 #define YUVA_OUT(d, y, u, v, a)\
00432 {\
00433 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
00434 }
00435
00436
00437 #define BPP 1
00438
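/* alpha-blend one palettized subtitle rectangle onto the YUV420 destination
   picture, handling the chroma subsampling by hand */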
00439 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
00440 {
00441 int wrap, wrap3, width2, skip2;
00442 int y, u, v, a, u1, v1, a1, w, h;
00443 uint8_t *lum, *cb, *cr;
00444 const uint8_t *p;
00445 const uint32_t *pal;
00446 int dstx, dsty, dstw, dsth;
00447
00448 dstw = av_clip(rect->w, 0, imgw);
00449 dsth = av_clip(rect->h, 0, imgh);
00450 dstx = av_clip(rect->x, 0, imgw - dstw);
00451 dsty = av_clip(rect->y, 0, imgh - dsth);
00452 lum = dst->data[0] + dsty * dst->linesize[0];
00453 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
00454 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
00455
00456 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
00457 skip2 = dstx >> 1;
00458 wrap = dst->linesize[0];
00459 wrap3 = rect->pict.linesize[0];
00460 p = rect->pict.data[0];
00461 pal = (const uint32_t *)rect->pict.data[1];
00462
00463 if (dsty & 1) {
00464 lum += dstx;
00465 cb += skip2;
00466 cr += skip2;
00467
00468 if (dstx & 1) {
00469 YUVA_IN(y, u, v, a, p, pal);
00470 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00471 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00472 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00473 cb++;
00474 cr++;
00475 lum++;
00476 p += BPP;
00477 }
00478 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
00479 YUVA_IN(y, u, v, a, p, pal);
00480 u1 = u;
00481 v1 = v;
00482 a1 = a;
00483 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00484
00485 YUVA_IN(y, u, v, a, p + BPP, pal);
00486 u1 += u;
00487 v1 += v;
00488 a1 += a;
00489 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00490 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00491 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00492 cb++;
00493 cr++;
00494 p += 2 * BPP;
00495 lum += 2;
00496 }
00497 if (w) {
00498 YUVA_IN(y, u, v, a, p, pal);
00499 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00500 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00501 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00502 p++;
00503 lum++;
00504 }
00505 p += wrap3 - dstw * BPP;
00506 lum += wrap - dstw - dstx;
00507 cb += dst->linesize[1] - width2 - skip2;
00508 cr += dst->linesize[2] - width2 - skip2;
00509 }
00510 for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
00511 lum += dstx;
00512 cb += skip2;
00513 cr += skip2;
00514
00515 if (dstx & 1) {
00516 YUVA_IN(y, u, v, a, p, pal);
00517 u1 = u;
00518 v1 = v;
00519 a1 = a;
00520 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00521 p += wrap3;
00522 lum += wrap;
00523 YUVA_IN(y, u, v, a, p, pal);
00524 u1 += u;
00525 v1 += v;
00526 a1 += a;
00527 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00528 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00529 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00530 cb++;
00531 cr++;
00532 p += -wrap3 + BPP;
00533 lum += -wrap + 1;
00534 }
00535 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
00536 YUVA_IN(y, u, v, a, p, pal);
00537 u1 = u;
00538 v1 = v;
00539 a1 = a;
00540 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00541
00542 YUVA_IN(y, u, v, a, p + BPP, pal);
00543 u1 += u;
00544 v1 += v;
00545 a1 += a;
00546 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00547 p += wrap3;
00548 lum += wrap;
00549
00550 YUVA_IN(y, u, v, a, p, pal);
00551 u1 += u;
00552 v1 += v;
00553 a1 += a;
00554 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00555
00556 YUVA_IN(y, u, v, a, p + BPP, pal);
00557 u1 += u;
00558 v1 += v;
00559 a1 += a;
00560 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00561
00562 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
00563 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
00564
00565 cb++;
00566 cr++;
00567 p += -wrap3 + 2 * BPP;
00568 lum += -wrap + 2;
00569 }
00570 if (w) {
00571 YUVA_IN(y, u, v, a, p, pal);
00572 u1 = u;
00573 v1 = v;
00574 a1 = a;
00575 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00576 p += wrap3;
00577 lum += wrap;
00578 YUVA_IN(y, u, v, a, p, pal);
00579 u1 += u;
00580 v1 += v;
00581 a1 += a;
00582 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00583 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00584 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00585 cb++;
00586 cr++;
00587 p += -wrap3 + BPP;
00588 lum += -wrap + 1;
00589 }
00590 p += wrap3 + (wrap3 - dstw * BPP);
00591 lum += wrap + (wrap - dstw - dstx);
00592 cb += dst->linesize[1] - width2 - skip2;
00593 cr += dst->linesize[2] - width2 - skip2;
00594 }
00595
00596 if (h) {
00597 lum += dstx;
00598 cb += skip2;
00599 cr += skip2;
00600
00601 if (dstx & 1) {
00602 YUVA_IN(y, u, v, a, p, pal);
00603 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00604 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00605 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00606 cb++;
00607 cr++;
00608 lum++;
00609 p += BPP;
00610 }
00611 for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
00612 YUVA_IN(y, u, v, a, p, pal);
00613 u1 = u;
00614 v1 = v;
00615 a1 = a;
00616 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00617
00618 YUVA_IN(y, u, v, a, p + BPP, pal);
00619 u1 += u;
00620 v1 += v;
00621 a1 += a;
00622 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00623 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
00624 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
00625 cb++;
00626 cr++;
00627 p += 2 * BPP;
00628 lum += 2;
00629 }
00630 if (w) {
00631 YUVA_IN(y, u, v, a, p, pal);
00632 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00633 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00634 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00635 }
00636 }
00637 }
00638
00639 static void free_subpicture(SubPicture *sp)
00640 {
00641 avsubtitle_free(&sp->sub);
00642 }
00643
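/* display the current picture: blend any pending subtitle into the overlay,
   compute the display rectangle from the aspect ratio and blit it */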
00644 static void video_image_display(VideoState *is)
00645 {
00646 VideoPicture *vp;
00647 SubPicture *sp;
00648 AVPicture pict;
00649 float aspect_ratio;
00650 int width, height, x, y;
00651 SDL_Rect rect;
00652 int i;
00653
00654 vp = &is->pictq[is->pictq_rindex];
00655 if (vp->bmp) {
00656 #if CONFIG_AVFILTER
00657 if (vp->picref->video->pixel_aspect.num == 0)
00658 aspect_ratio = 0;
00659 else
00660 aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
00661 #else
        /* prefer the sample aspect ratio from the stream over the codec */
00664 if (is->video_st->sample_aspect_ratio.num)
00665 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
00666 else if (is->video_st->codec->sample_aspect_ratio.num)
00667 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
00668 else
00669 aspect_ratio = 0;
00670 #endif
00671 if (aspect_ratio <= 0.0)
00672 aspect_ratio = 1.0;
00673 aspect_ratio *= (float)vp->width / (float)vp->height;
00674
00675 if (is->subtitle_st)
00676 {
00677 if (is->subpq_size > 0)
00678 {
00679 sp = &is->subpq[is->subpq_rindex];
00680
00681 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
00682 {
00683 SDL_LockYUVOverlay (vp->bmp);
00684
00685 pict.data[0] = vp->bmp->pixels[0];
00686 pict.data[1] = vp->bmp->pixels[2];
00687 pict.data[2] = vp->bmp->pixels[1];
00688
00689 pict.linesize[0] = vp->bmp->pitches[0];
00690 pict.linesize[1] = vp->bmp->pitches[2];
00691 pict.linesize[2] = vp->bmp->pitches[1];
00692
00693 for (i = 0; i < sp->sub.num_rects; i++)
00694 blend_subrect(&pict, sp->sub.rects[i],
00695 vp->bmp->w, vp->bmp->h);
00696
00697 SDL_UnlockYUVOverlay (vp->bmp);
00698 }
00699 }
00700 }

        /* compute the display rectangle, assuming a 1.0 screen pixel ratio */
00704 height = is->height;
00705 width = ((int)rint(height * aspect_ratio)) & ~1;
00706 if (width > is->width) {
00707 width = is->width;
00708 height = ((int)rint(width / aspect_ratio)) & ~1;
00709 }
00710 x = (is->width - width) / 2;
00711 y = (is->height - height) / 2;
00712 is->no_background = 0;
00713 rect.x = is->xleft + x;
00714 rect.y = is->ytop + y;
00715 rect.w = width;
00716 rect.h = height;
00717 SDL_DisplayYUVOverlay(vp->bmp, &rect);
00718 }
00719 }
00720
00721
00722
00723 static int audio_write_get_buf_size(VideoState *is)
00724 {
00725 return is->audio_buf_size - is->audio_buf_index;
00726 }
00727
00728 static inline int compute_mod(int a, int b)
00729 {
00730 a = a % b;
00731 if (a >= 0)
00732 return a;
00733 else
00734 return a + b;
00735 }
00736
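/* audio-only visualization: draw either the time-domain waveform
   (show_audio == 1) or an RDFT-based spectrogram of the latest samples */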
00737 static void video_audio_display(VideoState *s)
00738 {
00739 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
00740 int ch, channels, h, h2, bgcolor, fgcolor;
00741 int16_t time_diff;
00742 int rdft_bits, nb_freq;
00743
00744 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
00745 ;
00746 nb_freq = 1 << (rdft_bits - 1);
00747
00748
00749 channels = s->audio_st->codec->channels;
00750 nb_display_channels = channels;
00751 if (!s->paused) {
00752 int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
00753 n = 2 * channels;
00754 delay = audio_write_get_buf_size(s);
00755 delay /= n;

        /* account for the time elapsed since the last audio callback */
00759 if (audio_callback_time) {
00760 time_diff = av_gettime() - audio_callback_time;
00761 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
00762 }
00763
00764 delay += 2 * data_used;
00765 if (delay < data_used)
00766 delay = data_used;
00767
00768 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
00769 if (s->show_audio == 1) {
00770 h = INT_MIN;
00771 for (i = 0; i < 1000; i += channels) {
00772 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
00773 int a = s->sample_array[idx];
00774 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
00775 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
00776 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
00777 int score = a - d;
00778 if (h < score && (b ^ c) < 0) {
00779 h = score;
00780 i_start = idx;
00781 }
00782 }
00783 }
00784
00785 s->last_i_start = i_start;
00786 } else {
00787 i_start = s->last_i_start;
00788 }
00789
00790 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
00791 if (s->show_audio == 1) {
00792 fill_rectangle(screen,
00793 s->xleft, s->ytop, s->width, s->height,
00794 bgcolor);
00795
00796 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
00797
00798
00799 h = s->height / nb_display_channels;
00800
00801 h2 = (h * 9) / 20;
00802 for (ch = 0; ch < nb_display_channels; ch++) {
00803 i = i_start + ch;
00804 y1 = s->ytop + ch * h + (h / 2);
00805 for (x = 0; x < s->width; x++) {
00806 y = (s->sample_array[i] * h2) >> 15;
00807 if (y < 0) {
00808 y = -y;
00809 ys = y1 - y;
00810 } else {
00811 ys = y1;
00812 }
00813 fill_rectangle(screen,
00814 s->xleft + x, ys, 1, y,
00815 fgcolor);
00816 i += channels;
00817 if (i >= SAMPLE_ARRAY_SIZE)
00818 i -= SAMPLE_ARRAY_SIZE;
00819 }
00820 }
00821
00822 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
00823
00824 for (ch = 1; ch < nb_display_channels; ch++) {
00825 y = s->ytop + ch * h;
00826 fill_rectangle(screen,
00827 s->xleft, y, s->width, 1,
00828 fgcolor);
00829 }
00830 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
00831 } else {
00832 nb_display_channels= FFMIN(nb_display_channels, 2);
00833 if (rdft_bits != s->rdft_bits) {
00834 av_rdft_end(s->rdft);
00835 av_free(s->rdft_data);
00836 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
00837 s->rdft_bits = rdft_bits;
00838 s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
00839 }
00840 {
00841 FFTSample *data[2];
00842 for (ch = 0; ch < nb_display_channels; ch++) {
00843 data[ch] = s->rdft_data + 2 * nb_freq * ch;
00844 i = i_start + ch;
00845 for (x = 0; x < 2 * nb_freq; x++) {
00846 double w = (x-nb_freq) * (1.0 / nb_freq);
00847 data[ch][x] = s->sample_array[i] * (1.0 - w * w);
00848 i += channels;
00849 if (i >= SAMPLE_ARRAY_SIZE)
00850 i -= SAMPLE_ARRAY_SIZE;
00851 }
00852 av_rdft_calc(s->rdft, data[ch]);
00853 }
00854
00855 for (y = 0; y < s->height; y++) {
00856 double w = 1 / sqrt(nb_freq);
00857 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
00858 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
00859 + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
00860 a = FFMIN(a, 255);
00861 b = FFMIN(b, 255);
00862 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
00863
00864 fill_rectangle(screen,
00865 s->xpos, s->height-y, 1, 1,
00866 fgcolor);
00867 }
00868 }
00869 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
00870 s->xpos++;
00871 if (s->xpos >= s->width)
00872 s->xpos= s->xleft;
00873 }
00874 }
00875
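/* (re)open the SDL video surface, using the stream or filter output
   dimensions unless an explicit or full-screen size was requested */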
00876 static int video_open(VideoState *is)
00877 {
00878 int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
00879 int w,h;
00880
00881 if (is_full_screen) flags |= SDL_FULLSCREEN;
00882 else flags |= SDL_RESIZABLE;
00883
00884 if (is_full_screen && fs_screen_width) {
00885 w = fs_screen_width;
00886 h = fs_screen_height;
00887 } else if (!is_full_screen && screen_width) {
00888 w = screen_width;
00889 h = screen_height;
00890 #if CONFIG_AVFILTER
00891 } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
00892 w = is->out_video_filter->inputs[0]->w;
00893 h = is->out_video_filter->inputs[0]->h;
00894 #else
00895 } else if (is->video_st && is->video_st->codec->width) {
00896 w = is->video_st->codec->width;
00897 h = is->video_st->codec->height;
00898 #endif
00899 } else {
00900 w = 640;
00901 h = 480;
00902 }
00903 if (screen && is->width == screen->w && screen->w == w
00904 && is->height== screen->h && screen->h == h)
00905 return 0;
00906
00907 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
00908
00909 screen = SDL_SetVideoMode(w, h, 24, flags);
00910 #else
00911 screen = SDL_SetVideoMode(w, h, 0, flags);
00912 #endif
00913 if (!screen) {
00914 fprintf(stderr, "SDL: could not set video mode - exiting\n");
00915 return -1;
00916 }
00917 if (!window_title)
00918 window_title = input_filename;
00919 SDL_WM_SetCaption(window_title, window_title);
00920
00921 is->width = screen->w;
00922 is->height = screen->h;
00923
00924 return 0;
00925 }
00926
00927
00928 static void video_display(VideoState *is)
00929 {
00930 if (!screen)
00931 video_open(cur_stream);
00932 if (is->audio_st && is->show_audio)
00933 video_audio_display(is);
00934 else if (is->video_st)
00935 video_image_display(is);
00936 }
00937
00938 static int refresh_thread(void *opaque)
00939 {
00940 VideoState *is= opaque;
00941 while (!is->abort_request) {
00942 SDL_Event event;
00943 event.type = FF_REFRESH_EVENT;
00944 event.user.data1 = opaque;
00945 if (!is->refresh) {
00946 is->refresh = 1;
00947 SDL_PushEvent(&event);
00948 }
00949 usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000);
00950 }
00951 return 0;
00952 }
00953
00954
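/* get the current audio clock value, corrected for the data still sitting
   in the hardware audio buffer */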
00955 static double get_audio_clock(VideoState *is)
00956 {
00957 double pts;
00958 int hw_buf_size, bytes_per_sec;
00959 pts = is->audio_clock;
00960 hw_buf_size = audio_write_get_buf_size(is);
00961 bytes_per_sec = 0;
00962 if (is->audio_st) {
00963 bytes_per_sec = is->audio_st->codec->sample_rate *
00964 2 * is->audio_st->codec->channels;
00965 }
00966 if (bytes_per_sec)
00967 pts -= (double)hw_buf_size / bytes_per_sec;
00968 return pts;
00969 }
00970
00971
00972 static double get_video_clock(VideoState *is)
00973 {
00974 if (is->paused) {
00975 return is->video_current_pts;
00976 } else {
00977 return is->video_current_pts_drift + av_gettime() / 1000000.0;
00978 }
00979 }
00980
00981
00982 static double get_external_clock(VideoState *is)
00983 {
00984 int64_t ti;
00985 ti = av_gettime();
00986 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
00987 }
00988
00989
00990 static double get_master_clock(VideoState *is)
00991 {
00992 double val;
00993
00994 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
00995 if (is->video_st)
00996 val = get_video_clock(is);
00997 else
00998 val = get_audio_clock(is);
00999 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
01000 if (is->audio_st)
01001 val = get_audio_clock(is);
01002 else
01003 val = get_video_clock(is);
01004 } else {
01005 val = get_external_clock(is);
01006 }
01007 return val;
01008 }
01009
01010
01011 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
01012 {
01013 if (!is->seek_req) {
01014 is->seek_pos = pos;
01015 is->seek_rel = rel;
01016 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
01017 if (seek_by_bytes)
01018 is->seek_flags |= AVSEEK_FLAG_BYTE;
01019 is->seek_req = 1;
01020 }
01021 }
01022
01023
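/* toggle pause; on resume, shift frame_timer by the time spent paused so the
   video clock does not jump */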
01024 static void stream_pause(VideoState *is)
01025 {
01026 if (is->paused) {
01027 is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
01028 if (is->read_pause_return != AVERROR(ENOSYS)) {
01029 is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
01030 }
01031 is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
01032 }
01033 is->paused = !is->paused;
01034 }
01035
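/* compute the absolute time at which the given frame should be displayed,
   shortening or doubling the delay when video drifts from the master clock */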
01036 static double compute_target_time(double frame_current_pts, VideoState *is)
01037 {
01038 double delay, sync_threshold, diff;
01039
01040
01041 delay = frame_current_pts - is->frame_last_pts;
01042 if (delay <= 0 || delay >= 10.0) {
        /* delay is incorrect, reuse the previous frame's delay */
01044 delay = is->frame_last_delay;
01045 } else {
01046 is->frame_last_delay = delay;
01047 }
01048 is->frame_last_pts = frame_current_pts;
01049
01050
01051 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
01052 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01053
01054
01055 diff = get_video_clock(is) - get_master_clock(is);
01056
01057
01058
01059
01060 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
01061 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
01062 if (diff <= -sync_threshold)
01063 delay = 0;
01064 else if (diff >= sync_threshold)
01065 delay = 2 * delay;
01066 }
01067 }
01068 is->frame_timer += delay;
01069
01070 av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
01071 delay, frame_current_pts, -diff);
01072
01073 return is->frame_timer;
01074 }
01075
01076
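/* called from the event loop to display each queued frame, drop late frames,
   retire expired subtitles and print the periodic status line */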
01077 static void video_refresh_timer(void *opaque)
01078 {
01079 VideoState *is = opaque;
01080 VideoPicture *vp;
01081
01082 SubPicture *sp, *sp2;
01083
01084 if (is->video_st) {
01085 retry:
01086 if (is->pictq_size == 0) {
            /* nothing to do, no picture to display in the queue */
01088 } else {
01089 double time = av_gettime() / 1000000.0;
01090 double next_target;
01091
01092 vp = &is->pictq[is->pictq_rindex];
01093
01094 if (time < vp->target_clock)
01095 return;
01096
01097 is->video_current_pts = vp->pts;
01098 is->video_current_pts_drift = is->video_current_pts - time;
01099 is->video_current_pos = vp->pos;
01100 if (is->pictq_size > 1) {
01101 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
01102 assert(nextvp->target_clock >= vp->target_clock);
01103 next_target= nextvp->target_clock;
01104 } else {
01105 next_target = vp->target_clock + is->video_clock - vp->pts;
01106 }
01107 if (framedrop && time > next_target) {
01108 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
01109 if (is->pictq_size > 1 || time > next_target + 0.5) {
01110
01111 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
01112 is->pictq_rindex = 0;
01113
01114 SDL_LockMutex(is->pictq_mutex);
01115 is->pictq_size--;
01116 SDL_CondSignal(is->pictq_cond);
01117 SDL_UnlockMutex(is->pictq_mutex);
01118 goto retry;
01119 }
01120 }
01121
01122 if (is->subtitle_st) {
01123 if (is->subtitle_stream_changed) {
01124 SDL_LockMutex(is->subpq_mutex);
01125
01126 while (is->subpq_size) {
01127 free_subpicture(&is->subpq[is->subpq_rindex]);
01128
01129
01130 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
01131 is->subpq_rindex = 0;
01132
01133 is->subpq_size--;
01134 }
01135 is->subtitle_stream_changed = 0;
01136
01137 SDL_CondSignal(is->subpq_cond);
01138 SDL_UnlockMutex(is->subpq_mutex);
01139 } else {
01140 if (is->subpq_size > 0) {
01141 sp = &is->subpq[is->subpq_rindex];
01142
01143 if (is->subpq_size > 1)
01144 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
01145 else
01146 sp2 = NULL;
01147
01148 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
01149 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
01150 {
01151 free_subpicture(sp);
01152
01153
01154 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
01155 is->subpq_rindex = 0;
01156
01157 SDL_LockMutex(is->subpq_mutex);
01158 is->subpq_size--;
01159 SDL_CondSignal(is->subpq_cond);
01160 SDL_UnlockMutex(is->subpq_mutex);
01161 }
01162 }
01163 }
01164 }
01165
01166
01167 if (!display_disable)
01168 video_display(is);
01169
01170
01171 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
01172 is->pictq_rindex = 0;
01173
01174 SDL_LockMutex(is->pictq_mutex);
01175 is->pictq_size--;
01176 SDL_CondSignal(is->pictq_cond);
01177 SDL_UnlockMutex(is->pictq_mutex);
01178 }
01179 } else if (is->audio_st) {
        /* audio-only stream: still refresh the display so the waveform or
           spectrum visualization keeps updating */
01186 if (!display_disable)
01187 video_display(is);
01188 }
01189 if (show_status) {
01190 static int64_t last_time;
01191 int64_t cur_time;
01192 int aqsize, vqsize, sqsize;
01193 double av_diff;
01194
01195 cur_time = av_gettime();
01196 if (!last_time || (cur_time - last_time) >= 30000) {
01197 aqsize = 0;
01198 vqsize = 0;
01199 sqsize = 0;
01200 if (is->audio_st)
01201 aqsize = is->audioq.size;
01202 if (is->video_st)
01203 vqsize = is->videoq.size;
01204 if (is->subtitle_st)
01205 sqsize = is->subtitleq.size;
01206 av_diff = 0;
01207 if (is->audio_st && is->video_st)
01208 av_diff = get_audio_clock(is) - get_video_clock(is);
01209 printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
01210 get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
01211 vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
01212 fflush(stdout);
01213 last_time = cur_time;
01214 }
01215 }
01216 }
01217
01218 static void stream_close(VideoState *is)
01219 {
01220 VideoPicture *vp;
01221 int i;
01222
01223 is->abort_request = 1;
01224 SDL_WaitThread(is->parse_tid, NULL);
01225 SDL_WaitThread(is->refresh_tid, NULL);
01226
01227
01228 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
01229 vp = &is->pictq[i];
01230 #if CONFIG_AVFILTER
01231 if (vp->picref) {
01232 avfilter_unref_buffer(vp->picref);
01233 vp->picref = NULL;
01234 }
01235 #endif
01236 if (vp->bmp) {
01237 SDL_FreeYUVOverlay(vp->bmp);
01238 vp->bmp = NULL;
01239 }
01240 }
01241 SDL_DestroyMutex(is->pictq_mutex);
01242 SDL_DestroyCond(is->pictq_cond);
01243 SDL_DestroyMutex(is->subpq_mutex);
01244 SDL_DestroyCond(is->subpq_cond);
01245 #if !CONFIG_AVFILTER
01246 if (is->img_convert_ctx)
01247 sws_freeContext(is->img_convert_ctx);
01248 #endif
01249 av_free(is);
01250 }
01251
01252 static void do_exit(void)
01253 {
01254 if (cur_stream) {
01255 stream_close(cur_stream);
01256 cur_stream = NULL;
01257 }
01258 uninit_opts();
01259 #if CONFIG_AVFILTER
01260 avfilter_uninit();
01261 #endif
01262 avformat_network_deinit();
01263 if (show_status)
01264 printf("\n");
01265 SDL_Quit();
01266 av_log(NULL, AV_LOG_QUIET, "");
01267 exit(0);
01268 }
01269
01270
01271
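/* allocate the SDL overlay for the picture at the queue write index; runs in
   the main thread (triggered by FF_ALLOC_EVENT) to avoid SDL locking issues */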
01272 static void alloc_picture(void *opaque)
01273 {
01274 VideoState *is = opaque;
01275 VideoPicture *vp;
01276
01277 vp = &is->pictq[is->pictq_windex];
01278
01279 if (vp->bmp)
01280 SDL_FreeYUVOverlay(vp->bmp);
01281
01282 #if CONFIG_AVFILTER
01283 if (vp->picref)
01284 avfilter_unref_buffer(vp->picref);
01285 vp->picref = NULL;
01286
01287 vp->width = is->out_video_filter->inputs[0]->w;
01288 vp->height = is->out_video_filter->inputs[0]->h;
01289 vp->pix_fmt = is->out_video_filter->inputs[0]->format;
01290 #else
01291 vp->width = is->video_st->codec->width;
01292 vp->height = is->video_st->codec->height;
01293 vp->pix_fmt = is->video_st->codec->pix_fmt;
01294 #endif
01295
01296 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
01297 SDL_YV12_OVERLAY,
01298 screen);
01299 if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL may allocate a smaller buffer than requested when the overlay
           hardware cannot support the requested size */
01302 fprintf(stderr, "Error: the video system does not support an image\n"
01303 "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
01304 "to reduce the image size.\n", vp->width, vp->height );
01305 do_exit();
01306 }
01307
01308 SDL_LockMutex(is->pictq_mutex);
01309 vp->allocated = 1;
01310 SDL_CondSignal(is->pictq_cond);
01311 SDL_UnlockMutex(is->pictq_mutex);
01312 }
01313
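/* wait for a free slot in the picture queue, convert/copy the decoded frame
   into the SDL overlay and queue it with its pts and file position */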
01318 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
01319 {
01320 VideoPicture *vp;
01321 #if CONFIG_AVFILTER
01322 AVPicture pict_src;
01323 #else
01324 int dst_pix_fmt = PIX_FMT_YUV420P;
01325 #endif
01326
01327 SDL_LockMutex(is->pictq_mutex);
01328
01329 if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
01330 is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
01331
01332 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
01333 !is->videoq.abort_request) {
01334 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01335 }
01336 SDL_UnlockMutex(is->pictq_mutex);
01337
01338 if (is->videoq.abort_request)
01339 return -1;
01340
01341 vp = &is->pictq[is->pictq_windex];
01342
01343
01344 if (!vp->bmp || vp->reallocate ||
01345 #if CONFIG_AVFILTER
01346 vp->width != is->out_video_filter->inputs[0]->w ||
01347 vp->height != is->out_video_filter->inputs[0]->h) {
01348 #else
01349 vp->width != is->video_st->codec->width ||
01350 vp->height != is->video_st->codec->height) {
01351 #endif
01352 SDL_Event event;
01353
01354 vp->allocated = 0;
01355 vp->reallocate = 0;
01356
01357
01358
01359 event.type = FF_ALLOC_EVENT;
01360 event.user.data1 = is;
01361 SDL_PushEvent(&event);
01362
01363
01364 SDL_LockMutex(is->pictq_mutex);
01365 while (!vp->allocated && !is->videoq.abort_request) {
01366 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01367 }
01368 SDL_UnlockMutex(is->pictq_mutex);
01369
01370 if (is->videoq.abort_request)
01371 return -1;
01372 }
01373
01374
01375 if (vp->bmp) {
01376 AVPicture pict;
01377 #if CONFIG_AVFILTER
01378 if (vp->picref)
01379 avfilter_unref_buffer(vp->picref);
01380 vp->picref = src_frame->opaque;
01381 #endif
01382
01383
01384 SDL_LockYUVOverlay (vp->bmp);
01385
01386 memset(&pict, 0, sizeof(AVPicture));
01387 pict.data[0] = vp->bmp->pixels[0];
01388 pict.data[1] = vp->bmp->pixels[2];
01389 pict.data[2] = vp->bmp->pixels[1];
01390
01391 pict.linesize[0] = vp->bmp->pitches[0];
01392 pict.linesize[1] = vp->bmp->pitches[2];
01393 pict.linesize[2] = vp->bmp->pitches[1];
01394
01395 #if CONFIG_AVFILTER
01396 pict_src.data[0] = src_frame->data[0];
01397 pict_src.data[1] = src_frame->data[1];
01398 pict_src.data[2] = src_frame->data[2];
01399
01400 pict_src.linesize[0] = src_frame->linesize[0];
01401 pict_src.linesize[1] = src_frame->linesize[1];
01402 pict_src.linesize[2] = src_frame->linesize[2];
01403
01404
01405 av_picture_copy(&pict, &pict_src,
01406 vp->pix_fmt, vp->width, vp->height);
01407 #else
01408 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
01409 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
01410 vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
01411 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
01412 if (is->img_convert_ctx == NULL) {
01413 fprintf(stderr, "Cannot initialize the conversion context\n");
01414 exit(1);
01415 }
01416 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
01417 0, vp->height, pict.data, pict.linesize);
01418 #endif
01419
01420 SDL_UnlockYUVOverlay(vp->bmp);
01421
01422 vp->pts = pts;
01423 vp->pos = pos;
01424
01425
01426 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
01427 is->pictq_windex = 0;
01428 SDL_LockMutex(is->pictq_mutex);
01429 vp->target_clock = compute_target_time(vp->pts, is);
01430
01431 is->pictq_size++;
01432 SDL_UnlockMutex(is->pictq_mutex);
01433 }
01434 return 0;
01435 }
01436
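/* update the video clock from the decoded frame's pts (or extrapolate it when
   the pts is missing) and hand the frame to queue_picture() */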
01441 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
01442 {
01443 double frame_delay, pts;
01444
01445 pts = pts1;
01446
01447 if (pts != 0) {
01448
01449 is->video_clock = pts;
01450 } else {
01451 pts = is->video_clock;
01452 }
01453
01454 frame_delay = av_q2d(is->video_st->codec->time_base);
01455
01456
01457 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
01458 is->video_clock += frame_delay;
01459
01460 return queue_picture(is, src_frame, pts, pos);
01461 }
01462
01463 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
01464 {
01465 int got_picture, i;
01466
01467 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
01468 return -1;
01469
01470 if (pkt->data == flush_pkt.data) {
01471 avcodec_flush_buffers(is->video_st->codec);
01472
01473 SDL_LockMutex(is->pictq_mutex);
01474
01475 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
01476 is->pictq[i].target_clock= 0;
01477 }
01478 while (is->pictq_size && !is->videoq.abort_request) {
01479 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01480 }
01481 is->video_current_pos = -1;
01482 SDL_UnlockMutex(is->pictq_mutex);
01483
01484 init_pts_correction(&is->pts_ctx);
01485 is->frame_last_pts = AV_NOPTS_VALUE;
01486 is->frame_last_delay = 0;
01487 is->frame_timer = (double)av_gettime() / 1000000.0;
01488 is->skip_frames = 1;
01489 is->skip_frames_index = 0;
01490 return 0;
01491 }
01492
01493 avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
01494
01495 if (got_picture) {
01496 if (decoder_reorder_pts == -1) {
01497 *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
01498 } else if (decoder_reorder_pts) {
01499 *pts = frame->pkt_pts;
01500 } else {
01501 *pts = frame->pkt_dts;
01502 }
01503
01504 if (*pts == AV_NOPTS_VALUE) {
01505 *pts = 0;
01506 }
01507
01508 is->skip_frames_index += 1;
01509 if (is->skip_frames_index >= is->skip_frames) {
01510 is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
01511 return 1;
01512 }
01513
01514 }
01515 return 0;
01516 }
01517
01518 #if CONFIG_AVFILTER
01519 typedef struct {
01520 VideoState *is;
01521 AVFrame *frame;
01522 int use_dr1;
01523 } FilterPriv;
01524
01525 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
01526 {
01527 AVFilterContext *ctx = codec->opaque;
01528 AVFilterBufferRef *ref;
01529 int perms = AV_PERM_WRITE;
01530 int i, w, h, stride[4];
01531 unsigned edge;
01532 int pixel_size;
01533
01534 if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
01535 perms |= AV_PERM_NEG_LINESIZES;
01536
01537 if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
01538 if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
01539 if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
01540 if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
01541 }
01542 if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
01543
01544 w = codec->width;
01545 h = codec->height;
01546 avcodec_align_dimensions2(codec, &w, &h, stride);
01547 edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
01548 w += edge << 1;
01549 h += edge << 1;
01550
01551 if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
01552 return -1;
01553
01554 pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
01555 ref->video->w = codec->width;
01556 ref->video->h = codec->height;
01557 for (i = 0; i < 4; i ++) {
01558 unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
01559 unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
01560
01561 if (ref->data[i]) {
01562 ref->data[i] += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
01563 }
01564 pic->data[i] = ref->data[i];
01565 pic->linesize[i] = ref->linesize[i];
01566 }
01567 pic->opaque = ref;
01568 pic->type = FF_BUFFER_TYPE_USER;
01569 pic->reordered_opaque = codec->reordered_opaque;
01570 if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
01571 else pic->pkt_pts = AV_NOPTS_VALUE;
01572 return 0;
01573 }
01574
01575 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
01576 {
01577 memset(pic->data, 0, sizeof(pic->data));
01578 avfilter_unref_buffer(pic->opaque);
01579 }
01580
01581 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
01582 {
01583 AVFilterBufferRef *ref = pic->opaque;
01584
01585 if (pic->data[0] == NULL) {
01586 pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
01587 return codec->get_buffer(codec, pic);
01588 }
01589
01590 if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
01591 (codec->pix_fmt != ref->format)) {
01592 av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
01593 return -1;
01594 }
01595
01596 pic->reordered_opaque = codec->reordered_opaque;
01597 if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
01598 else pic->pkt_pts = AV_NOPTS_VALUE;
01599 return 0;
01600 }
01601
01602 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
01603 {
01604 FilterPriv *priv = ctx->priv;
01605 AVCodecContext *codec;
01606 if (!opaque) return -1;
01607
01608 priv->is = opaque;
01609 codec = priv->is->video_st->codec;
01610 codec->opaque = ctx;
01611 if (codec->codec->capabilities & CODEC_CAP_DR1) {
01612 priv->use_dr1 = 1;
01613 codec->get_buffer = input_get_buffer;
01614 codec->release_buffer = input_release_buffer;
01615 codec->reget_buffer = input_reget_buffer;
01616 codec->thread_safe_callbacks = 1;
01617 }
01618
01619 priv->frame = avcodec_alloc_frame();
01620
01621 return 0;
01622 }
01623
01624 static void input_uninit(AVFilterContext *ctx)
01625 {
01626 FilterPriv *priv = ctx->priv;
01627 av_free(priv->frame);
01628 }
01629
01630 static int input_request_frame(AVFilterLink *link)
01631 {
01632 FilterPriv *priv = link->src->priv;
01633 AVFilterBufferRef *picref;
01634 int64_t pts = 0;
01635 AVPacket pkt;
01636 int ret;
01637
01638 while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
01639 av_free_packet(&pkt);
01640 if (ret < 0)
01641 return -1;
01642
01643 if (priv->use_dr1) {
01644 picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
01645 } else {
01646 picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
01647 av_image_copy(picref->data, picref->linesize,
01648 priv->frame->data, priv->frame->linesize,
01649 picref->format, link->w, link->h);
01650 }
01651 av_free_packet(&pkt);
01652
01653 avfilter_copy_frame_props(picref, priv->frame);
01654 picref->pts = pts;
01655
01656 avfilter_start_frame(link, picref);
01657 avfilter_draw_slice(link, 0, link->h, 1);
01658 avfilter_end_frame(link);
01659
01660 return 0;
01661 }
01662
01663 static int input_query_formats(AVFilterContext *ctx)
01664 {
01665 FilterPriv *priv = ctx->priv;
01666 enum PixelFormat pix_fmts[] = {
01667 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
01668 };
01669
01670 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
01671 return 0;
01672 }
01673
01674 static int input_config_props(AVFilterLink *link)
01675 {
01676 FilterPriv *priv = link->src->priv;
01677 AVCodecContext *c = priv->is->video_st->codec;
01678
01679 link->w = c->width;
01680 link->h = c->height;
01681 link->time_base = priv->is->video_st->time_base;
01682
01683 return 0;
01684 }
01685
01686 static AVFilter input_filter =
01687 {
01688 .name = "avplay_input",
01689
01690 .priv_size = sizeof(FilterPriv),
01691
01692 .init = input_init,
01693 .uninit = input_uninit,
01694
01695 .query_formats = input_query_formats,
01696
01697 .inputs = (AVFilterPad[]) {{ .name = NULL }},
01698 .outputs = (AVFilterPad[]) {{ .name = "default",
01699 .type = AVMEDIA_TYPE_VIDEO,
01700 .request_frame = input_request_frame,
01701 .config_props = input_config_props, },
01702 { .name = NULL }},
01703 };
01704
01705 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
01706 {
01707 char sws_flags_str[128];
01708 int ret;
01709 AVSinkContext avsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
01710 AVFilterContext *filt_src = NULL, *filt_out = NULL;
01711 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
01712 graph->scale_sws_opts = av_strdup(sws_flags_str);
01713
01714 if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
01715 NULL, is, graph)) < 0)
01716 return ret;
01717 if ((ret = avfilter_graph_create_filter(&filt_out, &avsink, "out",
01718 NULL, &avsink_ctx, graph)) < 0)
01719 return ret;
01720
01721 if (vfilters) {
01722 AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
01723 AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
01724
01725 outputs->name = av_strdup("in");
01726 outputs->filter_ctx = filt_src;
01727 outputs->pad_idx = 0;
01728 outputs->next = NULL;
01729
01730 inputs->name = av_strdup("out");
01731 inputs->filter_ctx = filt_out;
01732 inputs->pad_idx = 0;
01733 inputs->next = NULL;
01734
01735 if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
01736 return ret;
01737 av_freep(&vfilters);
01738 } else {
01739 if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
01740 return ret;
01741 }
01742
01743 if ((ret = avfilter_graph_config(graph, NULL)) < 0)
01744 return ret;
01745
01746 is->out_video_filter = filt_out;
01747
01748 return ret;
01749 }
01750
01751 #endif
01752
01753 static int video_thread(void *arg)
01754 {
01755 VideoState *is = arg;
01756 AVFrame *frame = avcodec_alloc_frame();
01757 int64_t pts_int;
01758 double pts;
01759 int ret;
01760
01761 #if CONFIG_AVFILTER
01762 AVFilterGraph *graph = avfilter_graph_alloc();
01763 AVFilterContext *filt_out = NULL;
01764 int64_t pos;
01765 int last_w = is->video_st->codec->width;
01766 int last_h = is->video_st->codec->height;
01767
01768 if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
01769 goto the_end;
01770 filt_out = is->out_video_filter;
01771 #endif
01772
01773 for (;;) {
01774 #if !CONFIG_AVFILTER
01775 AVPacket pkt;
01776 #else
01777 AVFilterBufferRef *picref;
01778 AVRational tb;
01779 #endif
01780 while (is->paused && !is->videoq.abort_request)
01781 SDL_Delay(10);
01782 #if CONFIG_AVFILTER
01783 if ( last_w != is->video_st->codec->width
01784 || last_h != is->video_st->codec->height) {
01785 av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
01786 is->video_st->codec->width, is->video_st->codec->height);
01787 avfilter_graph_free(&graph);
01788 graph = avfilter_graph_alloc();
01789 if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
01790 goto the_end;
01791 filt_out = is->out_video_filter;
01792 last_w = is->video_st->codec->width;
01793 last_h = is->video_st->codec->height;
01794 }
01795 ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
01796 if (picref) {
01797 pts_int = picref->pts;
01798 pos = picref->pos;
01799 frame->opaque = picref;
01800 }
01801
01802 if (av_cmp_q(tb, is->video_st->time_base)) {
01803 av_unused int64_t pts1 = pts_int;
01804 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
01805 av_dlog(NULL, "video_thread(): "
01806 "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
01807 tb.num, tb.den, pts1,
01808 is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
01809 }
01810 #else
01811 ret = get_video_frame(is, frame, &pts_int, &pkt);
01812 #endif
01813
01814 if (ret < 0)
01815 goto the_end;
01816
01817 if (!ret)
01818 continue;
01819
01820 pts = pts_int * av_q2d(is->video_st->time_base);
01821
01822 #if CONFIG_AVFILTER
01823 ret = output_picture2(is, frame, pts, pos);
01824 #else
01825 ret = output_picture2(is, frame, pts, pkt.pos);
01826 av_free_packet(&pkt);
01827 #endif
01828 if (ret < 0)
01829 goto the_end;
01830
01831 if (step)
01832 if (cur_stream)
01833 stream_pause(cur_stream);
01834 }
01835 the_end:
01836 #if CONFIG_AVFILTER
01837 avfilter_graph_free(&graph);
01838 #endif
01839 av_free(frame);
01840 return 0;
01841 }
01842
01843 static int subtitle_thread(void *arg)
01844 {
01845 VideoState *is = arg;
01846 SubPicture *sp;
01847 AVPacket pkt1, *pkt = &pkt1;
01848 int got_subtitle;
01849 double pts;
01850 int i, j;
01851 int r, g, b, y, u, v, a;
01852
01853 for (;;) {
01854 while (is->paused && !is->subtitleq.abort_request) {
01855 SDL_Delay(10);
01856 }
01857 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
01858 break;
01859
01860 if (pkt->data == flush_pkt.data) {
01861 avcodec_flush_buffers(is->subtitle_st->codec);
01862 continue;
01863 }
01864 SDL_LockMutex(is->subpq_mutex);
01865 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
01866 !is->subtitleq.abort_request) {
01867 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
01868 }
01869 SDL_UnlockMutex(is->subpq_mutex);
01870
01871 if (is->subtitleq.abort_request)
01872 return 0;
01873
01874 sp = &is->subpq[is->subpq_windex];
01875
01876
01877
01878 pts = 0;
01879 if (pkt->pts != AV_NOPTS_VALUE)
01880 pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
01881
01882 avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
01883 &got_subtitle, pkt);
01884
01885 if (got_subtitle && sp->sub.format == 0) {
01886 sp->pts = pts;
01887
01888 for (i = 0; i < sp->sub.num_rects; i++)
01889 {
01890 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
01891 {
01892 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
01893 y = RGB_TO_Y_CCIR(r, g, b);
01894 u = RGB_TO_U_CCIR(r, g, b, 0);
01895 v = RGB_TO_V_CCIR(r, g, b, 0);
01896 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
01897 }
01898 }
01899
01900
01901 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
01902 is->subpq_windex = 0;
01903 SDL_LockMutex(is->subpq_mutex);
01904 is->subpq_size++;
01905 SDL_UnlockMutex(is->subpq_mutex);
01906 }
01907 av_free_packet(pkt);
01908 }
01909 return 0;
01910 }
01911
01912
01913 static void update_sample_display(VideoState *is, short *samples, int samples_size)
01914 {
01915 int size, len;
01916
01917 size = samples_size / sizeof(short);
01918 while (size > 0) {
01919 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
01920 if (len > size)
01921 len = size;
01922 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
01923 samples += len;
01924 is->sample_array_index += len;
01925 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
01926 is->sample_array_index = 0;
01927 size -= len;
01928 }
01929 }
01930
01931
01932
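/* when audio is not the master clock, return the number of sample bytes to
   actually output so the audio slowly drifts back towards the master clock */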
01933 static int synchronize_audio(VideoState *is, short *samples,
01934 int samples_size1, double pts)
01935 {
01936 int n, samples_size;
01937 double ref_clock;
01938
01939 n = 2 * is->audio_st->codec->channels;
01940 samples_size = samples_size1;
01941
01942
01943 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
01944 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01945 double diff, avg_diff;
01946 int wanted_size, min_size, max_size, nb_samples;
01947
01948 ref_clock = get_master_clock(is);
01949 diff = get_audio_clock(is) - ref_clock;
01950
01951 if (diff < AV_NOSYNC_THRESHOLD) {
01952 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
01953 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
01954
01955 is->audio_diff_avg_count++;
01956 } else {
01957
01958 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
01959
01960 if (fabs(avg_diff) >= is->audio_diff_threshold) {
01961 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
01962 nb_samples = samples_size / n;
01963
01964 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01965 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01966 if (wanted_size < min_size)
01967 wanted_size = min_size;
01968 else if (wanted_size > max_size)
01969 wanted_size = max_size;
01970
01971
01972 if (wanted_size < samples_size) {
01973
01974 samples_size = wanted_size;
01975 } else if (wanted_size > samples_size) {
01976 uint8_t *samples_end, *q;
01977 int nb;
01978
01979
01980 nb = (samples_size - wanted_size);
01981 samples_end = (uint8_t *)samples + samples_size - n;
01982 q = samples_end + n;
01983 while (nb > 0) {
01984 memcpy(q, samples_end, n);
01985 q += n;
01986 nb -= n;
01987 }
01988 samples_size = wanted_size;
01989 }
01990 }
01991 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
01992 diff, avg_diff, samples_size - samples_size1,
01993 is->audio_clock, is->video_clock, is->audio_diff_threshold);
01994 }
01995 } else {
01996
01997
01998 is->audio_diff_avg_count = 0;
01999 is->audio_diff_cum = 0;
02000 }
02001 }
02002
02003 return samples_size;
02004 }
02005
02006
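/* decode one audio frame, converting it to signed 16-bit if needed, and
   return its size in bytes; also updates the audio clock */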
02007 static int audio_decode_frame(VideoState *is, double *pts_ptr)
02008 {
02009 AVPacket *pkt_temp = &is->audio_pkt_temp;
02010 AVPacket *pkt = &is->audio_pkt;
02011 AVCodecContext *dec = is->audio_st->codec;
02012 int n, len1, data_size, got_frame;
02013 double pts;
02014 int new_packet = 0;
02015 int flush_complete = 0;
02016
02017 for (;;) {
02018
02019 while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
02020 if (!is->frame) {
02021 if (!(is->frame = avcodec_alloc_frame()))
02022 return AVERROR(ENOMEM);
02023 } else
02024 avcodec_get_frame_defaults(is->frame);
02025
02026 if (flush_complete)
02027 break;
02028 new_packet = 0;
02029 len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
02030 if (len1 < 0) {
02031
02032 pkt_temp->size = 0;
02033 break;
02034 }
02035
02036 pkt_temp->data += len1;
02037 pkt_temp->size -= len1;
02038
02039 if (!got_frame) {
02040
02041 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
02042 flush_complete = 1;
02043 continue;
02044 }
02045 data_size = av_samples_get_buffer_size(NULL, dec->channels,
02046 is->frame->nb_samples,
02047 dec->sample_fmt, 1);
02048
02049 if (dec->sample_fmt != is->audio_src_fmt) {
02050 if (is->reformat_ctx)
02051 av_audio_convert_free(is->reformat_ctx);
02052 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
02053 dec->sample_fmt, 1, NULL, 0);
02054 if (!is->reformat_ctx) {
02055 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
02056 av_get_sample_fmt_name(dec->sample_fmt),
02057 av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
02058 break;
02059 }
02060 is->audio_src_fmt= dec->sample_fmt;
02061 }
02062
02063 if (is->reformat_ctx) {
02064 const void *ibuf[6] = { is->frame->data[0] };
02065 void *obuf[6];
02066 int istride[6] = { av_get_bytes_per_sample(dec->sample_fmt) };
02067 int ostride[6] = { 2 };
02068 int len= data_size/istride[0];
02069 obuf[0] = av_realloc(is->audio_buf1, FFALIGN(len * ostride[0], 32));
02070 if (!obuf[0]) {
02071 return AVERROR(ENOMEM);
02072 }
02073 is->audio_buf1 = obuf[0];
02074 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len) < 0) {
02075 printf("av_audio_convert() failed\n");
02076 break;
02077 }
02078 is->audio_buf = is->audio_buf1;
02079
02080
02081 data_size = len * 2;
02082 } else {
02083 is->audio_buf = is->frame->data[0];
02084 }
02085
02086
02087 pts = is->audio_clock;
02088 *pts_ptr = pts;
02089 n = 2 * dec->channels;
02090 is->audio_clock += (double)data_size /
02091 (double)(n * dec->sample_rate);
02092 #ifdef DEBUG
02093 {
02094 static double last_clock;
02095 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
02096 is->audio_clock - last_clock,
02097 is->audio_clock, pts);
02098 last_clock = is->audio_clock;
02099 }
02100 #endif
02101 return data_size;
02102 }
02103
02104
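      /* free the previous packet */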
02105 if (pkt->data)
02106 av_free_packet(pkt);
02107 memset(pkt_temp, 0, sizeof(*pkt_temp));
02108
02109 if (is->paused || is->audioq.abort_request) {
02110 return -1;
02111 }
02112
02113
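      /* read the next packet from the audio queue */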
02114 if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
02115 return -1;
02116
02117 if (pkt->data == flush_pkt.data)
02118 avcodec_flush_buffers(dec);
02119
02120 *pkt_temp = *pkt;
02121
02122
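      /* if the packet carries a pts, resynchronize the audio clock to it */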
02123 if (pkt->pts != AV_NOPTS_VALUE) {
02124 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
02125 }
02126 }
02127 }
02128
02129
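      /* SDL audio callback: decode, synchronize and copy audio data into the device buffer */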
02130 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
02131 {
02132 VideoState *is = opaque;
02133 int audio_size, len1;
02134 double pts;
02135
02136 audio_callback_time = av_gettime();
02137
02138 while (len > 0) {
02139 if (is->audio_buf_index >= is->audio_buf_size) {
02140 audio_size = audio_decode_frame(is, &pts);
02141 if (audio_size < 0) {
02142
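      /* nothing could be decoded: output silence */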
02143 is->audio_buf = is->silence_buf;
02144 is->audio_buf_size = sizeof(is->silence_buf);
02145 } else {
02146 if (is->show_audio)
02147 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
02148 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
02149 pts);
02150 is->audio_buf_size = audio_size;
02151 }
02152 is->audio_buf_index = 0;
02153 }
02154 len1 = is->audio_buf_size - is->audio_buf_index;
02155 if (len1 > len)
02156 len1 = len;
02157 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
02158 len -= len1;
02159 stream += len1;
02160 is->audio_buf_index += len1;
02161 }
02162 }
02163
02164
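      /* open the stream with the given index; returns 0 on success, a negative value on error */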
02165 static int stream_component_open(VideoState *is, int stream_index)
02166 {
02167 AVFormatContext *ic = is->ic;
02168 AVCodecContext *avctx;
02169 AVCodec *codec;
02170 SDL_AudioSpec wanted_spec, spec;
02171 AVDictionary *opts;
02172 AVDictionaryEntry *t = NULL;
02173
02174 if (stream_index < 0 || stream_index >= ic->nb_streams)
02175 return -1;
02176 avctx = ic->streams[stream_index]->codec;
02177
02178 opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
02179
02180 codec = avcodec_find_decoder(avctx->codec_id);
02181 avctx->debug_mv = debug_mv;
02182 avctx->debug = debug;
02183 avctx->workaround_bugs = workaround_bugs;
02184 avctx->lowres = lowres;
02185 avctx->idct_algo = idct;
02186 avctx->skip_frame = skip_frame;
02187 avctx->skip_idct = skip_idct;
02188 avctx->skip_loop_filter = skip_loop_filter;
02189 avctx->error_recognition = error_recognition;
02190 avctx->error_concealment = error_concealment;
02191
02192 if (lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
02193 if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
02194
02195 if (!av_dict_get(opts, "threads", NULL, 0))
02196 av_dict_set(&opts, "threads", "auto", 0);
02197 if (!codec ||
02198 avcodec_open2(avctx, codec, &opts) < 0)
02199 return -1;
02200 if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
02201 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
02202 return AVERROR_OPTION_NOT_FOUND;
02203 }
02204
02205
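      /* prepare the SDL audio output */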
02206 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
02207 wanted_spec.freq = avctx->sample_rate;
02208 wanted_spec.format = AUDIO_S16SYS;
02209 wanted_spec.channels = avctx->channels;
02210 wanted_spec.silence = 0;
02211 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
02212 wanted_spec.callback = sdl_audio_callback;
02213 wanted_spec.userdata = is;
02214 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
02215 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
02216 return -1;
02217 }
02218 is->audio_hw_buf_size = spec.size;
02219 is->audio_src_fmt = AV_SAMPLE_FMT_S16;
02220 }
02221
02222 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
02223 switch (avctx->codec_type) {
02224 case AVMEDIA_TYPE_AUDIO:
02225 is->audio_stream = stream_index;
02226 is->audio_st = ic->streams[stream_index];
02227 is->audio_buf_size = 0;
02228 is->audio_buf_index = 0;
02229
02230
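      /* initialize the averaging filter used for audio/video synchronization */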
02231 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
02232 is->audio_diff_avg_count = 0;
02233
02234
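      /* the exact audio FIFO fullness is unknown, so only correct drift larger than roughly two hardware buffers */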
02235 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
02236
02237 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
02238 packet_queue_init(&is->audioq);
02239 SDL_PauseAudio(0);
02240 break;
02241 case AVMEDIA_TYPE_VIDEO:
02242 is->video_stream = stream_index;
02243 is->video_st = ic->streams[stream_index];
02244
02245 packet_queue_init(&is->videoq);
02246 is->video_tid = SDL_CreateThread(video_thread, is);
02247 break;
02248 case AVMEDIA_TYPE_SUBTITLE:
02249 is->subtitle_stream = stream_index;
02250 is->subtitle_st = ic->streams[stream_index];
02251 packet_queue_init(&is->subtitleq);
02252
02253 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
02254 break;
02255 default:
02256 break;
02257 }
02258 return 0;
02259 }
02260
02261 static void stream_component_close(VideoState *is, int stream_index)
02262 {
02263 AVFormatContext *ic = is->ic;
02264 AVCodecContext *avctx;
02265
02266 if (stream_index < 0 || stream_index >= ic->nb_streams)
02267 return;
02268 avctx = ic->streams[stream_index]->codec;
02269
02270 switch (avctx->codec_type) {
02271 case AVMEDIA_TYPE_AUDIO:
02272 packet_queue_abort(&is->audioq);
02273
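      /* close the audio device first so the callback stops using the buffers freed below */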
02274 SDL_CloseAudio();
02275
02276 packet_queue_end(&is->audioq);
02277 av_free_packet(&is->audio_pkt);
02278 if (is->reformat_ctx)
02279 av_audio_convert_free(is->reformat_ctx);
02280 is->reformat_ctx = NULL;
02281 av_freep(&is->audio_buf1);
02282 is->audio_buf = NULL;
02283 av_freep(&is->frame);
02284
02285 if (is->rdft) {
02286 av_rdft_end(is->rdft);
02287 av_freep(&is->rdft_data);
02288 is->rdft = NULL;
02289 is->rdft_bits = 0;
02290 }
02291 break;
02292 case AVMEDIA_TYPE_VIDEO:
02293 packet_queue_abort(&is->videoq);
02294
02295
02296
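      /* wake up the video thread in case it is waiting for a free picture */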
02297 SDL_LockMutex(is->pictq_mutex);
02298 SDL_CondSignal(is->pictq_cond);
02299 SDL_UnlockMutex(is->pictq_mutex);
02300
02301 SDL_WaitThread(is->video_tid, NULL);
02302
02303 packet_queue_end(&is->videoq);
02304 break;
02305 case AVMEDIA_TYPE_SUBTITLE:
02306 packet_queue_abort(&is->subtitleq);
02307
02308
02309
02310 SDL_LockMutex(is->subpq_mutex);
02311 is->subtitle_stream_changed = 1;
02312
02313 SDL_CondSignal(is->subpq_cond);
02314 SDL_UnlockMutex(is->subpq_mutex);
02315
02316 SDL_WaitThread(is->subtitle_tid, NULL);
02317
02318 packet_queue_end(&is->subtitleq);
02319 break;
02320 default:
02321 break;
02322 }
02323
02324 ic->streams[stream_index]->discard = AVDISCARD_ALL;
02325 avcodec_close(avctx);
02326 switch (avctx->codec_type) {
02327 case AVMEDIA_TYPE_AUDIO:
02328 is->audio_st = NULL;
02329 is->audio_stream = -1;
02330 break;
02331 case AVMEDIA_TYPE_VIDEO:
02332 is->video_st = NULL;
02333 is->video_stream = -1;
02334 break;
02335 case AVMEDIA_TYPE_SUBTITLE:
02336 is->subtitle_st = NULL;
02337 is->subtitle_stream = -1;
02338 break;
02339 default:
02340 break;
02341 }
02342 }
02343
02344
02345
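/* a single decode thread sets this, so a plain global is enough for the interrupt callback */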
02346 static VideoState *global_video_state;
02347
02348 static int decode_interrupt_cb(void *ctx)
02349 {
02350 return global_video_state && global_video_state->abort_request;
02351 }
02352
02353
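/* this thread reads the stream from disk or the network and feeds the packet queues */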
02354 static int decode_thread(void *arg)
02355 {
02356 VideoState *is = arg;
02357 AVFormatContext *ic = NULL;
02358 int err, i, ret;
02359 int st_index[AVMEDIA_TYPE_NB];
02360 AVPacket pkt1, *pkt = &pkt1;
02361 int eof = 0;
02362 int pkt_in_play_range = 0;
02363 AVDictionaryEntry *t;
02364 AVDictionary **opts;
02365 int orig_nb_streams;
02366
02367 memset(st_index, -1, sizeof(st_index));
02368 is->video_stream = -1;
02369 is->audio_stream = -1;
02370 is->subtitle_stream = -1;
02371
02372 global_video_state = is;
02373
02374 ic = avformat_alloc_context();
02375 ic->interrupt_callback.callback = decode_interrupt_cb;
02376 err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
02377 if (err < 0) {
02378 print_error(is->filename, err);
02379 ret = -1;
02380 goto fail;
02381 }
02382 if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
02383 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
02384 ret = AVERROR_OPTION_NOT_FOUND;
02385 goto fail;
02386 }
02387 is->ic = ic;
02388
02389 if (genpts)
02390 ic->flags |= AVFMT_FLAG_GENPTS;
02391
02392 opts = setup_find_stream_info_opts(ic, codec_opts);
02393 orig_nb_streams = ic->nb_streams;
02394
02395 err = avformat_find_stream_info(ic, opts);
02396 if (err < 0) {
02397 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
02398 ret = -1;
02399 goto fail;
02400 }
02401 for (i = 0; i < orig_nb_streams; i++)
02402 av_dict_free(&opts[i]);
02403 av_freep(&opts);
02404
02405 if (ic->pb)
02406 ic->pb->eof_reached = 0;
02407
02408 if (seek_by_bytes < 0)
02409 seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
02410
02411
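      /* if a start time was requested, seek to it before playback begins */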
02412 if (start_time != AV_NOPTS_VALUE) {
02413 int64_t timestamp;
02414
02415 timestamp = start_time;
02416
02417 if (ic->start_time != AV_NOPTS_VALUE)
02418 timestamp += ic->start_time;
02419 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
02420 if (ret < 0) {
02421 fprintf(stderr, "%s: could not seek to position %0.3f\n",
02422 is->filename, (double)timestamp / AV_TIME_BASE);
02423 }
02424 }
02425
02426 for (i = 0; i < ic->nb_streams; i++)
02427 ic->streams[i]->discard = AVDISCARD_ALL;
02428 if (!video_disable)
02429 st_index[AVMEDIA_TYPE_VIDEO] =
02430 av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
02431 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
02432 if (!audio_disable)
02433 st_index[AVMEDIA_TYPE_AUDIO] =
02434 av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
02435 wanted_stream[AVMEDIA_TYPE_AUDIO],
02436 st_index[AVMEDIA_TYPE_VIDEO],
02437 NULL, 0);
02438 if (!video_disable)
02439 st_index[AVMEDIA_TYPE_SUBTITLE] =
02440 av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
02441 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
02442 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
02443 st_index[AVMEDIA_TYPE_AUDIO] :
02444 st_index[AVMEDIA_TYPE_VIDEO]),
02445 NULL, 0);
02446 if (show_status) {
02447 av_dump_format(ic, 0, is->filename, 0);
02448 }
02449
02450
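      /* open the selected streams */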
02451 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
02452 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
02453 }
02454
02455 ret = -1;
02456 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
02457 ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
02458 }
02459 is->refresh_tid = SDL_CreateThread(refresh_thread, is);
02460 if (ret < 0) {
02461 if (!display_disable)
02462 is->show_audio = 2;
02463 }
02464
02465 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
02466 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
02467 }
02468
02469 if (is->video_stream < 0 && is->audio_stream < 0) {
02470 fprintf(stderr, "%s: could not open codecs\n", is->filename);
02471 ret = -1;
02472 goto fail;
02473 }
02474
02475 for (;;) {
02476 if (is->abort_request)
02477 break;
02478 if (is->paused != is->last_paused) {
02479 is->last_paused = is->paused;
02480 if (is->paused)
02481 is->read_pause_return = av_read_pause(ic);
02482 else
02483 av_read_play(ic);
02484 }
02485 #if CONFIG_RTSP_DEMUXER
02486 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
02487
02488
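      /* while an RTSP stream is paused there is nothing to read, so just wait a little */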
02489 SDL_Delay(10);
02490 continue;
02491 }
02492 #endif
02493 if (is->seek_req) {
02494 int64_t seek_target = is->seek_pos;
02495 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
02496 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
02497
02498
02499
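      /* the +/-2 widens the seek window slightly to compensate for rounding of seek_pos/seek_rel */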
02500 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
02501 if (ret < 0) {
02502 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
02503 } else {
02504 if (is->audio_stream >= 0) {
02505 packet_queue_flush(&is->audioq);
02506 packet_queue_put(&is->audioq, &flush_pkt);
02507 }
02508 if (is->subtitle_stream >= 0) {
02509 packet_queue_flush(&is->subtitleq);
02510 packet_queue_put(&is->subtitleq, &flush_pkt);
02511 }
02512 if (is->video_stream >= 0) {
02513 packet_queue_flush(&is->videoq);
02514 packet_queue_put(&is->videoq, &flush_pkt);
02515 }
02516 }
02517 is->seek_req = 0;
02518 eof = 0;
02519 }
02520
02521
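      /* if the queues are full, no need to read more */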
02522 if ( is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
02523 || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
02524 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0)
02525 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0))) {
02526
02527 SDL_Delay(10);
02528 continue;
02529 }
02530 if (eof) {
02531 if (is->video_stream >= 0) {
02532 av_init_packet(pkt);
02533 pkt->data = NULL;
02534 pkt->size = 0;
02535 pkt->stream_index = is->video_stream;
02536 packet_queue_put(&is->videoq, pkt);
02537 }
02538 if (is->audio_stream >= 0 &&
02539 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
02540 av_init_packet(pkt);
02541 pkt->data = NULL;
02542 pkt->size = 0;
02543 pkt->stream_index = is->audio_stream;
02544 packet_queue_put(&is->audioq, pkt);
02545 }
02546 SDL_Delay(10);
02547 if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
02548 if (loop != 1 && (!loop || --loop)) {
02549 stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
02550 } else if (autoexit) {
02551 ret = AVERROR_EOF;
02552 goto fail;
02553 }
02554 }
02555 continue;
02556 }
02557 ret = av_read_frame(ic, pkt);
02558 if (ret < 0) {
02559 if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
02560 eof = 1;
02561 if (ic->pb && ic->pb->error)
02562 break;
02563 SDL_Delay(100);
02564 continue;
02565 }
02566
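      /* check if the packet lies within the requested play range (start_time .. start_time + duration) */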
02567 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
02568 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
02569 av_q2d(ic->streams[pkt->stream_index]->time_base) -
02570 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
02571 <= ((double)duration / 1000000);
02572 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
02573 packet_queue_put(&is->audioq, pkt);
02574 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
02575 packet_queue_put(&is->videoq, pkt);
02576 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
02577 packet_queue_put(&is->subtitleq, pkt);
02578 } else {
02579 av_free_packet(pkt);
02580 }
02581 }
02582
02583 while (!is->abort_request) {
02584 SDL_Delay(100);
02585 }
02586
02587 ret = 0;
02588 fail:
02589
02590 global_video_state = NULL;
02591
02592
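      /* close each stream */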
02593 if (is->audio_stream >= 0)
02594 stream_component_close(is, is->audio_stream);
02595 if (is->video_stream >= 0)
02596 stream_component_close(is, is->video_stream);
02597 if (is->subtitle_stream >= 0)
02598 stream_component_close(is, is->subtitle_stream);
02599 if (is->ic) {
02600 avformat_close_input(&is->ic);
02601 }
02602
02603 if (ret != 0) {
02604 SDL_Event event;
02605
02606 event.type = FF_QUIT_EVENT;
02607 event.user.data1 = is;
02608 SDL_PushEvent(&event);
02609 }
02610 return 0;
02611 }
02612
02613 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
02614 {
02615 VideoState *is;
02616
02617 is = av_mallocz(sizeof(VideoState));
02618 if (!is)
02619 return NULL;
02620 av_strlcpy(is->filename, filename, sizeof(is->filename));
02621 is->iformat = iformat;
02622 is->ytop = 0;
02623 is->xleft = 0;
02624
02625
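      /* create the synchronization primitives for the picture and subtitle queues */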
02626 is->pictq_mutex = SDL_CreateMutex();
02627 is->pictq_cond = SDL_CreateCond();
02628
02629 is->subpq_mutex = SDL_CreateMutex();
02630 is->subpq_cond = SDL_CreateCond();
02631
02632 is->av_sync_type = av_sync_type;
02633 is->parse_tid = SDL_CreateThread(decode_thread, is);
02634 if (!is->parse_tid) {
02635 av_free(is);
02636 return NULL;
02637 }
02638 return is;
02639 }
02640
02641 static void stream_cycle_channel(VideoState *is, int codec_type)
02642 {
02643 AVFormatContext *ic = is->ic;
02644 int start_index, stream_index;
02645 AVStream *st;
02646
02647 if (codec_type == AVMEDIA_TYPE_VIDEO)
02648 start_index = is->video_stream;
02649 else if (codec_type == AVMEDIA_TYPE_AUDIO)
02650 start_index = is->audio_stream;
02651 else
02652 start_index = is->subtitle_stream;
02653 if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
02654 return;
02655 stream_index = start_index;
02656 for (;;) {
02657 if (++stream_index >= is->ic->nb_streams)
02658 {
02659 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
02660 {
02661 stream_index = -1;
02662 goto the_end;
02663 } else
02664 stream_index = 0;
02665 }
02666 if (stream_index == start_index)
02667 return;
02668 st = ic->streams[stream_index];
02669 if (st->codec->codec_type == codec_type) {
02670
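      /* accept the stream only if its parameters look usable */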
02671 switch (codec_type) {
02672 case AVMEDIA_TYPE_AUDIO:
02673 if (st->codec->sample_rate != 0 &&
02674 st->codec->channels != 0)
02675 goto the_end;
02676 break;
02677 case AVMEDIA_TYPE_VIDEO:
02678 case AVMEDIA_TYPE_SUBTITLE:
02679 goto the_end;
02680 default:
02681 break;
02682 }
02683 }
02684 }
02685 the_end:
02686 stream_component_close(is, start_index);
02687 stream_component_open(is, stream_index);
02688 }
02689
02690
02691 static void toggle_full_screen(void)
02692 {
02693 is_full_screen = !is_full_screen;
02694 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
02695
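      /* on OS X the SDL overlays must be reallocated after the mode change */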
02696 for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
02697 cur_stream->pictq[i].reallocate = 1;
02698 }
02699 #endif
02700 video_open(cur_stream);
02701 }
02702
02703 static void toggle_pause(void)
02704 {
02705 if (cur_stream)
02706 stream_pause(cur_stream);
02707 step = 0;
02708 }
02709
02710 static void step_to_next_frame(void)
02711 {
02712 if (cur_stream) {
02713
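      /* if the stream is paused, unpause it, then step to the next frame */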
02714 if (cur_stream->paused)
02715 stream_pause(cur_stream);
02716 }
02717 step = 1;
02718 }
02719
02720 static void toggle_audio_display(void)
02721 {
02722 if (cur_stream) {
02723 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
02724 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
02725 fill_rectangle(screen,
02726 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
02727 bgcolor);
02728 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
02729 }
02730 }
02731
02732
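/* handle events sent by SDL and the other threads */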
02733 static void event_loop(void)
02734 {
02735 SDL_Event event;
02736 double incr, pos, frac;
02737
02738 for (;;) {
02739 double x;
02740 SDL_WaitEvent(&event);
02741 switch (event.type) {
02742 case SDL_KEYDOWN:
02743 if (exit_on_keydown) {
02744 do_exit();
02745 break;
02746 }
02747 switch (event.key.keysym.sym) {
02748 case SDLK_ESCAPE:
02749 case SDLK_q:
02750 do_exit();
02751 break;
02752 case SDLK_f:
02753 toggle_full_screen();
02754 break;
02755 case SDLK_p:
02756 case SDLK_SPACE:
02757 toggle_pause();
02758 break;
02759 case SDLK_s:
02760 step_to_next_frame();
02761 break;
02762 case SDLK_a:
02763 if (cur_stream)
02764 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
02765 break;
02766 case SDLK_v:
02767 if (cur_stream)
02768 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
02769 break;
02770 case SDLK_t:
02771 if (cur_stream)
02772 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
02773 break;
02774 case SDLK_w:
02775 toggle_audio_display();
02776 break;
02777 case SDLK_LEFT:
02778 incr = -10.0;
02779 goto do_seek;
02780 case SDLK_RIGHT:
02781 incr = 10.0;
02782 goto do_seek;
02783 case SDLK_UP:
02784 incr = 60.0;
02785 goto do_seek;
02786 case SDLK_DOWN:
02787 incr = -60.0;
02788 do_seek:
02789 if (cur_stream) {
02790 if (seek_by_bytes) {
02791 if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
02792 pos = cur_stream->video_current_pos;
02793 } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
02794 pos = cur_stream->audio_pkt.pos;
02795 } else
02796 pos = avio_tell(cur_stream->ic->pb);
02797 if (cur_stream->ic->bit_rate)
02798 incr *= cur_stream->ic->bit_rate / 8.0;
02799 else
02800 incr *= 180000.0;
02801 pos += incr;
02802 stream_seek(cur_stream, pos, incr, 1);
02803 } else {
02804 pos = get_master_clock(cur_stream);
02805 pos += incr;
02806 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
02807 }
02808 }
02809 break;
02810 default:
02811 break;
02812 }
02813 break;
02814 case SDL_MOUSEBUTTONDOWN:
02815 if (exit_on_mousedown) {
02816 do_exit();
02817 break;
02818 }
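      /* fall through */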
02819 case SDL_MOUSEMOTION:
02820 if (event.type == SDL_MOUSEBUTTONDOWN) {
02821 x = event.button.x;
02822 } else {
02823 if (event.motion.state != SDL_PRESSED)
02824 break;
02825 x = event.motion.x;
02826 }
02827 if (cur_stream) {
02828 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
02829 uint64_t size = avio_size(cur_stream->ic->pb);
02830 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
02831 } else {
02832 int64_t ts;
02833 int ns, hh, mm, ss;
02834 int tns, thh, tmm, tss;
02835 tns = cur_stream->ic->duration / 1000000LL;
02836 thh = tns / 3600;
02837 tmm = (tns % 3600) / 60;
02838 tss = (tns % 60);
02839 frac = x / cur_stream->width;
02840 ns = frac * tns;
02841 hh = ns / 3600;
02842 mm = (ns % 3600) / 60;
02843 ss = (ns % 60);
02844 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
02845 hh, mm, ss, thh, tmm, tss);
02846 ts = frac * cur_stream->ic->duration;
02847 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
02848 ts += cur_stream->ic->start_time;
02849 stream_seek(cur_stream, ts, 0, 0);
02850 }
02851 }
02852 break;
02853 case SDL_VIDEORESIZE:
02854 if (cur_stream) {
02855 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
02856 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
02857 screen_width = cur_stream->width = event.resize.w;
02858 screen_height = cur_stream->height = event.resize.h;
02859 }
02860 break;
02861 case SDL_QUIT:
02862 case FF_QUIT_EVENT:
02863 do_exit();
02864 break;
02865 case FF_ALLOC_EVENT:
02866 video_open(event.user.data1);
02867 alloc_picture(event.user.data1);
02868 break;
02869 case FF_REFRESH_EVENT:
02870 video_refresh_timer(event.user.data1);
02871 cur_stream->refresh = 0;
02872 break;
02873 default:
02874 break;
02875 }
02876 }
02877 }
02878
02879 static int opt_frame_size(const char *opt, const char *arg)
02880 {
02881 av_log(NULL, AV_LOG_ERROR,
02882 "Option '%s' has been removed, use private format options instead\n", opt);
02883 return AVERROR(EINVAL);
02884 }
02885
02886 static int opt_width(const char *opt, const char *arg)
02887 {
02888 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02889 return 0;
02890 }
02891
02892 static int opt_height(const char *opt, const char *arg)
02893 {
02894 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02895 return 0;
02896 }
02897
02898 static int opt_format(const char *opt, const char *arg)
02899 {
02900 file_iformat = av_find_input_format(arg);
02901 if (!file_iformat) {
02902 fprintf(stderr, "Unknown input format: %s\n", arg);
02903 return AVERROR(EINVAL);
02904 }
02905 return 0;
02906 }
02907
02908 static int opt_frame_pix_fmt(const char *opt, const char *arg)
02909 {
02910 av_log(NULL, AV_LOG_ERROR,
02911 "Option '%s' has been removed, use private format options instead\n", opt);
02912 return AVERROR(EINVAL);
02913 }
02914
02915 static int opt_sync(const char *opt, const char *arg)
02916 {
02917 if (!strcmp(arg, "audio"))
02918 av_sync_type = AV_SYNC_AUDIO_MASTER;
02919 else if (!strcmp(arg, "video"))
02920 av_sync_type = AV_SYNC_VIDEO_MASTER;
02921 else if (!strcmp(arg, "ext"))
02922 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
02923 else {
02924 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
02925 exit(1);
02926 }
02927 return 0;
02928 }
02929
02930 static int opt_seek(const char *opt, const char *arg)
02931 {
02932 start_time = parse_time_or_die(opt, arg, 1);
02933 return 0;
02934 }
02935
02936 static int opt_duration(const char *opt, const char *arg)
02937 {
02938 duration = parse_time_or_die(opt, arg, 1);
02939 return 0;
02940 }
02941
02942 static int opt_debug(const char *opt, const char *arg)
02943 {
02944 av_log_set_level(99);
02945 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
02946 return 0;
02947 }
02948
02949 static int opt_vismv(const char *opt, const char *arg)
02950 {
02951 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
02952 return 0;
02953 }
02954
02955 static const OptionDef options[] = {
02956 #include "cmdutils_common_opts.h"
02957 { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
02958 { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
02959 { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
02960 { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
02961 { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
02962 { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
02963 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
02964 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
02965 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
02966 { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
02967 { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
02968 { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
02969 { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
02970 { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
02971 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
02972 { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
02973 { "debug", HAS_ARG | OPT_EXPERT, { (void*)opt_debug }, "print specific debug info", "" },
02974 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
02975 { "vismv", HAS_ARG | OPT_EXPERT, { (void*)opt_vismv }, "visualize motion vectors", "" },
02976 { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
02977 { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
02978 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
02979 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "", "" },
02980 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
02981 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
02982 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },
02983 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo", "algo" },
02984 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_recognition }, "set error detection threshold (0-4)", "threshold" },
02985 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options", "bit_mask" },
02986 { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
02987 { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
02988 { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
02989 { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
02990 { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
02991 { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
02992 { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
02993 #if CONFIG_AVFILTER
02994 { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
02995 #endif
02996 { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
02997 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
02998 { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
02999 { NULL, },
03000 };
03001
03002 static void show_usage(void)
03003 {
03004 printf("Simple media player\n");
03005 printf("usage: %s [options] input_file\n", program_name);
03006 printf("\n");
03007 }
03008
03009 static void show_help(void)
03010 {
03011 av_log_set_callback(log_callback_help);
03012 show_usage();
03013 show_help_options(options, "Main options:\n",
03014 OPT_EXPERT, 0);
03015 show_help_options(options, "\nAdvanced options:\n",
03016 OPT_EXPERT, OPT_EXPERT);
03017 printf("\n");
03018 show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
03019 show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
03020 #if !CONFIG_AVFILTER
03021 show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
03022 #endif
03023 printf("\nWhile playing:\n"
03024 "q, ESC quit\n"
03025 "f toggle full screen\n"
03026 "p, SPC pause\n"
03027 "a cycle audio channel\n"
03028 "v cycle video channel\n"
03029 "t cycle subtitle channel\n"
03030 "w show audio waves\n"
03031 "s activate frame-step mode\n"
03032 "left/right seek backward/forward 10 seconds\n"
03033 "down/up seek backward/forward 1 minute\n"
03034 "mouse click seek to percentage in file corresponding to fraction of width\n"
03035 );
03036 }
03037
03038 static void opt_input_file(void *optctx, const char *filename)
03039 {
03040 if (input_filename) {
03041 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
03042 filename, input_filename);
03043 exit(1);
03044 }
03045 if (!strcmp(filename, "-"))
03046 filename = "pipe:";
03047 input_filename = filename;
03048 }
03049
03050
03051 int main(int argc, char **argv)
03052 {
03053 int flags;
03054
03055 av_log_set_flags(AV_LOG_SKIP_REPEATED);
03056 parse_loglevel(argc, argv, options);
03057
03058
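      /* register all codecs, demuxers, devices and filters */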
03059 avcodec_register_all();
03060 #if CONFIG_AVDEVICE
03061 avdevice_register_all();
03062 #endif
03063 #if CONFIG_AVFILTER
03064 avfilter_register_all();
03065 #endif
03066 av_register_all();
03067 avformat_network_init();
03068
03069 init_opts();
03070
03071 show_banner();
03072
03073 parse_options(NULL, argc, argv, options, opt_input_file);
03074
03075 if (!input_filename) {
03076 show_usage();
03077 fprintf(stderr, "An input file must be specified\n");
03078 fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
03079 exit(1);
03080 }
03081
03082 if (display_disable) {
03083 video_disable = 1;
03084 }
03085 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
03086 #if !defined(__MINGW32__) && !defined(__APPLE__)
03087 flags |= SDL_INIT_EVENTTHREAD;
03088 #endif
03089 if (SDL_Init (flags)) {
03090 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
03091 exit(1);
03092 }
03093
03094 if (!display_disable) {
03095 #if HAVE_SDL_VIDEO_SIZE
03096 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
03097 fs_screen_width = vi->current_w;
03098 fs_screen_height = vi->current_h;
03099 #endif
03100 }
03101
03102 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
03103 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
03104 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
03105
03106 av_init_packet(&flush_pkt);
03107 flush_pkt.data = (uint8_t *)"FLUSH";
03108
03109 cur_stream = stream_open(input_filename, file_iformat);
03110
03111 event_loop();
03112
03113
03114
03115 return 0;
03116 }