FFmpeg  4.4.5
mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #define UNCHECKED_BITSTREAM_READER 1
29 #include <inttypes.h>
30 
31 #include "libavutil/attributes.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/mem_internal.h"
35 #include "libavutil/stereo3d.h"
37 
38 #include "avcodec.h"
39 #include "bytestream.h"
40 #include "error_resilience.h"
41 #include "hwconfig.h"
42 #include "idctdsp.h"
43 #include "internal.h"
44 #include "mpeg_er.h"
45 #include "mpeg12.h"
46 #include "mpeg12data.h"
47 #include "mpegutils.h"
48 #include "mpegvideo.h"
49 #include "mpegvideodata.h"
50 #include "profiles.h"
51 #include "thread.h"
52 #include "version.h"
53 #include "xvmc_internal.h"
54 
55 #define A53_MAX_CC_COUNT 2000
56 
57 typedef struct Mpeg1Context {
58  MpegEncContext mpeg_enc_ctx;
59  int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
60  int repeat_field; /* true if we must repeat the field */
61  AVPanScan pan_scan; /* some temporary storage for the panscan */
62  AVStereo3D stereo3d;
63  int has_stereo3d;
64  AVBufferRef *a53_buf_ref;
65  uint8_t afd;
66  int has_afd;
67  int slice_count;
68  AVRational save_aspect;
69  int save_width, save_height, save_progressive_seq;
70  int rc_buffer_size;
71  AVRational frame_rate_ext; /* MPEG-2 specific framerate modificator */
72  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
73  int tmpgexs;
74  int first_slice;
75  int extradata_decoded;
76 } Mpeg1Context;
77 
78 #define MB_TYPE_ZERO_MV 0x20000000
79 
80 static const uint32_t ptype2mb_type[7] = {
81  MB_TYPE_INTRA,
82  MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
83  MB_TYPE_L0,
84  MB_TYPE_L0 | MB_TYPE_CBP,
85  MB_TYPE_QUANT | MB_TYPE_INTRA,
86  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
87  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
88 };
89 
90 static const uint32_t btype2mb_type[11] = {
91  MB_TYPE_INTRA,
92  MB_TYPE_L1,
93  MB_TYPE_L1 | MB_TYPE_CBP,
94  MB_TYPE_L0,
95  MB_TYPE_L0 | MB_TYPE_CBP,
96  MB_TYPE_L0L1,
97  MB_TYPE_L0L1 | MB_TYPE_CBP,
98  MB_TYPE_QUANT | MB_TYPE_INTRA,
99  MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
100  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
101  MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
102 };
103 
104 /* as H.263, but only 17 codes */
105 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
106 {
107  int code, sign, val, shift;
108 
109  code = get_vlc2(&s->gb, ff_mv_vlc.table, MV_VLC_BITS, 2);
110  if (code == 0)
111  return pred;
112  if (code < 0)
113  return 0xffff;
114 
115  sign = get_bits1(&s->gb);
116  shift = fcode - 1;
117  val = code;
118  if (shift) {
119  val = (val - 1) << shift;
120  val |= get_bits(&s->gb, shift);
121  val++;
122  }
123  if (sign)
124  val = -val;
125  val += pred;
126 
127  /* modulo decoding */
128  return sign_extend(val, 5 + shift);
129 }
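The sign_extend(val, 5 + shift) at the end performs the modulo wrap required for MPEG motion vectors: the reconstructed component must land in [-(16 << shift), (16 << shift) - 1], where shift = fcode - 1. A minimal standalone sketch of that wrap, illustrative only and not part of mpeg12dec.c (it assumes the pre-wrap value stays within one period of the legal range, which holds here because both the prediction and the decoded delta are range-limited):

static int mv_modulo_wrap(int val, int fcode)
{
    const int shift = fcode - 1;
    const int half  = 16 << shift;          /* 2^(4 + shift) */
    /* equivalent to sign_extend(val, 5 + shift) for |val| < 3 * half */
    if (val < -half)
        val += 2 * half;
    else if (val >= half)
        val -= 2 * half;
    return val;
}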
130 
131 #define MAX_INDEX (64 - 1)
132 #define check_scantable_index(ctx, x) \
133  do { \
134  if ((x) > MAX_INDEX) { \
135  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
136  ctx->mb_x, ctx->mb_y); \
137  return AVERROR_INVALIDDATA; \
138  } \
139  } while (0)
140 
141 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
142  int16_t *block, int n)
143 {
144  int level, i, j, run;
145  RLTable *rl = &ff_rl_mpeg1;
146  uint8_t *const scantable = s->intra_scantable.permutated;
147  const uint16_t *quant_matrix = s->inter_matrix;
148  const int qscale = s->qscale;
149 
150  {
151  OPEN_READER(re, &s->gb);
152  i = -1;
153  // special case for first coefficient, no need to add second VLC table
154  UPDATE_CACHE(re, &s->gb);
155  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
156  level = (3 * qscale * quant_matrix[0]) >> 5;
157  level = (level - 1) | 1;
158  if (GET_CACHE(re, &s->gb) & 0x40000000)
159  level = -level;
160  block[0] = level;
161  i++;
162  SKIP_BITS(re, &s->gb, 2);
163  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
164  goto end;
165  }
166  /* now quantify & encode AC coefficients */
167  for (;;) {
168  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0],
169  TEX_VLC_BITS, 2, 0);
170 
171  if (level != 0) {
172  i += run;
173  if (i > MAX_INDEX)
174  break;
175  j = scantable[i];
176  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
177  level = (level - 1) | 1;
178  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
179  SHOW_SBITS(re, &s->gb, 1);
180  SKIP_BITS(re, &s->gb, 1);
181  } else {
182  /* escape */
183  run = SHOW_UBITS(re, &s->gb, 6) + 1;
184  LAST_SKIP_BITS(re, &s->gb, 6);
185  UPDATE_CACHE(re, &s->gb);
186  level = SHOW_SBITS(re, &s->gb, 8);
187  SKIP_BITS(re, &s->gb, 8);
188  if (level == -128) {
189  level = SHOW_UBITS(re, &s->gb, 8) - 256;
190  SKIP_BITS(re, &s->gb, 8);
191  } else if (level == 0) {
192  level = SHOW_UBITS(re, &s->gb, 8);
193  SKIP_BITS(re, &s->gb, 8);
194  }
195  i += run;
196  if (i > MAX_INDEX)
197  break;
198  j = scantable[i];
199  if (level < 0) {
200  level = -level;
201  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
202  level = (level - 1) | 1;
203  level = -level;
204  } else {
205  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
206  level = (level - 1) | 1;
207  }
208  }
209 
210  block[j] = level;
211  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
212  break;
213  UPDATE_CACHE(re, &s->gb);
214  }
215 end:
216  LAST_SKIP_BITS(re, &s->gb, 2);
217  CLOSE_READER(re, &s->gb);
218  }
219 
220  check_scantable_index(s, i);
221 
222  s->block_last_index[n] = i;
223  return 0;
224 }
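The arithmetic above is the MPEG-1 inter dequantiser: level = ((2 * |QF| + 1) * quantiser_scale * W[j]) >> 5, forced odd by the (level - 1) | 1 step (MPEG-1 oddification, i.e. mismatch control) before the sign is restored. The same computation written out as a plain helper, purely for illustration (no such function exists in this file):

static int mpeg1_dequant_inter(int qf, int qscale, int w)
{
    int a     = qf < 0 ? -qf : qf;                 /* |QF| */
    int level = ((2 * a + 1) * qscale * w) >> 5;   /* divide by 32 */
    level     = (level - 1) | 1;                   /* force odd */
    return qf < 0 ? -level : level;
}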
225 
226 /**
227  * Changing this would eat up any speed benefits it has.
228  * Do not use "fast" flag if you need the code to be robust.
229  */
230 static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s,
231  int16_t *block, int n)
232 {
233  int level, i, j, run;
234  RLTable *rl = &ff_rl_mpeg1;
235  uint8_t *const scantable = s->intra_scantable.permutated;
236  const int qscale = s->qscale;
237 
238  {
239  OPEN_READER(re, &s->gb);
240  i = -1;
241  // Special case for first coefficient, no need to add second VLC table.
242  UPDATE_CACHE(re, &s->gb);
243  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
244  level = (3 * qscale) >> 1;
245  level = (level - 1) | 1;
246  if (GET_CACHE(re, &s->gb) & 0x40000000)
247  level = -level;
248  block[0] = level;
249  i++;
250  SKIP_BITS(re, &s->gb, 2);
251  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
252  goto end;
253  }
254 
255  /* now quantify & encode AC coefficients */
256  for (;;) {
257  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0],
258  TEX_VLC_BITS, 2, 0);
259 
260  if (level != 0) {
261  i += run;
262  if (i > MAX_INDEX)
263  break;
264  j = scantable[i];
265  level = ((level * 2 + 1) * qscale) >> 1;
266  level = (level - 1) | 1;
267  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
268  SHOW_SBITS(re, &s->gb, 1);
269  SKIP_BITS(re, &s->gb, 1);
270  } else {
271  /* escape */
272  run = SHOW_UBITS(re, &s->gb, 6) + 1;
273  LAST_SKIP_BITS(re, &s->gb, 6);
274  UPDATE_CACHE(re, &s->gb);
275  level = SHOW_SBITS(re, &s->gb, 8);
276  SKIP_BITS(re, &s->gb, 8);
277  if (level == -128) {
278  level = SHOW_UBITS(re, &s->gb, 8) - 256;
279  SKIP_BITS(re, &s->gb, 8);
280  } else if (level == 0) {
281  level = SHOW_UBITS(re, &s->gb, 8);
282  SKIP_BITS(re, &s->gb, 8);
283  }
284  i += run;
285  if (i > MAX_INDEX)
286  break;
287  j = scantable[i];
288  if (level < 0) {
289  level = -level;
290  level = ((level * 2 + 1) * qscale) >> 1;
291  level = (level - 1) | 1;
292  level = -level;
293  } else {
294  level = ((level * 2 + 1) * qscale) >> 1;
295  level = (level - 1) | 1;
296  }
297  }
298 
299  block[j] = level;
300  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
301  break;
302  UPDATE_CACHE(re, &s->gb);
303  }
304 end:
305  LAST_SKIP_BITS(re, &s->gb, 2);
306  CLOSE_READER(re, &s->gb);
307  }
308 
309  check_scantable_index(s, i);
310 
311  s->block_last_index[n] = i;
312  return 0;
313 }
314 
315 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
316  int16_t *block, int n)
317 {
318  int level, i, j, run;
319  RLTable *rl = &ff_rl_mpeg1;
320  uint8_t *const scantable = s->intra_scantable.permutated;
321  const uint16_t *quant_matrix;
322  const int qscale = s->qscale;
323  int mismatch;
324 
325  mismatch = 1;
326 
327  {
328  OPEN_READER(re, &s->gb);
329  i = -1;
330  if (n < 4)
331  quant_matrix = s->inter_matrix;
332  else
333  quant_matrix = s->chroma_inter_matrix;
334 
335  // Special case for first coefficient, no need to add second VLC table.
336  UPDATE_CACHE(re, &s->gb);
337  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
338  level = (3 * qscale * quant_matrix[0]) >> 5;
339  if (GET_CACHE(re, &s->gb) & 0x40000000)
340  level = -level;
341  block[0] = level;
342  mismatch ^= level;
343  i++;
344  SKIP_BITS(re, &s->gb, 2);
345  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
346  goto end;
347  }
348 
349  /* now quantify & encode AC coefficients */
350  for (;;) {
351  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0],
352  TEX_VLC_BITS, 2, 0);
353 
354  if (level != 0) {
355  i += run;
356  if (i > MAX_INDEX)
357  break;
358  j = scantable[i];
359  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
360  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
361  SHOW_SBITS(re, &s->gb, 1);
362  SKIP_BITS(re, &s->gb, 1);
363  } else {
364  /* escape */
365  run = SHOW_UBITS(re, &s->gb, 6) + 1;
366  LAST_SKIP_BITS(re, &s->gb, 6);
367  UPDATE_CACHE(re, &s->gb);
368  level = SHOW_SBITS(re, &s->gb, 12);
369  SKIP_BITS(re, &s->gb, 12);
370 
371  i += run;
372  if (i > MAX_INDEX)
373  break;
374  j = scantable[i];
375  if (level < 0) {
376  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
377  level = -level;
378  } else {
379  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
380  }
381  }
382 
383  mismatch ^= level;
384  block[j] = level;
385  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
386  break;
387  UPDATE_CACHE(re, &s->gb);
388  }
389 end:
390  LAST_SKIP_BITS(re, &s->gb, 2);
391  CLOSE_READER(re, &s->gb);
392  }
393  block[63] ^= (mismatch & 1);
394 
395  check_scantable_index(s, i);
396 
397  s->block_last_index[n] = i;
398  return 0;
399 }
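Note the different mismatch control compared with the MPEG-1 routines above: instead of forcing every level odd, the MPEG-2 path accumulates the parity of all dequantised levels in 'mismatch' and finally flips the least significant bit of coefficient 63 so that the coefficient sum comes out odd. A condensed, illustrative-only sketch of the same rule:

static void mpeg2_mismatch_control(int16_t block[64])
{
    int parity = 0;
    for (int i = 0; i < 64; i++)
        parity ^= block[i];       /* LSB of this XOR == parity of the sum */
    if (!(parity & 1))            /* sum even -> toggle LSB of the last coefficient */
        block[63] ^= 1;
}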
400 
401 /**
402  * Changing this would eat up any speed benefits it has.
403  * Do not use "fast" flag if you need the code to be robust.
404  */
405 static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
406  int16_t *block, int n)
407 {
408  int level, i, j, run;
409  RLTable *rl = &ff_rl_mpeg1;
410  uint8_t *const scantable = s->intra_scantable.permutated;
411  const int qscale = s->qscale;
412  OPEN_READER(re, &s->gb);
413  i = -1;
414 
415  // special case for first coefficient, no need to add second VLC table
416  UPDATE_CACHE(re, &s->gb);
417  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
418  level = (3 * qscale) >> 1;
419  if (GET_CACHE(re, &s->gb) & 0x40000000)
420  level = -level;
421  block[0] = level;
422  i++;
423  SKIP_BITS(re, &s->gb, 2);
424  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
425  goto end;
426  }
427 
428  /* now quantify & encode AC coefficients */
429  for (;;) {
430  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
431 
432  if (level != 0) {
433  i += run;
434  if (i > MAX_INDEX)
435  break;
436  j = scantable[i];
437  level = ((level * 2 + 1) * qscale) >> 1;
438  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
439  SHOW_SBITS(re, &s->gb, 1);
440  SKIP_BITS(re, &s->gb, 1);
441  } else {
442  /* escape */
443  run = SHOW_UBITS(re, &s->gb, 6) + 1;
444  LAST_SKIP_BITS(re, &s->gb, 6);
445  UPDATE_CACHE(re, &s->gb);
446  level = SHOW_SBITS(re, &s->gb, 12);
447  SKIP_BITS(re, &s->gb, 12);
448 
449  i += run;
450  if (i > MAX_INDEX)
451  break;
452  j = scantable[i];
453  if (level < 0) {
454  level = ((-level * 2 + 1) * qscale) >> 1;
455  level = -level;
456  } else {
457  level = ((level * 2 + 1) * qscale) >> 1;
458  }
459  }
460 
461  block[j] = level;
462  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF || i > 63)
463  break;
464 
465  UPDATE_CACHE(re, &s->gb);
466  }
467 end:
468  LAST_SKIP_BITS(re, &s->gb, 2);
469  CLOSE_READER(re, &s->gb);
470 
471  check_scantable_index(s, i);
472 
473  s->block_last_index[n] = i;
474  return 0;
475 }
476 
477 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
478  int16_t *block, int n)
479 {
480  int level, dc, diff, i, j, run;
481  int component;
482  RLTable *rl;
483  uint8_t *const scantable = s->intra_scantable.permutated;
484  const uint16_t *quant_matrix;
485  const int qscale = s->qscale;
486  int mismatch;
487 
488  /* DC coefficient */
489  if (n < 4) {
490  quant_matrix = s->intra_matrix;
491  component = 0;
492  } else {
493  quant_matrix = s->chroma_intra_matrix;
494  component = (n & 1) + 1;
495  }
496  diff = decode_dc(&s->gb, component);
497  dc = s->last_dc[component];
498  dc += diff;
499  s->last_dc[component] = dc;
500  block[0] = dc * (1 << (3 - s->intra_dc_precision));
501  ff_tlog(s->avctx, "dc=%d\n", block[0]);
502  mismatch = block[0] ^ 1;
503  i = 0;
504  if (s->intra_vlc_format)
505  rl = &ff_rl_mpeg2;
506  else
507  rl = &ff_rl_mpeg1;
508 
509  {
510  OPEN_READER(re, &s->gb);
511  /* now quantify & encode AC coefficients */
512  for (;;) {
513  UPDATE_CACHE(re, &s->gb);
514  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0],
515  TEX_VLC_BITS, 2, 0);
516 
517  if (level == 127) {
518  break;
519  } else if (level != 0) {
520  i += run;
521  if (i > MAX_INDEX)
522  break;
523  j = scantable[i];
524  level = (level * qscale * quant_matrix[j]) >> 4;
525  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
526  SHOW_SBITS(re, &s->gb, 1);
527  LAST_SKIP_BITS(re, &s->gb, 1);
528  } else {
529  /* escape */
530  run = SHOW_UBITS(re, &s->gb, 6) + 1;
531  SKIP_BITS(re, &s->gb, 6);
532  level = SHOW_SBITS(re, &s->gb, 12);
533  LAST_SKIP_BITS(re, &s->gb, 12);
534  i += run;
535  if (i > MAX_INDEX)
536  break;
537  j = scantable[i];
538  if (level < 0) {
539  level = (-level * qscale * quant_matrix[j]) >> 4;
540  level = -level;
541  } else {
542  level = (level * qscale * quant_matrix[j]) >> 4;
543  }
544  }
545 
546  mismatch ^= level;
547  block[j] = level;
548  }
549  CLOSE_READER(re, &s->gb);
550  }
551  block[63] ^= mismatch & 1;
552 
553  check_scantable_index(s, i);
554 
555  s->block_last_index[n] = i;
556  return 0;
557 }
558 
559 /**
560  * Changing this would eat up any speed benefits it has.
561  * Do not use "fast" flag if you need the code to be robust.
562  */
563 static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s,
564  int16_t *block, int n)
565 {
566  int level, dc, diff, i, j, run;
567  int component;
568  RLTable *rl;
569  uint8_t *const scantable = s->intra_scantable.permutated;
570  const uint16_t *quant_matrix;
571  const int qscale = s->qscale;
572 
573  /* DC coefficient */
574  if (n < 4) {
575  quant_matrix = s->intra_matrix;
576  component = 0;
577  } else {
578  quant_matrix = s->chroma_intra_matrix;
579  component = (n & 1) + 1;
580  }
581  diff = decode_dc(&s->gb, component);
582  dc = s->last_dc[component];
583  dc += diff;
584  s->last_dc[component] = dc;
585  block[0] = dc * (1 << (3 - s->intra_dc_precision));
586  i = 0;
587  if (s->intra_vlc_format)
588  rl = &ff_rl_mpeg2;
589  else
590  rl = &ff_rl_mpeg1;
591 
592  {
593  OPEN_READER(re, &s->gb);
594  /* now quantify & encode AC coefficients */
595  for (;;) {
596  UPDATE_CACHE(re, &s->gb);
597  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0],
598  TEX_VLC_BITS, 2, 0);
599 
600  if (level >= 64 || i > 63) {
601  break;
602  } else if (level != 0) {
603  i += run;
604  j = scantable[i];
605  level = (level * qscale * quant_matrix[j]) >> 4;
606  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
607  SHOW_SBITS(re, &s->gb, 1);
608  LAST_SKIP_BITS(re, &s->gb, 1);
609  } else {
610  /* escape */
611  run = SHOW_UBITS(re, &s->gb, 6) + 1;
612  SKIP_BITS(re, &s->gb, 6);
613  level = SHOW_SBITS(re, &s->gb, 12);
614  LAST_SKIP_BITS(re, &s->gb, 12);
615  i += run;
616  j = scantable[i];
617  if (level < 0) {
618  level = (-level * qscale * quant_matrix[j]) >> 4;
619  level = -level;
620  } else {
621  level = (level * qscale * quant_matrix[j]) >> 4;
622  }
623  }
624 
625  block[j] = level;
626  }
627  CLOSE_READER(re, &s->gb);
628  }
629 
630  check_scantable_index(s, i);
631 
632  s->block_last_index[n] = i;
633  return 0;
634 }
635 
636 /******************************************/
637 /* decoding */
638 
639 static inline int get_dmv(MpegEncContext *s)
640 {
641  if (get_bits1(&s->gb))
642  return 1 - (get_bits1(&s->gb) << 1);
643  else
644  return 0;
645 }
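get_dmv() reads one dual-prime differential motion vector component. H.262 codes it with the VLC '0' -> 0, '10' -> +1, '11' -> -1, which is exactly the mapping produced by 1 - (get_bits1(&s->gb) << 1) applied to the second bit.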
646 
647 /* motion type (for MPEG-2) */
648 #define MT_FIELD 1
649 #define MT_FRAME 2
650 #define MT_16X8 2
651 #define MT_DMV 3
652 
653 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
654 {
655  int i, j, k, cbp, val, mb_type, motion_type;
656  const int mb_block_count = 4 + (1 << s->chroma_format);
657  int ret;
658 
659  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
660 
661  av_assert2(s->mb_skipped == 0);
662 
663  if (s->mb_skip_run-- != 0) {
664  if (s->pict_type == AV_PICTURE_TYPE_P) {
665  s->mb_skipped = 1;
666  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
667  MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
668  } else {
669  int mb_type;
670 
671  if (s->mb_x)
672  mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
673  else
674  // FIXME not sure if this is allowed in MPEG at all
675  mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
676  if (IS_INTRA(mb_type)) {
677  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
678  return AVERROR_INVALIDDATA;
679  }
680  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
681  mb_type | MB_TYPE_SKIP;
682 
683  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
684  s->mb_skipped = 1;
685  }
686 
687  return 0;
688  }
689 
690  switch (s->pict_type) {
691  default:
692  case AV_PICTURE_TYPE_I:
693  if (get_bits1(&s->gb) == 0) {
694  if (get_bits1(&s->gb) == 0) {
695  av_log(s->avctx, AV_LOG_ERROR,
696  "Invalid mb type in I-frame at %d %d\n",
697  s->mb_x, s->mb_y);
698  return AVERROR_INVALIDDATA;
699  }
700  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
701  } else {
702  mb_type = MB_TYPE_INTRA;
703  }
704  break;
705  case AV_PICTURE_TYPE_P:
706  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
707  if (mb_type < 0) {
708  av_log(s->avctx, AV_LOG_ERROR,
709  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
710  return AVERROR_INVALIDDATA;
711  }
712  mb_type = ptype2mb_type[mb_type];
713  break;
714  case AV_PICTURE_TYPE_B:
715  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
716  if (mb_type < 0) {
717  av_log(s->avctx, AV_LOG_ERROR,
718  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
719  return AVERROR_INVALIDDATA;
720  }
721  mb_type = btype2mb_type[mb_type];
722  break;
723  }
724  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
725 // motion_type = 0; /* avoid warning */
726  if (IS_INTRA(mb_type)) {
727  s->bdsp.clear_blocks(s->block[0]);
728 
729  if (!s->chroma_y_shift)
730  s->bdsp.clear_blocks(s->block[6]);
731 
732  /* compute DCT type */
733  // FIXME: add an interlaced_dct coded var?
734  if (s->picture_structure == PICT_FRAME &&
735  !s->frame_pred_frame_dct)
736  s->interlaced_dct = get_bits1(&s->gb);
737 
738  if (IS_QUANT(mb_type))
739  s->qscale = mpeg_get_qscale(s);
740 
741  if (s->concealment_motion_vectors) {
742  /* just parse them */
743  if (s->picture_structure != PICT_FRAME)
744  skip_bits1(&s->gb); /* field select */
745 
746  s->mv[0][0][0] =
747  s->last_mv[0][0][0] =
748  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
749  s->last_mv[0][0][0]);
750  s->mv[0][0][1] =
751  s->last_mv[0][0][1] =
752  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
753  s->last_mv[0][0][1]);
754 
755  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
756  } else {
757  /* reset mv prediction */
758  memset(s->last_mv, 0, sizeof(s->last_mv));
759  }
760  s->mb_intra = 1;
761  // if 1, we memcpy blocks in xvmcvideo
762  if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks)
763  ff_xvmc_pack_pblocks(s, -1); // inter are always full blocks
764 
765  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
766  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
767  for (i = 0; i < 6; i++)
768  mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i);
769  } else {
770  for (i = 0; i < mb_block_count; i++)
771  if ((ret = mpeg2_decode_block_intra(s, *s->pblocks[i], i)) < 0)
772  return ret;
773  }
774  } else {
775  for (i = 0; i < 6; i++) {
776  ret = ff_mpeg1_decode_block_intra(&s->gb,
777  s->intra_matrix,
778  s->intra_scantable.permutated,
779  s->last_dc, *s->pblocks[i],
780  i, s->qscale);
781  if (ret < 0) {
782  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
783  s->mb_x, s->mb_y);
784  return ret;
785  }
786 
787  s->block_last_index[i] = ret;
788  }
789  }
790  } else {
791  if (mb_type & MB_TYPE_ZERO_MV) {
792  av_assert2(mb_type & MB_TYPE_CBP);
793 
794  s->mv_dir = MV_DIR_FORWARD;
795  if (s->picture_structure == PICT_FRAME) {
796  if (s->picture_structure == PICT_FRAME
797  && !s->frame_pred_frame_dct)
798  s->interlaced_dct = get_bits1(&s->gb);
799  s->mv_type = MV_TYPE_16X16;
800  } else {
801  s->mv_type = MV_TYPE_FIELD;
802  mb_type |= MB_TYPE_INTERLACED;
803  s->field_select[0][0] = s->picture_structure - 1;
804  }
805 
806  if (IS_QUANT(mb_type))
807  s->qscale = mpeg_get_qscale(s);
808 
809  s->last_mv[0][0][0] = 0;
810  s->last_mv[0][0][1] = 0;
811  s->last_mv[0][1][0] = 0;
812  s->last_mv[0][1][1] = 0;
813  s->mv[0][0][0] = 0;
814  s->mv[0][0][1] = 0;
815  } else {
816  av_assert2(mb_type & MB_TYPE_L0L1);
817  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
818  /* get additional motion vector type */
819  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
820  motion_type = MT_FRAME;
821  } else {
822  motion_type = get_bits(&s->gb, 2);
823  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
824  s->interlaced_dct = get_bits1(&s->gb);
825  }
826 
827  if (IS_QUANT(mb_type))
828  s->qscale = mpeg_get_qscale(s);
829 
830  /* motion vectors */
831  s->mv_dir = (mb_type >> 13) & 3;
832  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
833  switch (motion_type) {
834  case MT_FRAME: /* or MT_16X8 */
835  if (s->picture_structure == PICT_FRAME) {
836  mb_type |= MB_TYPE_16x16;
837  s->mv_type = MV_TYPE_16X16;
838  for (i = 0; i < 2; i++) {
839  if (USES_LIST(mb_type, i)) {
840  /* MT_FRAME */
841  s->mv[i][0][0] =
842  s->last_mv[i][0][0] =
843  s->last_mv[i][1][0] =
844  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
845  s->last_mv[i][0][0]);
846  s->mv[i][0][1] =
847  s->last_mv[i][0][1] =
848  s->last_mv[i][1][1] =
849  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
850  s->last_mv[i][0][1]);
851  /* full_pel: only for MPEG-1 */
852  if (s->full_pel[i]) {
853  s->mv[i][0][0] *= 2;
854  s->mv[i][0][1] *= 2;
855  }
856  }
857  }
858  } else {
859  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
860  s->mv_type = MV_TYPE_16X8;
861  for (i = 0; i < 2; i++) {
862  if (USES_LIST(mb_type, i)) {
863  /* MT_16X8 */
864  for (j = 0; j < 2; j++) {
865  s->field_select[i][j] = get_bits1(&s->gb);
866  for (k = 0; k < 2; k++) {
867  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
868  s->last_mv[i][j][k]);
869  s->last_mv[i][j][k] = val;
870  s->mv[i][j][k] = val;
871  }
872  }
873  }
874  }
875  }
876  break;
877  case MT_FIELD:
878  s->mv_type = MV_TYPE_FIELD;
879  if (s->picture_structure == PICT_FRAME) {
880  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
881  for (i = 0; i < 2; i++) {
882  if (USES_LIST(mb_type, i)) {
883  for (j = 0; j < 2; j++) {
884  s->field_select[i][j] = get_bits1(&s->gb);
885  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
886  s->last_mv[i][j][0]);
887  s->last_mv[i][j][0] = val;
888  s->mv[i][j][0] = val;
889  ff_tlog(s->avctx, "fmx=%d\n", val);
890  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
891  s->last_mv[i][j][1] >> 1);
892  s->last_mv[i][j][1] = 2 * val;
893  s->mv[i][j][1] = val;
894  ff_tlog(s->avctx, "fmy=%d\n", val);
895  }
896  }
897  }
898  } else {
899  av_assert0(!s->progressive_sequence);
900  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
901  for (i = 0; i < 2; i++) {
902  if (USES_LIST(mb_type, i)) {
903  s->field_select[i][0] = get_bits1(&s->gb);
904  for (k = 0; k < 2; k++) {
905  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
906  s->last_mv[i][0][k]);
907  s->last_mv[i][0][k] = val;
908  s->last_mv[i][1][k] = val;
909  s->mv[i][0][k] = val;
910  }
911  }
912  }
913  }
914  break;
915  case MT_DMV:
916  if (s->progressive_sequence){
917  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
918  return AVERROR_INVALIDDATA;
919  }
920  s->mv_type = MV_TYPE_DMV;
921  for (i = 0; i < 2; i++) {
922  if (USES_LIST(mb_type, i)) {
923  int dmx, dmy, mx, my, m;
924  const int my_shift = s->picture_structure == PICT_FRAME;
925 
926  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
927  s->last_mv[i][0][0]);
928  s->last_mv[i][0][0] = mx;
929  s->last_mv[i][1][0] = mx;
930  dmx = get_dmv(s);
931  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
932  s->last_mv[i][0][1] >> my_shift);
933  dmy = get_dmv(s);
934 
935 
936  s->last_mv[i][0][1] = my * (1 << my_shift);
937  s->last_mv[i][1][1] = my * (1 << my_shift);
938 
939  s->mv[i][0][0] = mx;
940  s->mv[i][0][1] = my;
941  s->mv[i][1][0] = mx; // not used
942  s->mv[i][1][1] = my; // not used
943 
944  if (s->picture_structure == PICT_FRAME) {
945  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
946 
947  // m = 1 + 2 * s->top_field_first;
948  m = s->top_field_first ? 1 : 3;
949 
950  /* top -> top pred */
951  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
952  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
953  m = 4 - m;
954  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
955  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
956  } else {
957  mb_type |= MB_TYPE_16x16;
958 
959  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
960  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
961  if (s->picture_structure == PICT_TOP_FIELD)
962  s->mv[i][2][1]--;
963  else
964  s->mv[i][2][1]++;
965  }
966  }
967  }
968  break;
969  default:
970  av_log(s->avctx, AV_LOG_ERROR,
971  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
972  return AVERROR_INVALIDDATA;
973  }
974  }
975 
976  s->mb_intra = 0;
977  if (HAS_CBP(mb_type)) {
978  s->bdsp.clear_blocks(s->block[0]);
979 
980  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
981  if (mb_block_count > 6) {
982  cbp *= 1 << mb_block_count - 6;
983  cbp |= get_bits(&s->gb, mb_block_count - 6);
984  s->bdsp.clear_blocks(s->block[6]);
985  }
986  if (cbp <= 0) {
987  av_log(s->avctx, AV_LOG_ERROR,
988  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
989  return AVERROR_INVALIDDATA;
990  }
991 
992  // if 1, we memcpy blocks in xvmcvideo
993  if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks)
994  ff_xvmc_pack_pblocks(s, cbp);
995 
996  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
997  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
998  for (i = 0; i < 6; i++) {
999  if (cbp & 32)
1000  mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i);
1001  else
1002  s->block_last_index[i] = -1;
1003  cbp += cbp;
1004  }
1005  } else {
1006  cbp <<= 12 - mb_block_count;
1007 
1008  for (i = 0; i < mb_block_count; i++) {
1009  if (cbp & (1 << 11)) {
1010  if ((ret = mpeg2_decode_block_non_intra(s, *s->pblocks[i], i)) < 0)
1011  return ret;
1012  } else {
1013  s->block_last_index[i] = -1;
1014  }
1015  cbp += cbp;
1016  }
1017  }
1018  } else {
1019  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
1020  for (i = 0; i < 6; i++) {
1021  if (cbp & 32)
1022  mpeg1_fast_decode_block_inter(s, *s->pblocks[i], i);
1023  else
1024  s->block_last_index[i] = -1;
1025  cbp += cbp;
1026  }
1027  } else {
1028  for (i = 0; i < 6; i++) {
1029  if (cbp & 32) {
1030  if ((ret = mpeg1_decode_block_inter(s, *s->pblocks[i], i)) < 0)
1031  return ret;
1032  } else {
1033  s->block_last_index[i] = -1;
1034  }
1035  cbp += cbp;
1036  }
1037  }
1038  }
1039  } else {
1040  for (i = 0; i < 12; i++)
1041  s->block_last_index[i] = -1;
1042  }
1043  }
1044 
1045  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
1046 
1047  return 0;
1048 }
1049 
1050 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
1051 {
1052  Mpeg1Context *s = avctx->priv_data;
1053  MpegEncContext *s2 = &s->mpeg_enc_ctx;
1054 
1055  if ( avctx->codec_tag != AV_RL32("VCR2")
1056  && avctx->codec_tag != AV_RL32("BW10"))
1057  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
1058  ff_mpv_decode_init(s2, avctx);
1059 
1060  /* we need some permutation to store matrices,
1061  * until the decoder sets the real permutation. */
1062  ff_mpv_idct_init(s2);
1063  ff_mpeg12_common_init(&s->mpeg_enc_ctx);
1064  ff_mpeg12_init_vlcs();
1065 
1066  s2->chroma_format = 1;
1067  s->mpeg_enc_ctx_allocated = 0;
1068  s->mpeg_enc_ctx.picture_number = 0;
1069  s->repeat_field = 0;
1070  s->mpeg_enc_ctx.codec_id = avctx->codec->id;
1071  avctx->color_range = AVCOL_RANGE_MPEG;
1072  return 0;
1073 }
1074 
1075 #if HAVE_THREADS
1076 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
1077  const AVCodecContext *avctx_from)
1078 {
1079  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
1080  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
1081  int err;
1082 
1083  if (avctx == avctx_from ||
1084  !ctx_from->mpeg_enc_ctx_allocated ||
1085  !s1->context_initialized)
1086  return 0;
1087 
1088  err = ff_mpeg_update_thread_context(avctx, avctx_from);
1089  if (err)
1090  return err;
1091 
1092  if (!ctx->mpeg_enc_ctx_allocated)
1093  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
1094 
1095  if (!(s->pict_type == AV_PICTURE_TYPE_B || s->low_delay))
1096  s->picture_number++;
1097 
1098  return 0;
1099 }
1100 #endif
1101 
1102 static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
1103  const uint8_t *new_perm)
1104 {
1105  uint16_t temp_matrix[64];
1106  int i;
1107 
1108  memcpy(temp_matrix, matrix, 64 * sizeof(uint16_t));
1109 
1110  for (i = 0; i < 64; i++)
1111  matrix[new_perm[i]] = temp_matrix[old_perm[i]];
1112 }
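quant_matrix_rebuild() keeps a weighting matrix that is addressed through the IDCT coefficient permutation valid after that permutation changes: the weight stored for zig-zag position i is moved from slot old_perm[i] to slot new_perm[i]. A small usage sketch with made-up permutations (illustrative only, not FFmpeg data):

uint16_t m[64];
uint8_t  id[64], swapped[64];
for (int i = 0; i < 64; i++) {
    id[i] = swapped[i] = i;      /* identity permutation */
    m[i]  = 16;                  /* flat matrix stored under 'id' */
}
swapped[1] = 8;                  /* new IDCT wants coefficient 1 at slot 8 */
swapped[8] = 1;
m[1] = 24;                       /* weight for zig-zag position 1 */
quant_matrix_rebuild(m, id, swapped);
/* now m[8] == 24: the weight follows the coefficient to its new slot */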
1113 
1114 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
1115 #if CONFIG_MPEG1_NVDEC_HWACCEL
1116  AV_PIX_FMT_CUDA,
1117 #endif
1118 #if CONFIG_MPEG1_XVMC_HWACCEL
1119  AV_PIX_FMT_XVMC,
1120 #endif
1121 #if CONFIG_MPEG1_VDPAU_HWACCEL
1122  AV_PIX_FMT_VDPAU,
1123 #endif
1124  AV_PIX_FMT_YUV420P,
1125  AV_PIX_FMT_NONE
1126 };
1127 
1128 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
1129 #if CONFIG_MPEG2_NVDEC_HWACCEL
1130  AV_PIX_FMT_CUDA,
1131 #endif
1132 #if CONFIG_MPEG2_XVMC_HWACCEL
1133  AV_PIX_FMT_XVMC,
1134 #endif
1135 #if CONFIG_MPEG2_VDPAU_HWACCEL
1136  AV_PIX_FMT_VDPAU,
1137 #endif
1138 #if CONFIG_MPEG2_DXVA2_HWACCEL
1139  AV_PIX_FMT_DXVA2_VLD,
1140 #endif
1141 #if CONFIG_MPEG2_D3D11VA_HWACCEL
1142  AV_PIX_FMT_D3D11VA_VLD,
1143  AV_PIX_FMT_D3D11,
1144 #endif
1145 #if CONFIG_MPEG2_VAAPI_HWACCEL
1146  AV_PIX_FMT_VAAPI,
1147 #endif
1148 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
1149  AV_PIX_FMT_VIDEOTOOLBOX,
1150 #endif
1151  AV_PIX_FMT_YUV420P,
1152  AV_PIX_FMT_NONE
1153 };
1154 
1155 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
1156  AV_PIX_FMT_YUV422P,
1157  AV_PIX_FMT_NONE
1158 };
1159 
1160 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
1161  AV_PIX_FMT_YUV444P,
1162  AV_PIX_FMT_NONE
1163 };
1164 
1165 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
1166 {
1167  Mpeg1Context *s1 = avctx->priv_data;
1168  MpegEncContext *s = &s1->mpeg_enc_ctx;
1169  const enum AVPixelFormat *pix_fmts;
1170 
1171  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
1172  return AV_PIX_FMT_GRAY8;
1173 
1174  if (s->chroma_format < 2)
1175  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
1176  mpeg1_hwaccel_pixfmt_list_420 :
1177  mpeg2_hwaccel_pixfmt_list_420;
1178  else if (s->chroma_format == 2)
1179  pix_fmts = mpeg12_pixfmt_list_422;
1180  else
1181  pix_fmts = mpeg12_pixfmt_list_444;
1182 
1183  return ff_thread_get_format(avctx, pix_fmts);
1184 }
1185 
1186 static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx)
1187 {
1188  // until then pix_fmt may be changed right after codec init
1189  if (avctx->hwaccel)
1190  if (avctx->idct_algo == FF_IDCT_AUTO)
1191  avctx->idct_algo = FF_IDCT_NONE;
1192 
1193  if (avctx->hwaccel && avctx->pix_fmt == AV_PIX_FMT_XVMC) {
1194  Mpeg1Context *s1 = avctx->priv_data;
1195  MpegEncContext *s = &s1->mpeg_enc_ctx;
1196 
1197  s->pack_pblocks = 1;
1198  }
1199 }
1200 
1201 /* Call this function when we know all parameters.
1202  * It may be called in different places for MPEG-1 and MPEG-2. */
1203 static int mpeg_decode_postinit(AVCodecContext *avctx)
1204 {
1205  Mpeg1Context *s1 = avctx->priv_data;
1206  MpegEncContext *s = &s1->mpeg_enc_ctx;
1207  uint8_t old_permutation[64];
1208  int ret;
1209 
1210  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
1211  // MPEG-1 aspect
1212  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s->aspect_ratio_info], 255);
1213  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
1214  } else { // MPEG-2
1215  // MPEG-2 aspect
1216  if (s->aspect_ratio_info > 1) {
1217  AVRational dar =
1218  av_mul_q(av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info],
1219  (AVRational) { s1->pan_scan.width,
1220  s1->pan_scan.height }),
1221  (AVRational) { s->width, s->height });
1222 
1223  /* We ignore the spec here and guess a bit as reality does not
1224  * match the spec, see for example res_change_ffmpeg_aspect.ts
1225  * and sequence-display-aspect.mpg.
1226  * issue1613, 621, 562 */
1227  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
1228  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
1229  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
1230  s->avctx->sample_aspect_ratio =
1231  av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info],
1232  (AVRational) { s->width, s->height });
1233  } else {
1234  s->avctx->sample_aspect_ratio =
1235  av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info],
1236  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
1237 // issue1613 4/3 16/9 -> 16/9
1238 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
1239 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
1240 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
1241  ff_dlog(avctx, "aspect A %d/%d\n",
1242  ff_mpeg2_aspect[s->aspect_ratio_info].num,
1243  ff_mpeg2_aspect[s->aspect_ratio_info].den);
1244  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
1245  s->avctx->sample_aspect_ratio.den);
1246  }
1247  } else {
1248  s->avctx->sample_aspect_ratio =
1249  ff_mpeg2_aspect[s->aspect_ratio_info];
1250  }
1251  } // MPEG-2
1252 
1253  if (av_image_check_sar(s->width, s->height,
1254  avctx->sample_aspect_ratio) < 0) {
1255  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1256  avctx->sample_aspect_ratio.num,
1257  avctx->sample_aspect_ratio.den);
1258  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
1259  }
1260 
1261  if ((s1->mpeg_enc_ctx_allocated == 0) ||
1262  avctx->coded_width != s->width ||
1263  avctx->coded_height != s->height ||
1264  s1->save_width != s->width ||
1265  s1->save_height != s->height ||
1266  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
1267  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
1268  0) {
1269  if (s1->mpeg_enc_ctx_allocated) {
1270  ParseContext pc = s->parse_context;
1271  s->parse_context.buffer = 0;
1272  ff_mpv_common_end(s);
1273  s->parse_context = pc;
1274  s1->mpeg_enc_ctx_allocated = 0;
1275  }
1276 
1277  ret = ff_set_dimensions(avctx, s->width, s->height);
1278  if (ret < 0)
1279  return ret;
1280 
1281  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate) {
1282  avctx->rc_max_rate = s->bit_rate;
1283  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
1284  (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
1285  avctx->bit_rate = s->bit_rate;
1286  }
1287  s1->save_aspect = s->avctx->sample_aspect_ratio;
1288  s1->save_width = s->width;
1289  s1->save_height = s->height;
1290  s1->save_progressive_seq = s->progressive_sequence;
1291 
1292  /* low_delay may be forced, in this case we will have B-frames
1293  * that behave like P-frames. */
1294  avctx->has_b_frames = !s->low_delay;
1295 
1296  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
1297  // MPEG-1 fps
1298  avctx->framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];
1299  avctx->ticks_per_frame = 1;
1300 
1301  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
1302  } else { // MPEG-2
1303  // MPEG-2 fps
1304  av_reduce(&s->avctx->framerate.num,
1305  &s->avctx->framerate.den,
1306  ff_mpeg12_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num,
1307  ff_mpeg12_frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den,
1308  1 << 30);
1309  avctx->ticks_per_frame = 2;
1310 
1311  switch (s->chroma_format) {
1312  case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
1313  case 2:
1314  case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
1315  default: av_assert0(0);
1316  }
1317  } // MPEG-2
1318 
1319  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1320  setup_hwaccel_for_pixfmt(avctx);
1321 
1322  /* Quantization matrices may need reordering
1323  * if DCT permutation is changed. */
1324  memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t));
1325 
1326  ff_mpv_idct_init(s);
1327  if ((ret = ff_mpv_common_init(s)) < 0)
1328  return ret;
1329 
1330  quant_matrix_rebuild(s->intra_matrix, old_permutation, s->idsp.idct_permutation);
1331  quant_matrix_rebuild(s->inter_matrix, old_permutation, s->idsp.idct_permutation);
1332  quant_matrix_rebuild(s->chroma_intra_matrix, old_permutation, s->idsp.idct_permutation);
1333  quant_matrix_rebuild(s->chroma_inter_matrix, old_permutation, s->idsp.idct_permutation);
1334 
1335  s1->mpeg_enc_ctx_allocated = 1;
1336  }
1337  return 0;
1338 }
1339 
1340 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1341  int buf_size)
1342 {
1343  Mpeg1Context *s1 = avctx->priv_data;
1344  MpegEncContext *s = &s1->mpeg_enc_ctx;
1345  int ref, f_code, vbv_delay;
1346 
1347  init_get_bits(&s->gb, buf, buf_size * 8);
1348 
1349  ref = get_bits(&s->gb, 10); /* temporal ref */
1350  s->pict_type = get_bits(&s->gb, 3);
1351  if (s->pict_type == 0 || s->pict_type > 3)
1352  return AVERROR_INVALIDDATA;
1353 
1354  vbv_delay = get_bits(&s->gb, 16);
1355  s->vbv_delay = vbv_delay;
1356  if (s->pict_type == AV_PICTURE_TYPE_P ||
1357  s->pict_type == AV_PICTURE_TYPE_B) {
1358  s->full_pel[0] = get_bits1(&s->gb);
1359  f_code = get_bits(&s->gb, 3);
1360  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1361  return AVERROR_INVALIDDATA;
1362  f_code += !f_code;
1363  s->mpeg_f_code[0][0] = f_code;
1364  s->mpeg_f_code[0][1] = f_code;
1365  }
1366  if (s->pict_type == AV_PICTURE_TYPE_B) {
1367  s->full_pel[1] = get_bits1(&s->gb);
1368  f_code = get_bits(&s->gb, 3);
1369  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1370  return AVERROR_INVALIDDATA;
1371  f_code += !f_code;
1372  s->mpeg_f_code[1][0] = f_code;
1373  s->mpeg_f_code[1][1] = f_code;
1374  }
1375  s->current_picture.f->pict_type = s->pict_type;
1376  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1377 
1378  if (avctx->debug & FF_DEBUG_PICT_INFO)
1379  av_log(avctx, AV_LOG_DEBUG,
1380  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1381 
1382  s->y_dc_scale = 8;
1383  s->c_dc_scale = 8;
1384  return 0;
1385 }
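For reference, the fixed-length part of the MPEG-1 picture header parsed above is: temporal_reference (10 bits), picture_coding_type (3 bits; 1 = I, 2 = P, 3 = B), vbv_delay (16 bits); P- and B-pictures then carry full_pel_forward_vector (1 bit) and forward_f_code (3 bits), and B-pictures additionally full_pel_backward_vector (1 bit) and backward_f_code (3 bits). An f_code value of 0 is forbidden by the standard, which is why it is either rejected or bumped to 1 above.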
1386 
1387 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
1388 {
1389  MpegEncContext *s = &s1->mpeg_enc_ctx;
1390  int horiz_size_ext, vert_size_ext;
1391  int bit_rate_ext;
1392  AVCPBProperties *cpb_props;
1393 
1394  skip_bits(&s->gb, 1); /* profile and level esc*/
1395  s->avctx->profile = get_bits(&s->gb, 3);
1396  s->avctx->level = get_bits(&s->gb, 4);
1397  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1398  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1399 
1400  if (!s->chroma_format) {
1401  s->chroma_format = 1;
1402  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1403  }
1404 
1405  horiz_size_ext = get_bits(&s->gb, 2);
1406  vert_size_ext = get_bits(&s->gb, 2);
1407  s->width |= (horiz_size_ext << 12);
1408  s->height |= (vert_size_ext << 12);
1409  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1410  s->bit_rate += (bit_rate_ext << 18) * 400LL;
1411  check_marker(s->avctx, &s->gb, "after bit rate extension");
1412  s1->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1413 
1414  s->low_delay = get_bits1(&s->gb);
1415  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1416  s->low_delay = 1;
1417 
1418  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1419  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
1420 
1421  ff_dlog(s->avctx, "sequence extension\n");
1422  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1423 
1424  if (cpb_props = ff_add_cpb_side_data(s->avctx)) {
1425  cpb_props->buffer_size = s1->rc_buffer_size;
1426  if (s->bit_rate != 0x3FFFF*400)
1427  cpb_props->max_bitrate = s->bit_rate;
1428  }
1429 
1430  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1431  av_log(s->avctx, AV_LOG_DEBUG,
1432  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1433  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1434  s1->rc_buffer_size, s->bit_rate);
1435 }
1436 
1437 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
1438 {
1439  MpegEncContext *s = &s1->mpeg_enc_ctx;
1440  int color_description, w, h;
1441 
1442  skip_bits(&s->gb, 3); /* video format */
1443  color_description = get_bits1(&s->gb);
1444  if (color_description) {
1445  s->avctx->color_primaries = get_bits(&s->gb, 8);
1446  s->avctx->color_trc = get_bits(&s->gb, 8);
1447  s->avctx->colorspace = get_bits(&s->gb, 8);
1448  }
1449  w = get_bits(&s->gb, 14);
1450  skip_bits(&s->gb, 1); // marker
1451  h = get_bits(&s->gb, 14);
1452  // remaining 3 bits are zero padding
1453 
1454  s1->pan_scan.width = 16 * w;
1455  s1->pan_scan.height = 16 * h;
1456 
1457  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1458  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1459 }
1460 
1461 static void mpeg_decode_pan_scan_extension(Mpeg1Context *s1)
1462 {
1463  MpegEncContext *s = &s1->mpeg_enc_ctx;
1464  int i, nofco;
1465 
1466  nofco = 1;
1467  if (s->progressive_sequence) {
1468  if (s->repeat_first_field) {
1469  nofco++;
1470  if (s->top_field_first)
1471  nofco++;
1472  }
1473  } else {
1474  if (s->picture_structure == PICT_FRAME) {
1475  nofco++;
1476  if (s->repeat_first_field)
1477  nofco++;
1478  }
1479  }
1480  for (i = 0; i < nofco; i++) {
1481  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1482  skip_bits(&s->gb, 1); // marker
1483  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1484  skip_bits(&s->gb, 1); // marker
1485  }
1486 
1487  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1488  av_log(s->avctx, AV_LOG_DEBUG,
1489  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1490  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1491  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1492  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1493 }
1494 
1495 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1496  uint16_t matrix1[64], int intra)
1497 {
1498  int i;
1499 
1500  for (i = 0; i < 64; i++) {
1501  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1502  int v = get_bits(&s->gb, 8);
1503  if (v == 0) {
1504  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1505  return AVERROR_INVALIDDATA;
1506  }
1507  if (intra && i == 0 && v != 8) {
1508  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1509  v = 8; // needed by pink.mpg / issue1046
1510  }
1511  matrix0[j] = v;
1512  if (matrix1)
1513  matrix1[j] = v;
1514  }
1515  return 0;
1516 }
1517 
1518 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
1519 {
1520  ff_dlog(s->avctx, "matrix extension\n");
1521 
1522  if (get_bits1(&s->gb))
1523  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1524  if (get_bits1(&s->gb))
1525  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1526  if (get_bits1(&s->gb))
1527  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1528  if (get_bits1(&s->gb))
1529  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1530 }
1531 
1532 static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
1533 {
1534  MpegEncContext *s = &s1->mpeg_enc_ctx;
1535 
1536  s->full_pel[0] = s->full_pel[1] = 0;
1537  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1538  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1539  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1540  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1541  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1542  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1543  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1544  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1545  if (!s->pict_type && s1->mpeg_enc_ctx_allocated) {
1546  av_log(s->avctx, AV_LOG_ERROR,
1547  "Missing picture start code, guessing missing values\n");
1548  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1549  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1550  s->pict_type = AV_PICTURE_TYPE_I;
1551  else
1552  s->pict_type = AV_PICTURE_TYPE_P;
1553  } else
1554  s->pict_type = AV_PICTURE_TYPE_B;
1555  s->current_picture.f->pict_type = s->pict_type;
1556  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1557  }
1558 
1559  s->intra_dc_precision = get_bits(&s->gb, 2);
1560  s->picture_structure = get_bits(&s->gb, 2);
1561  s->top_field_first = get_bits1(&s->gb);
1562  s->frame_pred_frame_dct = get_bits1(&s->gb);
1563  s->concealment_motion_vectors = get_bits1(&s->gb);
1564  s->q_scale_type = get_bits1(&s->gb);
1565  s->intra_vlc_format = get_bits1(&s->gb);
1566  s->alternate_scan = get_bits1(&s->gb);
1567  s->repeat_first_field = get_bits1(&s->gb);
1568  s->chroma_420_type = get_bits1(&s->gb);
1569  s->progressive_frame = get_bits1(&s->gb);
1570 
1571  if (s->alternate_scan) {
1572  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
1573  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
1574  } else {
1575  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
1576  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
1577  }
1578 
1579  /* composite display not parsed */
1580  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1581  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1582  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1583  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1584  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1585  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1586  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1587  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1588  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1589 }
1590 
1591 static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
1592 {
1593  AVCodecContext *avctx = s->avctx;
1594  Mpeg1Context *s1 = (Mpeg1Context *) s;
1595  int ret;
1596 
1597  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1598  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1599  return AVERROR_INVALIDDATA;
1600  }
1601 
1602  /* start frame decoding */
1603  if (s->first_field || s->picture_structure == PICT_FRAME) {
1604  AVFrameSideData *pan_scan;
1605 
1606  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1607  return ret;
1608 
1609  ff_mpeg_er_frame_start(s);
1610 
1611  /* first check if we must repeat the frame */
1612  s->current_picture_ptr->f->repeat_pict = 0;
1613  if (s->repeat_first_field) {
1614  if (s->progressive_sequence) {
1615  if (s->top_field_first)
1616  s->current_picture_ptr->f->repeat_pict = 4;
1617  else
1618  s->current_picture_ptr->f->repeat_pict = 2;
1619  } else if (s->progressive_frame) {
1620  s->current_picture_ptr->f->repeat_pict = 1;
1621  }
1622  }
1623 
1624  pan_scan = av_frame_new_side_data(s->current_picture_ptr->f,
1625  AV_FRAME_DATA_PANSCAN,
1626  sizeof(s1->pan_scan));
1627  if (!pan_scan)
1628  return AVERROR(ENOMEM);
1629  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1630 
1631  if (s1->a53_buf_ref) {
1632  AVFrameSideData *sd = av_frame_new_side_data_from_buf(
1633  s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
1634  s1->a53_buf_ref);
1635  if (!sd)
1636  av_buffer_unref(&s1->a53_buf_ref);
1637  s1->a53_buf_ref = NULL;
1638  }
1639 
1640  if (s1->has_stereo3d) {
1641  AVStereo3D *stereo = av_stereo3d_create_side_data(s->current_picture_ptr->f);
1642  if (!stereo)
1643  return AVERROR(ENOMEM);
1644 
1645  *stereo = s1->stereo3d;
1646  s1->has_stereo3d = 0;
1647  }
1648 
1649  if (s1->has_afd) {
1650  AVFrameSideData *sd =
1651  av_frame_new_side_data(s->current_picture_ptr->f,
1652  AV_FRAME_DATA_AFD, 1);
1653  if (!sd)
1654  return AVERROR(ENOMEM);
1655 
1656  *sd->data = s1->afd;
1657  s1->has_afd = 0;
1658  }
1659 
1660  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1661  ff_thread_finish_setup(avctx);
1662  } else { // second field
1663  int i;
1664 
1665  if (!s->current_picture_ptr) {
1666  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1667  return AVERROR_INVALIDDATA;
1668  }
1669 
1670  if (s->avctx->hwaccel) {
1671  if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) {
1672  av_log(avctx, AV_LOG_ERROR,
1673  "hardware accelerator failed to decode first field\n");
1674  return ret;
1675  }
1676  }
1677 
1678  for (i = 0; i < 4; i++) {
1679  s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
1680  if (s->picture_structure == PICT_BOTTOM_FIELD)
1681  s->current_picture.f->data[i] +=
1682  s->current_picture_ptr->f->linesize[i];
1683  }
1684  }
1685 
1686  if (avctx->hwaccel) {
1687  if ((ret = avctx->hwaccel->start_frame(avctx, buf, buf_size)) < 0)
1688  return ret;
1689  }
1690 
1691  return 0;
1692 }
1693 
1694 #define DECODE_SLICE_ERROR -1
1695 #define DECODE_SLICE_OK 0
1696 
1697 /**
1698  * Decode a slice.
1699  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1700  * @return DECODE_SLICE_ERROR if the slice is damaged,
1701  * DECODE_SLICE_OK if this slice is OK
1702  */
1703 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1704  const uint8_t **buf, int buf_size)
1705 {
1706  AVCodecContext *avctx = s->avctx;
1707  const int lowres = s->avctx->lowres;
1708  const int field_pic = s->picture_structure != PICT_FRAME;
1709  int ret;
1710 
1711  s->resync_mb_x =
1712  s->resync_mb_y = -1;
1713 
1714  av_assert0(mb_y < s->mb_height);
1715 
1716  init_get_bits(&s->gb, *buf, buf_size * 8);
1717  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1718  skip_bits(&s->gb, 3);
1719 
1721  s->interlaced_dct = 0;
1722 
1723  s->qscale = mpeg_get_qscale(s);
1724 
1725  if (s->qscale == 0) {
1726  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1727  return AVERROR_INVALIDDATA;
1728  }
1729 
1730  /* extra slice info */
1731  if (skip_1stop_8data_bits(&s->gb) < 0)
1732  return AVERROR_INVALIDDATA;
1733 
1734  s->mb_x = 0;
1735 
1736  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1737  skip_bits1(&s->gb);
1738  } else {
1739  while (get_bits_left(&s->gb) > 0) {
1740  int code = get_vlc2(&s->gb, ff_mbincr_vlc.table,
1741  MBINCR_VLC_BITS, 2);
1742  if (code < 0) {
1743  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1744  return AVERROR_INVALIDDATA;
1745  }
1746  if (code >= 33) {
1747  if (code == 33)
1748  s->mb_x += 33;
1749  /* otherwise, stuffing, nothing to do */
1750  } else {
1751  s->mb_x += code;
1752  break;
1753  }
1754  }
1755  }
1756 
1757  if (s->mb_x >= (unsigned) s->mb_width) {
1758  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1759  return AVERROR_INVALIDDATA;
1760  }
1761 
1762  if (avctx->hwaccel && avctx->hwaccel->decode_slice) {
1763  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1764  int start_code = -1;
1765  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1766  if (buf_end < *buf + buf_size)
1767  buf_end -= 4;
1768  s->mb_y = mb_y;
1769  if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_end - buf_start) < 0)
1770  return DECODE_SLICE_ERROR;
1771  *buf = buf_end;
1772  return DECODE_SLICE_OK;
1773  }
1774 
1775  s->resync_mb_x = s->mb_x;
1776  s->resync_mb_y = s->mb_y = mb_y;
1777  s->mb_skip_run = 0;
1778  ff_init_block_index(s);
1779 
1780  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1781  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1782  av_log(s->avctx, AV_LOG_DEBUG,
1783  "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1784  s->qscale,
1785  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1786  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1787  s->pict_type == AV_PICTURE_TYPE_I ? "I" :
1788  (s->pict_type == AV_PICTURE_TYPE_P ? "P" :
1789  (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
1790  s->progressive_sequence ? "ps" : "",
1791  s->progressive_frame ? "pf" : "",
1792  s->alternate_scan ? "alt" : "",
1793  s->top_field_first ? "top" : "",
1794  s->intra_dc_precision, s->picture_structure,
1795  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1796  s->q_scale_type, s->intra_vlc_format,
1797  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1798  }
1799  }
1800 
1801  for (;;) {
1802  // If 1, we memcpy blocks in xvmcvideo.
1803  if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks)
1804  ff_xvmc_init_block(s); // set s->block
1805 
1806  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1807  return ret;
1808 
1809  // Note motion_val is normally NULL unless we want to extract the MVs.
1810  if (s->current_picture.motion_val[0] && !s->encoding) {
1811  const int wrap = s->b8_stride;
1812  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1813  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1814  int motion_x, motion_y, dir, i;
1815 
1816  for (i = 0; i < 2; i++) {
1817  for (dir = 0; dir < 2; dir++) {
1818  if (s->mb_intra ||
1819  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1820  motion_x = motion_y = 0;
1821  } else if (s->mv_type == MV_TYPE_16X16 ||
1822  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1823  motion_x = s->mv[dir][0][0];
1824  motion_y = s->mv[dir][0][1];
1825  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1826  motion_x = s->mv[dir][i][0];
1827  motion_y = s->mv[dir][i][1];
1828  }
1829 
1830  s->current_picture.motion_val[dir][xy][0] = motion_x;
1831  s->current_picture.motion_val[dir][xy][1] = motion_y;
1832  s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
1833  s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
1834  s->current_picture.ref_index [dir][b8_xy] =
1835  s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1836  av_assert2(s->field_select[dir][i] == 0 ||
1837  s->field_select[dir][i] == 1);
1838  }
1839  xy += wrap;
1840  b8_xy += 2;
1841  }
1842  }
1843 
1844  s->dest[0] += 16 >> lowres;
1845  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1846  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1847 
1848  ff_mpv_reconstruct_mb(s, s->block);
1849 
1850  if (++s->mb_x >= s->mb_width) {
1851  const int mb_size = 16 >> s->avctx->lowres;
1852  int left;
1853 
1854  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
1855  ff_mpv_report_decode_progress(s);
1856 
1857  s->mb_x = 0;
1858  s->mb_y += 1 << field_pic;
1859 
1860  if (s->mb_y >= s->mb_height) {
1861  int left = get_bits_left(&s->gb);
1862  int is_d10 = s->chroma_format == 2 &&
1863  s->pict_type == AV_PICTURE_TYPE_I &&
1864  avctx->profile == 0 && avctx->level == 5 &&
1865  s->intra_dc_precision == 2 &&
1866  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1867  s->progressive_frame == 0
1868  /* vbv_delay == 0xBBB || 0xE10 */;
1869 
1870  if (left >= 32 && !is_d10) {
1871  GetBitContext gb = s->gb;
1872  align_get_bits(&gb);
1873  if (show_bits(&gb, 24) == 0x060E2B) {
1874  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1875  is_d10 = 1;
1876  }
1877  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1878  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1879  goto eos;
1880  }
1881  }
1882 
1883  if (left < 0 ||
1884  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1885  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1886  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1887  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1888  return AVERROR_INVALIDDATA;
1889  } else
1890  goto eos;
1891  }
1892  // There are some files out there which are missing the last slice
1893  // in cases where the slice is completely outside the visible
1894  // area, we detect this here instead of running into the end expecting
1895  // more data
1896  left = get_bits_left(&s->gb);
1897  if (s->mb_y >= ((s->height + 15) >> 4) &&
1898  !s->progressive_sequence &&
1899  left <= 25 &&
1900  left >= 0 &&
1901  s->mb_skip_run == -1 &&
1902  (!left || show_bits(&s->gb, left) == 0))
1903  goto eos;
1904 
1905  ff_init_block_index(s);
1906  }
1907 
1908  /* skip mb handling */
1909  if (s->mb_skip_run == -1) {
1910  /* read increment again */
1911  s->mb_skip_run = 0;
1912  for (;;) {
1913  int code = get_vlc2(&s->gb, ff_mbincr_vlc.table,
1914  MBINCR_VLC_BITS, 2);
1915  if (code < 0) {
1916  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1917  return AVERROR_INVALIDDATA;
1918  }
1919  if (code >= 33) {
1920  if (code == 33) {
1921  s->mb_skip_run += 33;
1922  } else if (code == 35) {
1923  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1924  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1925  return AVERROR_INVALIDDATA;
1926  }
1927  goto eos; /* end of slice */
1928  }
1929  /* otherwise, stuffing, nothing to do */
1930  } else {
1931  s->mb_skip_run += code;
1932  break;
1933  }
1934  }
1935  if (s->mb_skip_run) {
1936  int i;
1937  if (s->pict_type == AV_PICTURE_TYPE_I) {
1938  av_log(s->avctx, AV_LOG_ERROR,
1939  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1940  return AVERROR_INVALIDDATA;
1941  }
1942 
1943  /* skip mb */
1944  s->mb_intra = 0;
1945  for (i = 0; i < 12; i++)
1946  s->block_last_index[i] = -1;
1947  if (s->picture_structure == PICT_FRAME)
1948  s->mv_type = MV_TYPE_16X16;
1949  else
1950  s->mv_type = MV_TYPE_FIELD;
1951  if (s->pict_type == AV_PICTURE_TYPE_P) {
1952  /* if P type, zero motion vector is implied */
1953  s->mv_dir = MV_DIR_FORWARD;
1954  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1955  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1956  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1957  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1958  } else {
1959  /* if B type, reuse previous vectors and directions */
1960  s->mv[0][0][0] = s->last_mv[0][0][0];
1961  s->mv[0][0][1] = s->last_mv[0][0][1];
1962  s->mv[1][0][0] = s->last_mv[1][0][0];
1963  s->mv[1][0][1] = s->last_mv[1][0][1];
1964  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1965  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1966  }
1967  }
1968  }
1969  }
1970 eos: // end of slice
1971  if (get_bits_left(&s->gb) < 0) {
1972  av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1973  return AVERROR_INVALIDDATA;
1974  }
1975  *buf += (get_bits_count(&s->gb) - 1) / 8;
1976  ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1977  return 0;
1978 }
1979 
1980 static int slice_decode_thread(AVCodecContext *c, void *arg)
1981 {
1982  MpegEncContext *s = *(void **) arg;
1983  const uint8_t *buf = s->gb.buffer;
1984  int mb_y = s->start_mb_y;
1985  const int field_pic = s->picture_structure != PICT_FRAME;
1986 
1987  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1988 
1989  for (;;) {
1990  uint32_t start_code;
1991  int ret;
1992 
1993  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1994  emms_c();
1995  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1996  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1997  s->start_mb_y, s->end_mb_y, s->er.error_count);
1998  if (ret < 0) {
1999  if (c->err_recognition & AV_EF_EXPLODE)
2000  return ret;
2001  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
2002  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
2003  s->mb_x, s->mb_y,
2004  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2005  } else {
2006  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
2007  s->mb_x - 1, s->mb_y,
2008  ER_AC_END | ER_DC_END | ER_MV_END);
2009  }
2010 
2011  if (s->mb_y == s->end_mb_y)
2012  return 0;
2013 
2014  start_code = -1;
2015  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
2016  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
2017  return AVERROR_INVALIDDATA;
2018  mb_y = start_code - SLICE_MIN_START_CODE;
2019  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
2020  mb_y += (*buf&0xE0)<<2;
2021  mb_y <<= field_pic;
2022  if (s->picture_structure == PICT_BOTTOM_FIELD)
2023  mb_y++;
2024  if (mb_y >= s->end_mb_y)
2025  return AVERROR_INVALIDDATA;
2026  }
2027 }
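For reference, slice_decode_thread() above and decode_chunks() further down derive the macroblock row from a slice start code in the same way. A minimal standalone sketch of that mapping, assuming a hypothetical helper name slice_row_from_start_code() that is not part of mpeg12dec.c:

#include <stdint.h>

/* Map a slice start code (0x101..0x1AF) to a 0-based macroblock row.
 * For MPEG-2 pictures taller than 2800 lines, three extension bits in the
 * first byte after the start code extend the row counter, mirroring the
 * "mb_y += (*buf & 0xE0) << 2" step in the decoder above. */
static int slice_row_from_start_code(uint32_t start_code,
                                     uint8_t first_payload_byte,
                                     int is_mpeg2, int mb_height)
{
    int mb_row = (start_code & 0xff) - 1; /* start_code - SLICE_MIN_START_CODE */

    if (is_mpeg2 && mb_height > 2800 / 16)
        mb_row += (first_payload_byte & 0xE0) << 2;

    return mb_row;
}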
2028 
2029 /**
2030  * Handle slice ends.
2031  * @return 1 if it seems to be the last slice
2032  */
2033 static int slice_end(AVCodecContext *avctx, AVFrame *pict)
2034 {
2035  Mpeg1Context *s1 = avctx->priv_data;
2036  MpegEncContext *s = &s1->mpeg_enc_ctx;
2037 
2038  if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
2039  return 0;
2040 
2041  if (s->avctx->hwaccel) {
2042  int ret = s->avctx->hwaccel->end_frame(s->avctx);
2043  if (ret < 0) {
2044  av_log(avctx, AV_LOG_ERROR,
2045  "hardware accelerator failed to decode picture\n");
2046  return ret;
2047  }
2048  }
2049 
2050  /* end of slice reached */
2051  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
2052  /* end of image */
2053 
2054  ff_er_frame_end(&s->er);
2055 
2056  ff_mpv_frame_end(s);
2057 
2058  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
2059  int ret = av_frame_ref(pict, s->current_picture_ptr->f);
2060  if (ret < 0)
2061  return ret;
2062  ff_print_debug_info(s, s->current_picture_ptr, pict);
2063  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG2);
2064  } else {
2065  if (avctx->active_thread_type & FF_THREAD_FRAME)
2066  s->picture_number++;
2067  /* latency of 1 frame for I- and P-frames */
2068  /* XXX: use another variable than picture_number */
2069  if (s->last_picture_ptr) {
2070  int ret = av_frame_ref(pict, s->last_picture_ptr->f);
2071  if (ret < 0)
2072  return ret;
2073  ff_print_debug_info(s, s->last_picture_ptr, pict);
2074  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG2);
2075  }
2076  }
2077 
2078  return 1;
2079  } else {
2080  return 0;
2081  }
2082 }
2083 
2084 static int mpeg1_decode_sequence(AVCodecContext *avctx,
2085  const uint8_t *buf, int buf_size)
2086 {
2087  Mpeg1Context *s1 = avctx->priv_data;
2088  MpegEncContext *s = &s1->mpeg_enc_ctx;
2089  int width, height;
2090  int i, v, j;
2091 
2092  init_get_bits(&s->gb, buf, buf_size * 8);
2093 
2094  width = get_bits(&s->gb, 12);
2095  height = get_bits(&s->gb, 12);
2096  if (width == 0 || height == 0) {
2097  av_log(avctx, AV_LOG_WARNING,
2098  "Invalid horizontal or vertical size value.\n");
2099  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
2100  return AVERROR_INVALIDDATA;
2101  }
2102  s->aspect_ratio_info = get_bits(&s->gb, 4);
2103  if (s->aspect_ratio_info == 0) {
2104  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
2105  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
2106  return AVERROR_INVALIDDATA;
2107  }
2108  s->frame_rate_index = get_bits(&s->gb, 4);
2109  if (s->frame_rate_index == 0 || s->frame_rate_index > 13) {
2110  av_log(avctx, AV_LOG_WARNING,
2111  "frame_rate_index %d is invalid\n", s->frame_rate_index);
2112  s->frame_rate_index = 1;
2113  }
2114  s->bit_rate = get_bits(&s->gb, 18) * 400LL;
2115  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
2116  return AVERROR_INVALIDDATA;
2117  }
2118 
2119  s1->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
2120  skip_bits(&s->gb, 1);
2121 
2122  /* get matrix */
2123  if (get_bits1(&s->gb)) {
2124  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
2125  } else {
2126  for (i = 0; i < 64; i++) {
2127  j = s->idsp.idct_permutation[i];
2128  v = ff_mpeg1_default_intra_matrix[i];
2129  s->intra_matrix[j] = v;
2130  s->chroma_intra_matrix[j] = v;
2131  }
2132  }
2133  if (get_bits1(&s->gb)) {
2134  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
2135  } else {
2136  for (i = 0; i < 64; i++) {
2137  int j = s->idsp.idct_permutation[i];
2138  v = ff_mpeg1_default_non_intra_matrix[i];
2139  s->inter_matrix[j] = v;
2140  s->chroma_inter_matrix[j] = v;
2141  }
2142  }
2143 
2144  if (show_bits(&s->gb, 23) != 0) {
2145  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
2146  return AVERROR_INVALIDDATA;
2147  }
2148 
2149  s->width = width;
2150  s->height = height;
2151 
2152  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
2153  s->progressive_sequence = 1;
2154  s->progressive_frame = 1;
2155  s->picture_structure = PICT_FRAME;
2156  s->first_field = 0;
2157  s->frame_pred_frame_dct = 1;
2158  s->chroma_format = 1;
2159  s->codec_id =
2160  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
2161  s->out_format = FMT_MPEG1;
2162  s->swap_uv = 0; // AFAIK VCR2 does not have SEQ_HEADER
2163  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
2164  s->low_delay = 1;
2165 
2166  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2167  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
2168  s1->rc_buffer_size, s->bit_rate, s->aspect_ratio_info);
2169 
2170  return 0;
2171 }
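mpeg1_decode_sequence() above reads the MPEG-1 sequence header fields in bitstream order. The following is only a reference sketch of the fixed-length fields it consumes; the struct SeqHeaderFields and its name are hypothetical, not an FFmpeg type:

#include <stdint.h>

/* Fixed-length fields of the MPEG-1 sequence header, in the order read above. */
struct SeqHeaderFields {
    unsigned horizontal_size;    /* 12 bits                                   */
    unsigned vertical_size;      /* 12 bits                                   */
    unsigned aspect_ratio_info;  /*  4 bits, 0 is forbidden                   */
    unsigned frame_rate_index;   /*  4 bits, 0 or values above 13 are invalid */
    uint64_t bit_rate;           /* 18 bits, in units of 400 bit/s            */
    /* 1 marker bit, verified with check_marker() above */
    unsigned vbv_buffer_size;    /* 10 bits, in units of 16 * 1024 bits       */
    /* 1 bit constrained_parameters_flag, then optional intra and non-intra
     * quantiser matrices (64 values each), as loaded above */
};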
2172 
2173 static int vcr2_init_sequence(AVCodecContext *avctx)
2174 {
2175  Mpeg1Context *s1 = avctx->priv_data;
2176  MpegEncContext *s = &s1->mpeg_enc_ctx;
2177  int i, v, ret;
2178 
2179  /* start new MPEG-1 context decoding */
2180  s->out_format = FMT_MPEG1;
2181  if (s1->mpeg_enc_ctx_allocated) {
2182  ff_mpv_common_end(s);
2183  s1->mpeg_enc_ctx_allocated = 0;
2184  }
2185  s->width = avctx->coded_width;
2186  s->height = avctx->coded_height;
2187  avctx->has_b_frames = 0; // true?
2188  s->low_delay = 1;
2189 
2190  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
2191  setup_hwaccel_for_pixfmt(avctx);
2192 
2193  ff_mpv_idct_init(s);
2194  if ((ret = ff_mpv_common_init(s)) < 0)
2195  return ret;
2196  s1->mpeg_enc_ctx_allocated = 1;
2197 
2198  for (i = 0; i < 64; i++) {
2199  int j = s->idsp.idct_permutation[i];
2200  v = ff_mpeg1_default_intra_matrix[i];
2201  s->intra_matrix[j] = v;
2202  s->chroma_intra_matrix[j] = v;
2203 
2204  v = ff_mpeg1_default_non_intra_matrix[i];
2205  s->inter_matrix[j] = v;
2206  s->chroma_inter_matrix[j] = v;
2207  }
2208 
2209  s->progressive_sequence = 1;
2210  s->progressive_frame = 1;
2211  s->picture_structure = PICT_FRAME;
2212  s->first_field = 0;
2213  s->frame_pred_frame_dct = 1;
2214  s->chroma_format = 1;
2215  if (s->codec_tag == AV_RL32("BW10")) {
2216  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
2217  } else {
2218  s->swap_uv = 1; // in case of xvmc we need to swap uv for each MB
2219  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
2220  }
2221  s1->save_width = s->width;
2222  s1->save_height = s->height;
2223  s1->save_progressive_seq = s->progressive_sequence;
2224  return 0;
2225 }
2226 
2227 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
2228  const uint8_t *p, int buf_size)
2229 {
2230  Mpeg1Context *s1 = avctx->priv_data;
2231 
2232  if (buf_size >= 6 &&
2233  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
2234  p[4] == 3 && (p[5] & 0x40)) {
2235  /* extract A53 Part 4 CC data */
2236  int cc_count = p[5] & 0x1f;
2237  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
2238  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2239  const uint64_t new_size = (old_size + cc_count
2240  * UINT64_C(3));
2241  int ret;
2242 
2243  if (new_size > 3*A53_MAX_CC_COUNT)
2244  return AVERROR(EINVAL);
2245 
2246  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2247  if (ret >= 0)
2248  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
2249 
2250  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2251  }
2252  return 1;
2253  } else if (buf_size >= 2 &&
2254  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
2255  /* extract SCTE-20 CC data */
2256  GetBitContext gb;
2257  int cc_count = 0;
2258  int i, ret;
2259 
2260  init_get_bits(&gb, p + 2, buf_size - 2);
2261  cc_count = get_bits(&gb, 5);
2262  if (cc_count > 0) {
2263  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2264  const uint64_t new_size = (old_size + cc_count
2265  * UINT64_C(3));
2266  if (new_size > 3*A53_MAX_CC_COUNT)
2267  return AVERROR(EINVAL);
2268 
2269  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2270  if (ret >= 0) {
2271  uint8_t field, cc1, cc2;
2272  uint8_t *cap = s1->a53_buf_ref->data;
2273 
2274  memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
2275  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
2276  skip_bits(&gb, 2); // priority
2277  field = get_bits(&gb, 2);
2278  skip_bits(&gb, 5); // line_offset
2279  cc1 = get_bits(&gb, 8);
2280  cc2 = get_bits(&gb, 8);
2281  skip_bits(&gb, 1); // marker
2282 
2283  if (!field) { // forbidden
2284  cap[0] = cap[1] = cap[2] = 0x00;
2285  } else {
2286  field = (field == 2 ? 1 : 0);
2287  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
2288  cap[0] = 0x04 | field;
2289  cap[1] = ff_reverse[cc1];
2290  cap[2] = ff_reverse[cc2];
2291  }
2292  cap += 3;
2293  }
2294  }
2295  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2296  }
2297  return 1;
2298  } else if (buf_size >= 11 &&
2299  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
2300  /* extract DVD CC data
2301  *
2302  * uint32_t user_data_start_code 0x000001B2 (big endian)
2303  * uint16_t user_identifier 0x4343 "CC"
2304  * uint8_t user_data_type_code 0x01
2305  * uint8_t caption_block_size 0xF8
2306  * uint8_t
2307  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
2308  * bit 6 caption_filler 0
2309  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
2310  * bit 0 caption_extra_field_added 1=one additional caption word
2311  *
2312  * struct caption_field_block {
2313  * uint8_t
2314  * bit 7:1 caption_filler 0x7F (all 1s)
2315  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
2316  * uint8_t caption_first_byte
2317  * uint8_t caption_second_byte
2318  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
2319  *
2320  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
2321  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2322  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2323  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2324  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
2325  int cc_count = 0;
2326  int i, ret;
2327  // There is a caption count field in the data, but it is often
2328  // incorrect. So count the number of captions present.
2329  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2330  cc_count++;
2331  // Transform the DVD format into A53 Part 4 format
2332  if (cc_count > 0) {
2333  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2334  const uint64_t new_size = (old_size + cc_count
2335  * UINT64_C(6));
2336  if (new_size > 3*A53_MAX_CC_COUNT)
2337  return AVERROR(EINVAL);
2338 
2339  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2340  if (ret >= 0) {
2341  uint8_t field1 = !!(p[4] & 0x80);
2342  uint8_t *cap = s1->a53_buf_ref->data;
2343  p += 5;
2344  for (i = 0; i < cc_count; i++) {
2345  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2346  cap[1] = p[1];
2347  cap[2] = p[2];
2348  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2349  cap[4] = p[4];
2350  cap[5] = p[5];
2351  cap += 6;
2352  p += 6;
2353  }
2354  }
2355  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2356  }
2357  return 1;
2358  }
2359  return 0;
2360 }
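The first branch of mpeg_decode_a53_cc() above accepts ATSC A/53 Part 4 caption user data. A compact sketch of the byte layout it checks, using a hypothetical helper a53_cc_header_ok() that is not part of FFmpeg:

#include <stddef.h>
#include <stdint.h>

/* Validate the "GA94" caption wrapper as the branch above does and report
 * how many 3-byte cc constructs follow at p + 7. */
static int a53_cc_header_ok(const uint8_t *p, size_t size, int *cc_count)
{
    if (size < 6)
        return 0;
    if (p[0] != 'G' || p[1] != 'A' || p[2] != '9' || p[3] != '4')
        return 0;                           /* user_identifier         */
    if (p[4] != 3)                          /* user_data_type_code     */
        return 0;
    if (!(p[5] & 0x40))                     /* process flag must be set */
        return 0;
    *cc_count = p[5] & 0x1f;                /* number of cc constructs */
    return *cc_count > 0 && size >= 7 + (size_t)*cc_count * 3;
}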
2361 
2362 static void mpeg_decode_user_data(AVCodecContext *avctx,
2363  const uint8_t *p, int buf_size)
2364 {
2365  Mpeg1Context *s = avctx->priv_data;
2366  const uint8_t *buf_end = p + buf_size;
2367  Mpeg1Context *s1 = avctx->priv_data;
2368 
2369 #if 0
2370  int i;
2371  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2372  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2373  }
2374  av_log(avctx, AV_LOG_ERROR, "\n");
2375 #endif
2376 
2377  if (buf_size > 29){
2378  int i;
2379  for(i=0; i<20; i++)
2380  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2381  s->tmpgexs= 1;
2382  }
2383  }
2384  /* we parse the DTG active format information */
2385  if (buf_end - p >= 5 &&
2386  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2387  int flags = p[4];
2388  p += 5;
2389  if (flags & 0x80) {
2390  /* skip event id */
2391  p += 2;
2392  }
2393  if (flags & 0x40) {
2394  if (buf_end - p < 1)
2395  return;
2396  s1->has_afd = 1;
2397  s1->afd = p[0] & 0x0f;
2398  }
2399  } else if (buf_end - p >= 6 &&
2400  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2401  p[4] == 0x03) { // S3D_video_format_length
2402  // the 0x7F mask ignores the reserved_bit value
2403  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2404 
2405  if (S3D_video_format_type == 0x03 ||
2406  S3D_video_format_type == 0x04 ||
2407  S3D_video_format_type == 0x08 ||
2408  S3D_video_format_type == 0x23) {
2409 
2410  s1->has_stereo3d = 1;
2411 
2412  switch (S3D_video_format_type) {
2413  case 0x03:
2414  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE;
2415  break;
2416  case 0x04:
2417  s1->stereo3d.type = AV_STEREO3D_TOPBOTTOM;
2418  break;
2419  case 0x08:
2420  s1->stereo3d.type = AV_STEREO3D_2D;
2421  break;
2422  case 0x23:
2423  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2424  break;
2425  }
2426  }
2427  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2428  return;
2429  }
2430 }
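The DTG1 branch of mpeg_decode_user_data() above carries the active format description. A small sketch of the same parse, with a hypothetical helper afd_from_dtg1() returning -1 when no AFD byte is present:

#include <stddef.h>
#include <stdint.h>

/* Return the 4-bit active_format code from a "DTG1" user data blob,
 * following the same flag handling as the decoder above, or -1 if absent. */
static int afd_from_dtg1(const uint8_t *p, size_t size)
{
    const uint8_t *end = p + size;
    int flags;

    if (size < 5 || p[0] != 'D' || p[1] != 'T' || p[2] != 'G' || p[3] != '1')
        return -1;

    flags = p[4];
    p += 5;
    if (flags & 0x80)          /* an event id is present: skip two bytes */
        p += 2;
    if (!(flags & 0x40) || end - p < 1)
        return -1;
    return p[0] & 0x0f;        /* active_format, low four bits */
}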
2431 
2432 static void mpeg_decode_gop(AVCodecContext *avctx,
2433  const uint8_t *buf, int buf_size)
2434 {
2435  Mpeg1Context *s1 = avctx->priv_data;
2436  MpegEncContext *s = &s1->mpeg_enc_ctx;
2437  int broken_link;
2438  int64_t tc;
2439 
2440  init_get_bits(&s->gb, buf, buf_size * 8);
2441 
2442  tc = s-> timecode_frame_start = get_bits(&s->gb, 25);
2443 
2444 #if FF_API_PRIVATE_OPT
2445 FF_DISABLE_DEPRECATION_WARNINGS
2446  avctx->timecode_frame_start = tc;
2447 FF_ENABLE_DEPRECATION_WARNINGS
2448 #endif
2449 
2450  s->closed_gop = get_bits1(&s->gb);
2451  /* broken_link indicates that, after editing, the reference
2452  * frames of the first B-frames following the GOP I-frame
2453  * are missing (open GOP) */
2454  broken_link = get_bits1(&s->gb);
2455 
2456  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2457  char tcbuf[AV_TIMECODE_STR_SIZE];
2458  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2459  av_log(s->avctx, AV_LOG_DEBUG,
2460  "GOP (%s) closed_gop=%d broken_link=%d\n",
2461  tcbuf, s->closed_gop, broken_link);
2462  }
2463 }
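The 25 bits stored in timecode_frame_start above pack the GOP header time_code fields, which the decoder later formats with av_timecode_make_mpeg_tc_string(). A sketch that unpacks them directly, using the standard group_of_pictures_header() field widths; print_gop_timecode() is a hypothetical helper:

#include <stdint.h>
#include <stdio.h>

/* Unpack the 25-bit GOP time_code: drop(1) hours(5) minutes(6) marker(1)
 * seconds(6) pictures(6), most significant bit first. */
static void print_gop_timecode(uint32_t tc25)
{
    unsigned drop    = (tc25 >> 24) & 0x01;
    unsigned hours   = (tc25 >> 19) & 0x1f;
    unsigned minutes = (tc25 >> 13) & 0x3f;
    /* bit 12 is a marker bit */
    unsigned seconds = (tc25 >>  6) & 0x3f;
    unsigned frames  =  tc25        & 0x3f;

    printf("%02u:%02u:%02u%c%02u\n",
           hours, minutes, seconds, drop ? ';' : ':', frames);
}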
2464 
2465 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2466  int *got_output, const uint8_t *buf, int buf_size)
2467 {
2468  Mpeg1Context *s = avctx->priv_data;
2469  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2470  const uint8_t *buf_ptr = buf;
2471  const uint8_t *buf_end = buf + buf_size;
2472  int ret, input_size;
2473  int last_code = 0, skip_frame = 0;
2474  int picture_start_code_seen = 0;
2475 
2476  for (;;) {
2477  /* find next start code */
2478  uint32_t start_code = -1;
2479  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2480  if (start_code > 0x1ff) {
2481  if (!skip_frame) {
2482  if (HAVE_THREADS &&
2483  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2484  !avctx->hwaccel) {
2485  int i;
2486  av_assert0(avctx->thread_count > 1);
2487 
2488  avctx->execute(avctx, slice_decode_thread,
2489  &s2->thread_context[0], NULL,
2490  s->slice_count, sizeof(void *));
2491  for (i = 0; i < s->slice_count; i++)
2492  s2->er.error_count += s2->thread_context[i]->er.error_count;
2493  }
2494 
2495  ret = slice_end(avctx, picture);
2496  if (ret < 0)
2497  return ret;
2498  else if (ret) {
2499  // FIXME: merge with the stuff in mpeg_decode_slice
2500  if (s2->last_picture_ptr || s2->low_delay || s2->pict_type == AV_PICTURE_TYPE_B)
2501  *got_output = 1;
2502  }
2503  }
2504  s2->pict_type = 0;
2505 
2506  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2507  return AVERROR_INVALIDDATA;
2508 
2509  return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index);
2510  }
2511 
2512  input_size = buf_end - buf_ptr;
2513 
2514  if (avctx->debug & FF_DEBUG_STARTCODE)
2515  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2516  start_code, buf_ptr - buf, input_size);
2517 
2518  /* prepare data for next start code */
2519  switch (start_code) {
2520  case SEQ_START_CODE:
2521  if (last_code == 0) {
2522  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2523  if (buf != avctx->extradata)
2524  s->sync = 1;
2525  } else {
2526  av_log(avctx, AV_LOG_ERROR,
2527  "ignoring SEQ_START_CODE after %X\n", last_code);
2528  if (avctx->err_recognition & AV_EF_EXPLODE)
2529  return AVERROR_INVALIDDATA;
2530  }
2531  break;
2532 
2533  case PICTURE_START_CODE:
2534  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2535  /* If it's a frame picture, there can't be more than one picture header.
2536  Yet, it does happen and we need to handle it. */
2537  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2538  break;
2539  }
2540  picture_start_code_seen = 1;
2541 
2542  if (s2->width <= 0 || s2->height <= 0) {
2543  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2544  s2->width, s2->height);
2545  return AVERROR_INVALIDDATA;
2546  }
2547 
2548  if (s->tmpgexs){
2549  s2->intra_dc_precision= 3;
2550  s2->intra_matrix[0]= 1;
2551  }
2552  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2553  !avctx->hwaccel && s->slice_count) {
2554  int i;
2555 
2556  avctx->execute(avctx, slice_decode_thread,
2557  s2->thread_context, NULL,
2558  s->slice_count, sizeof(void *));
2559  for (i = 0; i < s->slice_count; i++)
2560  s2->er.error_count += s2->thread_context[i]->er.error_count;
2561  s->slice_count = 0;
2562  }
2563  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2564  ret = mpeg_decode_postinit(avctx);
2565  if (ret < 0) {
2566  av_log(avctx, AV_LOG_ERROR,
2567  "mpeg_decode_postinit() failure\n");
2568  return ret;
2569  }
2570 
2571  /* We have a complete image: we try to decompress it. */
2572  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2573  s2->pict_type = 0;
2574  s->first_slice = 1;
2575  last_code = PICTURE_START_CODE;
2576  } else {
2577  av_log(avctx, AV_LOG_ERROR,
2578  "ignoring pic after %X\n", last_code);
2579  if (avctx->err_recognition & AV_EF_EXPLODE)
2580  return AVERROR_INVALIDDATA;
2581  }
2582  break;
2583  case EXT_START_CODE:
2584  init_get_bits(&s2->gb, buf_ptr, input_size * 8);
2585 
2586  switch (get_bits(&s2->gb, 4)) {
2587  case 0x1:
2588  if (last_code == 0) {
2589  mpeg_decode_sequence_extension(s);
2590  } else {
2591  av_log(avctx, AV_LOG_ERROR,
2592  "ignoring seq ext after %X\n", last_code);
2593  if (avctx->err_recognition & AV_EF_EXPLODE)
2594  return AVERROR_INVALIDDATA;
2595  }
2596  break;
2597  case 0x2:
2598  mpeg_decode_sequence_display_extension(s);
2599  break;
2600  case 0x3:
2601  mpeg_decode_quant_matrix_extension(s2);
2602  break;
2603  case 0x7:
2604  mpeg_decode_picture_display_extension(s);
2605  break;
2606  case 0x8:
2607  if (last_code == PICTURE_START_CODE) {
2608  mpeg_decode_picture_coding_extension(s);
2609  } else {
2610  av_log(avctx, AV_LOG_ERROR,
2611  "ignoring pic cod ext after %X\n", last_code);
2612  if (avctx->err_recognition & AV_EF_EXPLODE)
2613  return AVERROR_INVALIDDATA;
2614  }
2615  break;
2616  }
2617  break;
2618  case USER_START_CODE:
2619  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2620  break;
2621  case GOP_START_CODE:
2622  if (last_code == 0) {
2623  s2->first_field = 0;
2624  mpeg_decode_gop(avctx, buf_ptr, input_size);
2625  s->sync = 1;
2626  } else {
2627  av_log(avctx, AV_LOG_ERROR,
2628  "ignoring GOP_START_CODE after %X\n", last_code);
2629  if (avctx->err_recognition & AV_EF_EXPLODE)
2630  return AVERROR_INVALIDDATA;
2631  }
2632  break;
2633  default:
2634  if (start_code >= SLICE_MIN_START_CODE &&
2635  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2636  if (s2->progressive_sequence && !s2->progressive_frame) {
2637  s2->progressive_frame = 1;
2638  av_log(s2->avctx, AV_LOG_ERROR,
2639  "interlaced frame in progressive sequence, ignoring\n");
2640  }
2641 
2642  if (s2->picture_structure == 0 ||
2643  (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2644  av_log(s2->avctx, AV_LOG_ERROR,
2645  "picture_structure %d invalid, ignoring\n",
2646  s2->picture_structure);
2647  s2->picture_structure = PICT_FRAME;
2648  }
2649 
2650  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2651  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2652 
2653  if (s2->picture_structure == PICT_FRAME) {
2654  s2->first_field = 0;
2655  s2->v_edge_pos = 16 * s2->mb_height;
2656  } else {
2657  s2->first_field ^= 1;
2658  s2->v_edge_pos = 8 * s2->mb_height;
2659  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2660  }
2661  }
2662  if (start_code >= SLICE_MIN_START_CODE &&
2663  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2664  const int field_pic = s2->picture_structure != PICT_FRAME;
2665  int mb_y = start_code - SLICE_MIN_START_CODE;
2666  last_code = SLICE_MIN_START_CODE;
2667  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2668  mb_y += (*buf_ptr&0xE0)<<2;
2669 
2670  mb_y <<= field_pic;
2671  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2672  mb_y++;
2673 
2674  if (buf_end - buf_ptr < 2) {
2675  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2676  return AVERROR_INVALIDDATA;
2677  }
2678 
2679  if (mb_y >= s2->mb_height) {
2680  av_log(s2->avctx, AV_LOG_ERROR,
2681  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2682  return AVERROR_INVALIDDATA;
2683  }
2684 
2685  if (!s2->last_picture_ptr) {
2686  /* Skip B-frames if we do not have reference frames and
2687  * GOP is not closed. */
2688  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2689  if (!s2->closed_gop) {
2690  skip_frame = 1;
2691  av_log(s2->avctx, AV_LOG_DEBUG,
2692  "Skipping B slice due to open GOP\n");
2693  break;
2694  }
2695  }
2696  }
2697  if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2698  s->sync = 1;
2699  if (!s2->next_picture_ptr) {
2700  /* Skip P-frames if we do not have a reference frame or
2701  * we have an invalid header. */
2702  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2703  skip_frame = 1;
2704  av_log(s2->avctx, AV_LOG_DEBUG,
2705  "Skipping P slice due to !sync\n");
2706  break;
2707  }
2708  }
2709  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2710  s2->pict_type == AV_PICTURE_TYPE_B) ||
2711  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2712  s2->pict_type != AV_PICTURE_TYPE_I) ||
2713  avctx->skip_frame >= AVDISCARD_ALL) {
2714  skip_frame = 1;
2715  break;
2716  }
2717 
2718  if (!s->mpeg_enc_ctx_allocated)
2719  break;
2720 
2721  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2722  if (mb_y < avctx->skip_top ||
2723  mb_y >= s2->mb_height - avctx->skip_bottom)
2724  break;
2725  }
2726 
2727  if (!s2->pict_type) {
2728  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2729  if (avctx->err_recognition & AV_EF_EXPLODE)
2730  return AVERROR_INVALIDDATA;
2731  break;
2732  }
2733 
2734  if (s->first_slice) {
2735  skip_frame = 0;
2736  s->first_slice = 0;
2737  if ((ret = mpeg_field_start(s2, buf, buf_size)) < 0)
2738  return ret;
2739  }
2740  if (!s2->current_picture_ptr) {
2741  av_log(avctx, AV_LOG_ERROR,
2742  "current_picture not initialized\n");
2743  return AVERROR_INVALIDDATA;
2744  }
2745 
2746  if (HAVE_THREADS &&
2747  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2748  !avctx->hwaccel) {
2749  int threshold = (s2->mb_height * s->slice_count +
2750  s2->slice_context_count / 2) /
2751  s2->slice_context_count;
2752  av_assert0(avctx->thread_count > 1);
2753  if (threshold <= mb_y) {
2754  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2755 
2756  thread_context->start_mb_y = mb_y;
2757  thread_context->end_mb_y = s2->mb_height;
2758  if (s->slice_count) {
2759  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2760  ret = ff_update_duplicate_context(thread_context, s2);
2761  if (ret < 0)
2762  return ret;
2763  }
2764  init_get_bits(&thread_context->gb, buf_ptr, input_size * 8);
2765  s->slice_count++;
2766  }
2767  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2768  } else {
2769  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2770  emms_c();
2771 
2772  if (ret < 0) {
2773  if (avctx->err_recognition & AV_EF_EXPLODE)
2774  return ret;
2775  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2776  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2777  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2778  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2779  } else {
2780  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2781  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2782  ER_AC_END | ER_DC_END | ER_MV_END);
2783  }
2784  }
2785  }
2786  break;
2787  }
2788  }
2789 }
2790 
2791 static int mpeg_decode_frame(AVCodecContext *avctx, void *data,
2792  int *got_output, AVPacket *avpkt)
2793 {
2794  const uint8_t *buf = avpkt->data;
2795  int ret;
2796  int buf_size = avpkt->size;
2797  Mpeg1Context *s = avctx->priv_data;
2798  AVFrame *picture = data;
2799  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2800 
2801  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2802  /* special case for last picture */
2803  if (s2->low_delay == 0 && s2->next_picture_ptr) {
2804  int ret = av_frame_ref(picture, s2->next_picture_ptr->f);
2805  if (ret < 0)
2806  return ret;
2807 
2808  s2->next_picture_ptr = NULL;
2809 
2810  *got_output = 1;
2811  }
2812  return buf_size;
2813  }
2814 
2815  if (s2->avctx->flags & AV_CODEC_FLAG_TRUNCATED) {
2816  int next = ff_mpeg1_find_frame_end(&s2->parse_context, buf,
2817  buf_size, NULL);
2818 
2819  if (ff_combine_frame(&s2->parse_context, next,
2820  (const uint8_t **) &buf, &buf_size) < 0)
2821  return buf_size;
2822  }
2823 
2824  s2->codec_tag = avpriv_toupper4(avctx->codec_tag);
2825  if (s->mpeg_enc_ctx_allocated == 0 && ( s2->codec_tag == AV_RL32("VCR2")
2826  || s2->codec_tag == AV_RL32("BW10")
2827  ))
2828  vcr2_init_sequence(avctx);
2829 
2830  s->slice_count = 0;
2831 
2832  if (avctx->extradata && !s->extradata_decoded) {
2833  ret = decode_chunks(avctx, picture, got_output,
2834  avctx->extradata, avctx->extradata_size);
2835  if (*got_output) {
2836  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2837  av_frame_unref(picture);
2838  *got_output = 0;
2839  }
2840  s->extradata_decoded = 1;
2841  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2842  s2->current_picture_ptr = NULL;
2843  return ret;
2844  }
2845  }
2846 
2847  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2848  if (ret<0 || *got_output) {
2849  s2->current_picture_ptr = NULL;
2850 
2851  if (s2->timecode_frame_start != -1 && *got_output) {
2852  char tcbuf[AV_TIMECODE_STR_SIZE];
2853  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2854  AV_FRAME_DATA_GOP_TIMECODE,
2855  sizeof(int64_t));
2856  if (!tcside)
2857  return AVERROR(ENOMEM);
2858  memcpy(tcside->data, &s2->timecode_frame_start, sizeof(int64_t));
2859 
2860  av_timecode_make_mpeg_tc_string(tcbuf, s2->timecode_frame_start);
2861  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2862 
2863  s2->timecode_frame_start = -1;
2864  }
2865  }
2866 
2867  return ret;
2868 }
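mpeg_decode_frame() above is the legacy decode() callback and is normally reached through the public send/receive API. A minimal usage sketch with error handling reduced to the essentials, assuming ctx was already opened with avcodec_open2() for AV_CODEC_ID_MPEG1VIDEO or AV_CODEC_ID_MPEG2VIDEO; decode_one_packet() is a hypothetical wrapper:

#include <libavcodec/avcodec.h>

/* Feed one demuxed packet to the decoder and try to pull one frame.
 * Returns 0 on success, AVERROR(EAGAIN) if more input is needed, or another
 * negative AVERROR code on failure. */
static int decode_one_packet(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(ctx, pkt);   /* ends up in mpeg_decode_frame() */
    if (ret < 0)
        return ret;
    return avcodec_receive_frame(ctx, frame);
}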
2869 
2870 static void flush(AVCodecContext *avctx)
2871 {
2872  Mpeg1Context *s = avctx->priv_data;
2873 
2874  s->sync = 0;
2875 
2876  ff_mpeg_flush(avctx);
2877 }
2878 
2879 static av_cold int mpeg_decode_end(AVCodecContext *avctx)
2880 {
2881  Mpeg1Context *s = avctx->priv_data;
2882 
2883  if (s->mpeg_enc_ctx_allocated)
2884  ff_mpv_common_end(&s->mpeg_enc_ctx);
2885  av_buffer_unref(&s->a53_buf_ref);
2886  return 0;
2887 }
2888 
2889 AVCodec ff_mpeg1video_decoder = {
2890  .name = "mpeg1video",
2891  .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
2892  .type = AVMEDIA_TYPE_VIDEO,
2893  .id = AV_CODEC_ID_MPEG1VIDEO,
2894  .priv_data_size = sizeof(Mpeg1Context),
2895  .init = mpeg_decode_init,
2896  .close = mpeg_decode_end,
2901  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2902  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2903  .flush = flush,
2904  .max_lowres = 3,
2905  .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context),
2906  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2907 #if CONFIG_MPEG1_NVDEC_HWACCEL
2908  HWACCEL_NVDEC(mpeg1),
2909 #endif
2910 #if CONFIG_MPEG1_VDPAU_HWACCEL
2911  HWACCEL_VDPAU(mpeg1),
2912 #endif
2913 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2914  HWACCEL_VIDEOTOOLBOX(mpeg1),
2915 #endif
2916 #if CONFIG_MPEG1_XVMC_HWACCEL
2917  HWACCEL_XVMC(mpeg1),
2918 #endif
2919  NULL
2920  },
2921 };
2922 
2923 AVCodec ff_mpeg2video_decoder = {
2924  .name = "mpeg2video",
2925  .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
2926  .type = AVMEDIA_TYPE_VIDEO,
2927  .id = AV_CODEC_ID_MPEG2VIDEO,
2928  .priv_data_size = sizeof(Mpeg1Context),
2929  .init = mpeg_decode_init,
2930  .close = mpeg_decode_end,
2935  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2936  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2937  .flush = flush,
2938  .max_lowres = 3,
2940  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2941 #if CONFIG_MPEG2_DXVA2_HWACCEL
2942  HWACCEL_DXVA2(mpeg2),
2943 #endif
2944 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2945  HWACCEL_D3D11VA(mpeg2),
2946 #endif
2947 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2948  HWACCEL_D3D11VA2(mpeg2),
2949 #endif
2950 #if CONFIG_MPEG2_NVDEC_HWACCEL
2951  HWACCEL_NVDEC(mpeg2),
2952 #endif
2953 #if CONFIG_MPEG2_VAAPI_HWACCEL
2954  HWACCEL_VAAPI(mpeg2),
2955 #endif
2956 #if CONFIG_MPEG2_VDPAU_HWACCEL
2957  HWACCEL_VDPAU(mpeg2),
2958 #endif
2959 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2960  HWACCEL_VIDEOTOOLBOX(mpeg2),
2961 #endif
2962 #if CONFIG_MPEG2_XVMC_HWACCEL
2963  HWACCEL_XVMC(mpeg2),
2964 #endif
2965  NULL
2966  },
2967 };
2968 
2969 //legacy decoder
2970 AVCodec ff_mpegvideo_decoder = {
2971  .name = "mpegvideo",
2972  .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
2973  .type = AVMEDIA_TYPE_VIDEO,
2974  .id = AV_CODEC_ID_MPEG2VIDEO,
2975  .priv_data_size = sizeof(Mpeg1Context),
2976  .init = mpeg_decode_init,
2977  .close = mpeg_decode_end,
2980  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2981  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2982  .flush = flush,
2983  .max_lowres = 3,
2984 };
2985 
2986 typedef struct IPUContext {
2987  MpegEncContext m;
2988 
2989  int flags;
2990  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2991 } IPUContext;
2992 
2993 static int ipu_decode_frame(AVCodecContext *avctx, void *data,
2994  int *got_frame, AVPacket *avpkt)
2995 {
2996  IPUContext *s = avctx->priv_data;
2997  MpegEncContext *m = &s->m;
2998  GetBitContext *gb = &m->gb;
2999  AVFrame * const frame = data;
3000  int ret;
3001 
3002  // Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC)
3003  if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2LL + 3*4 + 2*2 + 2*6))
3004  return AVERROR_INVALIDDATA;
3005 
3006  ret = ff_get_buffer(avctx, frame, 0);
3007  if (ret < 0)
3008  return ret;
3009 
3010  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
3011  if (ret < 0)
3012  return ret;
3013 
3014  s->flags = get_bits(gb, 8);
3015  m->intra_dc_precision = s->flags & 3;
3016  m->q_scale_type = !!(s->flags & 0x40);
3017  m->intra_vlc_format = !!(s->flags & 0x20);
3018  m->alternate_scan = !!(s->flags & 0x10);
3019 
3020  if (s->flags & 0x10) {
3023  } else {
3026  }
3027 
3028  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
3029  m->qscale = 1;
3030 
3031  for (int y = 0; y < avctx->height; y += 16) {
3032  int intraquant;
3033 
3034  for (int x = 0; x < avctx->width; x += 16) {
3035  if (x || y) {
3036  if (!get_bits1(gb))
3037  return AVERROR_INVALIDDATA;
3038  }
3039  if (get_bits1(gb)) {
3040  intraquant = 0;
3041  } else {
3042  if (!get_bits1(gb))
3043  return AVERROR_INVALIDDATA;
3044  intraquant = 1;
3045  }
3046 
3047  if (s->flags & 4)
3048  skip_bits1(gb);
3049 
3050  if (intraquant)
3051  m->qscale = mpeg_get_qscale(m);
3052 
3053  memset(s->block, 0, sizeof(s->block));
3054 
3055  for (int n = 0; n < 6; n++) {
3056  if (s->flags & 0x80) {
3057  ret = ff_mpeg1_decode_block_intra(&m->gb,
3058  m->intra_matrix,
3059  m->intra_scantable.permutated,
3060  m->last_dc, s->block[n],
3061  n, m->qscale);
3062  if (ret >= 0)
3063  m->block_last_index[n] = ret;
3064  } else {
3065  ret = mpeg2_decode_block_intra(m, s->block[n], n);
3066  }
3067 
3068  if (ret < 0)
3069  return ret;
3070  }
3071 
3072  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
3073  frame->linesize[0], s->block[0]);
3074  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
3075  frame->linesize[0], s->block[1]);
3076  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
3077  frame->linesize[0], s->block[2]);
3078  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
3079  frame->linesize[0], s->block[3]);
3080  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
3081  frame->linesize[1], s->block[4]);
3082  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
3083  frame->linesize[2], s->block[5]);
3084  }
3085  }
3086 
3087  align_get_bits(gb);
3088  if (get_bits_left(gb) != 32)
3089  return AVERROR_INVALIDDATA;
3090 
3091  frame->pict_type = AV_PICTURE_TYPE_I;
3092  frame->key_frame = 1;
3093  *got_frame = 1;
3094 
3095  return avpkt->size;
3096 }
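ipu_decode_frame() above drives its parsing off a single per-frame flags byte. The enum below restates the bit meanings as that function uses them; the IPU_* names are hypothetical and introduced only for readability:

/* Bit meanings of the IPU per-frame flags byte, as interpreted above. */
enum {
    IPU_DC_PRECISION_MASK = 0x03, /* intra_dc_precision                      */
    IPU_EXTRA_MB_BIT      = 0x04, /* one extra bit is skipped per macroblock */
    IPU_ALTERNATE_SCAN    = 0x10, /* use the alternate scan order            */
    IPU_INTRA_VLC_FORMAT  = 0x20, /* MPEG-2 intra VLC table selection        */
    IPU_QSCALE_TYPE       = 0x40, /* non-linear quantiser scale              */
    IPU_MPEG1_BLOCKS      = 0x80  /* blocks use MPEG-1 intra block syntax    */
};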
3097 
3098 static av_cold int ipu_decode_init(AVCodecContext *avctx)
3099 {
3100  IPUContext *s = avctx->priv_data;
3101  MpegEncContext *m = &s->m;
3102 
3103  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3104 
3105  ff_mpv_decode_init(m, avctx);
3106  ff_mpv_idct_init(m);
3107  ff_mpeg12_common_init(m);
3108  ff_mpeg12_init_vlcs();
3109 
3110  for (int i = 0; i < 64; i++) {
3111  int j = m->idsp.idct_permutation[i];
3112  int v = ff_mpeg1_default_intra_matrix[i];
3113  m->intra_matrix[j] = v;
3114  m->chroma_intra_matrix[j] = v;
3115  }
3116 
3117  for (int i = 0; i < 64; i++) {
3118  int j = m->idsp.idct_permutation[i];
3119  int v = ff_mpeg1_default_non_intra_matrix[i];
3120  m->inter_matrix[j] = v;
3121  m->chroma_inter_matrix[j] = v;
3122  }
3123 
3124  return 0;
3125 }
3126 
3127 static av_cold int ipu_decode_end(AVCodecContext *avctx)
3128 {
3129  IPUContext *s = avctx->priv_data;
3130 
3131  ff_mpv_common_end(&s->m);
3132 
3133  return 0;
3134 }
3135 
3136 AVCodec ff_ipu_decoder = {
3137  .name = "ipu",
3138  .long_name = NULL_IF_CONFIG_SMALL("IPU Video"),
3139  .type = AVMEDIA_TYPE_VIDEO,
3140  .id = AV_CODEC_ID_IPU,
3141  .priv_data_size = sizeof(IPUContext),
3142  .init = ipu_decode_init,
3143  .decode = ipu_decode_frame,
3144  .close = ipu_decode_end,
3145  .capabilities = AV_CODEC_CAP_DR1,
3147 };
#define wrap(func)
Definition: neontest.h:65
static double val(void *priv, double ch)
Definition: aeval.c:76
Macro definitions for various function/variable attributes.
#define av_cold
Definition: attributes.h:88
uint8_t
int32_t
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
Libavcodec external API header.
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:2185
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1631
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1784
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: avcodec.h:1654
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: avcodec.h:1660
#define FF_IDCT_NONE
Definition: avcodec.h:1732
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1624
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1656
#define AV_EF_AGGRESSIVE
consider things that a sane encoder should not do as an error
Definition: avcodec.h:1661
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1785
#define FF_IDCT_AUTO
Definition: avcodec.h:1720
#define AV_RB32
Definition: intreadwrite.h:130
#define AV_RL32
Definition: intreadwrite.h:146
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
#define USER_START_CODE
Definition: cavs.h:36
#define EXT_START_CODE
Definition: cavs.h:35
#define SLICE_MAX_START_CODE
Definition: cavs.h:34
#define flags(name, subs,...)
Definition: cbs_av1.c:572
#define s(width, name)
Definition: cbs_vp9.c:257
#define FFMIN(a, b)
Definition: common.h:105
#define FFMAX(a, b)
Definition: common.h:103
#define CONFIG_MPEG1_XVMC_HWACCEL
Definition: config.h:1553
#define HAVE_THREADS
Definition: config.h:275
#define CONFIG_GRAY
Definition: config.h:556
#define CONFIG_MPEG2_XVMC_HWACCEL
Definition: config.h:1561
#define NULL
Definition: coverity.c:32
long long int64_t
Definition: coverity.c:34
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1900
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
static AVFrame * frame
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
void ff_er_frame_end(ERContext *s)
#define ER_DC_ERROR
#define ER_AC_ERROR
#define ER_MV_ERROR
#define ER_MV_END
#define ER_AC_END
#define ER_DC_END
@ HWACCEL_VIDEOTOOLBOX
Definition: ffmpeg.h:62
static int lowres
Definition: ffplay.c:336
float re
Definition: fft.c:82
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:212
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:854
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:193
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:738
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:538
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: get_bits.h:612
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:348
#define AV_CODEC_CAP_TRUNCATED
Definition: codec.h:53
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:376
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:77
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:317
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:112
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:325
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:367
@ AV_CODEC_ID_IPU
Definition: codec_id.h:305
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:50
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:51
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: avcodec.h:235
@ AVDISCARD_NONREF
discard all non reference
Definition: avcodec.h:232
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
int av_buffer_realloc(AVBufferRef **pbuf, buffer_size_t size)
Reallocate a given buffer.
Definition: buffer.c:169
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
#define AVERROR(e)
Definition: error.h:43
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, buffer_size_t size)
Add a new side data to a frame.
Definition: frame.c:726
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:694
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:124
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:52
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:89
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:117
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:322
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:117
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
#define IS_INTRA(x, y)
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
#define HWACCEL_XVMC(codec)
Definition: hwconfig.h:81
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
misc image utilities
int i
Definition: input.c:407
#define TEX_VLC_BITS
Definition: dv.h:99
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:61
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:1064
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:41
#define ff_tlog(ctx,...)
Definition: internal.h:96
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:49
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:84
unsigned int avpriv_toupper4(unsigned int x)
Definition: utils.c:932
#define FF_QSCALE_TYPE_MPEG2
Definition: internal.h:103
const char * arg
Definition: jacosubdec.c:66
common internal API header
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:156
#define emms_c()
Definition: internal.h:54
const uint8_t ff_reverse[256]
Definition: reverse.c:23
Stereoscopic video.
static const AVProfile profiles[]
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:309
swscale version macros
uint8_t w
Definition: llviddspenc.c:39
#define FFALIGN(x, a)
Definition: macros.h:48
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:130
VLC ff_mbincr_vlc
Definition: mpeg12.c:131
VLC ff_mb_btype_vlc
Definition: mpeg12.c:133
VLC ff_mv_vlc
Definition: mpeg12.c:126
VLC ff_mb_pat_vlc
Definition: mpeg12.c:134
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:165
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, uint8_t *const scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:237
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:114
VLC ff_mb_ptype_vlc
Definition: mpeg12.c:132
av_cold void ff_mpeg12_common_init(MpegEncContext *s)
Definition: mpeg12.c:106
int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s)
Find the end of the current frame in the bitstream.
Definition: mpeg12.c:175
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12.h:40
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:374
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:395
RLTable ff_rl_mpeg2
Definition: mpeg12data.c:174
RLTable ff_rl_mpeg1
Definition: mpeg12data.c:166
MPEG-1/2 tables.
const AVRational ff_mpeg12_frame_rate_tab[]
static void mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2432
AVCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2923
#define MT_DMV
Definition: mpeg12dec.c:651
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1703
static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1532
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:105
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:141
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1694
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1461
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1518
AVCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2889
#define MT_FRAME
Definition: mpeg12dec.c:649
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1591
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1387
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:55
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:315
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1340
AVCodec ff_ipu_decoder
Definition: mpeg12dec.c:3136
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1695
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1980
static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx)
Definition: mpeg12dec.c:1186
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:1155
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1437
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:1050
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:477
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:132
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.c:78
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2084
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:2173
static const uint32_t ptype2mb_type[7]
Definition: mpeg12dec.c:80
static int ipu_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2993
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:1128
static int mpeg2_fast_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:563
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:1160
static int mpeg1_fast_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:230
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2879
#define MT_FIELD
Definition: mpeg12dec.c:648
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:653
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:1114
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2465
static av_cold int ipu_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:3127
static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, const uint8_t *new_perm)
Definition: mpeg12dec.c:1102
static const uint32_t btype2mb_type[11]
Definition: mpeg12dec.c:90
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:1165
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2362
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2227
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:3098
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:639
AVCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2970
#define MAX_INDEX
Definition: mpeg12dec.c:131
static int mpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2791
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:1203
static int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:405
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2033
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1495
static void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2870
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:46
#define MB_TYPE_CBP
Definition: mpegutils.h:71
#define MB_TYPE_QUANT
Definition: mpegutils.h:70
#define MB_TYPE_SKIP
Definition: mpegutils.h:62
#define MB_TYPE_L1
Definition: mpegutils.h:68
#define HAS_CBP(a)
Definition: mpegutils.h:101
#define MB_TYPE_INTRA
Definition: mpegutils.h:73
#define MB_TYPE_L0L1
Definition: mpegutils.h:69
#define USES_LIST(a, list)
Definition: mpegutils.h:99
#define MB_TYPE_16x8
Definition: mpegutils.h:55
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:58
#define MB_TYPE_16x16
Definition: mpegutils.h:54
#define MB_TYPE_L0
Definition: mpegutils.h:67
#define PICT_TOP_FIELD
Definition: mpegutils.h:37
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define IS_QUANT(a)
Definition: mpegutils.h:95
@ FMT_MPEG1
Definition: mpegutils.h:124
#define PICT_FRAME
Definition: mpegutils.h:39
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
Generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo.c:1181
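For orientation, a minimal sketch (not the actual mpeg12dec.c control flow; error handling simplified, slice decoding elided) of the order in which an MpegEncContext-based decoder drives these helpers for one frame:

    static int decode_one_frame_sketch(MpegEncContext *s, AVCodecContext *avctx)
    {
        int ret = ff_mpv_frame_start(s, avctx);  /* set up current/reference pictures */
        if (ret < 0)
            return ret;
        ff_mpeg_er_frame_start(s);               /* prime error resilience for this frame */
        /* ... decode every slice of the picture here ... */
        ff_mpv_frame_end(s);                     /* finalize the frame after the last slice */
        return 0;
    }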
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1111
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1405
av_cold int ff_mpv_common_init(MpegEncContext *s)
Initialize the common structure for both the encoder and decoder.
Definition: mpegvideo.c:913
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:499
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:2300
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
Definition: mpegvideo.c:1420
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
Definition: mpegvideo.c:1413
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2248
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo.c:699
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2267
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:524
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2260
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo.c:2345
mpegvideo header.
#define PICTURE_START_CODE
Definition: mpegvideo.h:70
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:268
#define SEQ_END_CODE
Definition: mpegvideo.h:67
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:270
#define GOP_START_CODE
Definition: mpegvideo.h:69
#define SLICE_MIN_START_CODE
Definition: mpegvideo.h:71
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideo.h:774
#define SEQ_START_CODE
Definition: mpegvideo.h:68
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
void ff_xvmc_init_block(MpegEncContext *s)
Initialize the block field of the MpegEncContext pointer passed as parameter after making sure that t...
void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
Fill individual block pointers, so there are no gaps in the data_block array in case not all blocks i...
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:95
const char data[16]
Definition: mxf.c:142
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
Combine the (possibly truncated) bitstream into a complete frame.
Definition: parser.c:238
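A hedged sketch of the usual parser pattern around ff_combine_frame(); the names next, buf, buf_size, poutbuf and poutbuf_size are the conventional parser-callback locals, assumed here:

    if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
        /* no complete frame yet: keep buffering and ask for more input */
        *poutbuf      = NULL;
        *poutbuf_size = 0;
        return buf_size;
    }
    /* otherwise buf/buf_size now describe one complete frame */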
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:609
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:607
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:608
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:569
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
@ AV_PIX_FMT_VIDEOTOOLBOX
Hardware decoding through VideoToolbox.
Definition: pixfmt.h:282
@ AV_PIX_FMT_XVMC
XVideo Motion Acceleration via common packet passing.
Definition: pixfmt.h:273
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
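To make the 12bpp figure concrete: a W x H YUV420P frame stores W*H luma samples plus (W/2)*(H/2) samples in each of the two chroma planes, i.e. W*H + 2*(W*H/4) = 1.5*W*H bytes at 8 bits per sample, hence 12 bits per pixel on average.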
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:137
@ AV_PIX_FMT_GRAY8
Y, 8bpp.
Definition: pixfmt.h:74
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:313
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains an ID3D11VideoDecoderOutputView pointer.
Definition: pixfmt.h:229
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:197
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:100
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when it is ready for the next thread to start decoding the next frame.
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
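A minimal sketch of how a format list is resolved through this wrapper (the choice of mpeg2_hwaccel_pixfmt_list_420 is an assumption for illustration; any AV_PIX_FMT_NONE-terminated list above works the same way):

    const enum AVPixelFormat *pix_fmts = mpeg2_hwaccel_pixfmt_list_420;  /* assumed list choice */
    return ff_thread_get_format(avctx, pix_fmts);  /* negotiates with get_format() in a thread-safe way */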
#define s1
Definition: regdef.h:38
#define s2
Definition: regdef.h:39
#define tc
Definition: regdef.h:69
static const float pred[4]
Definition: siprdata.h:259
static int shift(int a, int b)
Definition: sonic.c:82
const uint8_t * code
Definition: spdifenc.c:413
A reference to a data buffer.
Definition: buffer.h:84
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:453
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:486
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:459
main external API structure.
Definition: avcodec.h:536
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:746
int width
picture width / height.
Definition: avcodec.h:709
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:623
int debug
debug
Definition: avcodec.h:1623
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1171
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:561
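The same LSB-first packing is what libavutil's MKTAG() macro produces, so the example in the comment can be written as:

    uint32_t tag = MKTAG('A', 'B', 'C', 'D');
    /* tag == ('D' << 24) | ('C' << 16) | ('B' << 8) | 'A', i.e. the bytes "ABCD" stored LSB first */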
AVRational framerate
Definition: avcodec.h:2071
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:915
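A small, hedged example of how the display aspect ratio is commonly derived from it, using the libavutil helper av_reduce():

    AVRational dar;
    av_reduce(&dar.num, &dar.den,
              (int64_t)avctx->width  * avctx->sample_aspect_ratio.num,
              (int64_t)avctx->height * avctx->sample_aspect_ratio.den,
              1024 * 1024);
    /* dar now holds the display aspect ratio, e.g. 16:9 */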
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:668
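Concretely, MPEG-1/2 decoders set ticks_per_frame to 2 because the time base counts fields; a hedged one-liner for the nominal frame duration in seconds:

    double frame_duration = av_q2d(avctx->time_base) * avctx->ticks_per_frame;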
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1680
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1792
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:826
int level
level
Definition: avcodec.h:1984
int64_t bit_rate
the average bitrate
Definition: avcodec.h:586
const struct AVCodec * codec
Definition: avcodec.h:545
int profile
profile
Definition: avcodec.h:1858
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:2183
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1824
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
Definition: avcodec.h:1719
attribute_deprecated int64_t timecode_frame_start
Definition: avcodec.h:1500
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1416
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1773
int coded_height
Definition: avcodec.h:724
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:616
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:637
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1178
enum AVCodecID codec_id
Definition: avcodec.h:546
int extradata_size
Definition: avcodec.h:638
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:1076
int coded_width
Bitstream width / height, may be different from width/height, e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:724
void * priv_data
Definition: avcodec.h:563
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2006
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1645
AVCodec.
Definition: codec.h:197
enum AVCodecID id
Definition: codec.h:211
const char * name
Name of the codec implementation.
Definition: codec.h:204
Structure to hold side data for an AVFrame.
Definition: frame.h:220
uint8_t * data
Definition: frame.h:222
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:396
AVDictionary * metadata
metadata.
Definition: frame.h:604
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:401
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2500
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2528
This structure stores compressed data.
Definition: packet.h:346
int size
Definition: packet.h:370
uint8_t * data
Definition: packet.h:369
Pan Scan area.
Definition: avcodec.h:424
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int num
Numerator.
Definition: rational.h:59
int den
Denominator.
Definition: rational.h:60
Stereo 3D type: this structure describes how two videos are packed within a single video surface, with additional information as needed.
Definition: stereo3d.h:176
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:72
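In the reconstruction path this is invoked once per 8x8 block, roughly as below (dest and linesize are assumed to already address the current block's position in the output plane):

    s->idsp.idct_put(dest, linesize, block);  /* inverse DCT, clip to 0..255, store */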
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
MpegEncContext m
Definition: mpeg12dec.c:2987
int16_t block[6][64]
Definition: mpeg12dec.c:2990
int first_slice
Definition: mpeg12dec.c:74
int save_height
Definition: mpeg12dec.c:69
uint8_t afd
Definition: mpeg12dec.c:65
int has_stereo3d
Definition: mpeg12dec.c:63
int slice_count
Definition: mpeg12dec.c:67
int save_width
Definition: mpeg12dec.c:69
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:58
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:64
int mpeg_enc_ctx_allocated
Definition: mpeg12dec.c:59
AVRational frame_rate_ext
Definition: mpeg12dec.c:71
int extradata_decoded
Definition: mpeg12dec.c:75
AVPanScan pan_scan
Definition: mpeg12dec.c:61
AVRational save_aspect
Definition: mpeg12dec.c:68
int repeat_field
Definition: mpeg12dec.c:60
int rc_buffer_size
Definition: mpeg12dec.c:70
AVStereo3D stereo3d
Definition: mpeg12dec.c:62
int save_progressive_seq
Definition: mpeg12dec.c:69
MpegEncContext.
Definition: mpegvideo.h:81
int intra_vlc_format
Definition: mpegvideo.h:475
uint16_t inter_matrix[64]
Definition: mpegvideo.h:302
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:153
int qscale
QP.
Definition: mpegvideo.h:204
int intra_dc_precision
Definition: mpegvideo.h:469
int block_last_index[12]
last non-zero coefficient in the block
Definition: mpegvideo.h:86
int alternate_scan
Definition: mpegvideo.h:476
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:185
GetBitContext gb
Definition: mpegvideo.h:453
ScanTable inter_scantable
if inter == intra, then intra should be used to reduce cache usage
Definition: mpegvideo.h:90
uint16_t chroma_inter_matrix[64]
Definition: mpegvideo.h:303
ScanTable intra_scantable
Definition: mpegvideo.h:91
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:154
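A stripped-down sketch of how one slice-decoding thread covers its share of macroblock rows (the real slice_decode_thread() additionally resynchronizes on start codes and reports errors):

    for (int mb_y = s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        /* decode one row of macroblocks assigned to this thread */
    }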
IDCTDSPContext idsp
Definition: mpegvideo.h:230
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:301
RLTable.
Definition: rl.h:39
RL_VLC_ELEM * rl_vlc[32]
decoding only
Definition: rl.h:48
uint8_t permutated[64]
Definition: idctdsp.h:33
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
uint8_t run
Definition: svq3.c:205
uint8_t level
Definition: svq3.c:206
#define ff_dlog(a,...)
#define av_log(a,...)
static int16_t block[64]
Definition: dct.c:116
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
AVFormatContext * ctx
Definition: movenc.c:48
#define height
#define width
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:165
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
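A hedged usage sketch combining the two; tc25bit stands for the 25-bit GOP timecode as read from the bitstream (assumed here):

    char tcbuf[AV_TIMECODE_STR_SIZE];
    av_timecode_make_mpeg_tc_string(tcbuf, tc25bit);  /* e.g. "00:00:01:12" */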
if(ret< 0)
Definition: vf_mcdeint.c:282
static av_always_inline int diff(const uint32_t a, const uint32_t b)
static const uint8_t start_code[]
static double c[64]