FFmpeg  4.4.5
mss2.c
/*
 * Microsoft Screen 2 (aka Windows Media Video V9 Screen) decoder
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Microsoft Screen 2 (aka Windows Media Video V9 Screen) decoder
 */

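/*
 * Overview of the decoding path implemented below: each frame carries a
 * palettized screen layer, decoded either with the MSS1-style arithmetic
 * coder (ff_mss12_decode_rect()) or with a Huffman/RLE coder (decode_rle(),
 * or decode_555() for the RGB555 mode), plus an optional list of rectangles
 * coded as embedded WMV9 (VC-1 Main profile) intra frames that decode_wmv9()
 * decodes and blits into the RGB output picture.
 */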
#include "libavutil/avassert.h"
#include "error_resilience.h"
#include "internal.h"
#include "mpeg_er.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include "vc1.h"
#include "wmv2data.h"
#include "mss12.h"
#include "mss2dsp.h"

typedef struct MSS2Context {
    VC1Context     v;
    int            split_position;
    AVFrame       *last_pic;
    MSS12Context   c;
    MSS2DSPContext dsp;
    QpelDSPContext qdsp;
    SliceContext   sc[2];
} MSS2Context;

static void arith2_normalise(ArithCoder *c)
{
    while ((c->high >> 15) - (c->low >> 15) < 2) {
        if ((c->low ^ c->high) & 0x10000) {
            c->high  ^= 0x8000;
            c->value ^= 0x8000;
            c->low   ^= 0x8000;
        }
        c->high  = (uint16_t)c->high  << 8 | 0xFF;
        c->value = (uint16_t)c->value << 8 | bytestream2_get_byte(c->gbc.gB);
        c->low   = (uint16_t)c->low   << 8;
    }
}

ARITH_GET_BIT(arith2)

/* L. Stuiver and A. Moffat: "Piecewise Integer Mapping for Arithmetic Coding."
 * In Proc. 8th Data Compression Conference (DCC '98), pp. 3-12, Mar. 1998 */

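/*
 * The helpers below implement the piecewise integer mapping from the paper:
 * n is first scaled so that n <= range < 2 * n, then the coding interval of
 * size range is mapped onto n symbols with split = 2 * n - range; the first
 * split symbols occupy one code value each and the remaining n - split
 * symbols occupy two, so split * 1 + (n - split) * 2 == range.
 */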
static int arith2_get_scaled_value(int value, int n, int range)
{
    int split = (n << 1) - range;

    if (value > split)
        return split + (value - split >> 1);
    else
        return value;
}

static void arith2_rescale_interval(ArithCoder *c, int range,
                                    int low, int high, int n)
{
    int split = (n << 1) - range;

    if (high > split)
        c->high = split + (high - split << 1);
    else
        c->high = high;

    c->high += c->low - 1;

    if (low > split)
        c->low += split + (low - split << 1);
    else
        c->low += low;
}

static int arith2_get_number(ArithCoder *c, int n)
{
    int range = c->high - c->low + 1;
    int scale = av_log2(range) - av_log2(n);
    int val;

    if (n << scale > range)
        scale--;

    n <<= scale;

    val = arith2_get_scaled_value(c->value - c->low, n, range) >> scale;

    arith2_rescale_interval(c, range, val << scale, (val + 1) << scale, n);

    arith2_normalise(c);

    return val;
}

static int arith2_get_prob(ArithCoder *c, int16_t *probs)
{
    int range = c->high - c->low + 1, n = *probs;
    int scale = av_log2(range) - av_log2(n);
    int i = 0, val;

    if (n << scale > range)
        scale--;

    n <<= scale;

    val = arith2_get_scaled_value(c->value - c->low, n, range) >> scale;
    while (probs[++i] > val) ;

    arith2_rescale_interval(c, range,
                            probs[i] << scale, probs[i - 1] << scale, n);

    return i;
}

ARITH_GET_MODEL_SYM(arith2)

static int arith2_get_consumed_bytes(ArithCoder *c)
{
    int diff = (c->high >> 16) - (c->low >> 16);
    int bp   = bytestream2_tell(c->gbc.gB) - 3 << 3;
    int bits = 1;

    while (!(diff & 0x80)) {
        bits++;
        diff <<= 1;
    }

    return (bits + bp + 7 >> 3) + ((c->low >> 16) + 1 == c->high >> 16);
}

static void arith2_init(ArithCoder *c, GetByteContext *gB)
{
    c->low           = 0;
    c->high          = 0xFFFFFF;
    c->value         = bytestream2_get_be24(gB);
    c->overread      = 0;
    c->gbc.gB        = gB;
    c->get_model_sym = arith2_get_model_sym;
    c->get_number    = arith2_get_number;
}

static int decode_pal_v2(MSS12Context *ctx, const uint8_t *buf, int buf_size)
{
    int i, ncol;
    uint32_t *pal = ctx->pal + 256 - ctx->free_colours;

    if (!ctx->free_colours)
        return 0;

    ncol = *buf++;
    if (ncol > ctx->free_colours || buf_size < 2 + ncol * 3)
        return AVERROR_INVALIDDATA;
    for (i = 0; i < ncol; i++)
        *pal++ = AV_RB24(buf + 3 * i);

    return 1 + ncol * 3;
}

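/*
 * decode_555() handles the RGB555 screen mode as a byte-oriented RLE:
 * a control byte below 128 supplies the high bits of a literal 15-bit pixel
 * (the next byte supplies the low bits), bytes above 129 accumulate a run
 * length that repeats the current symbol, and 128/129 select the special
 * symbols -1 (copy the pixel from the previous row) and -2 (skip, i.e. leave
 * the destination untouched). Non-keyframes start with a 12-bit-per-field
 * clip rectangle read via READ_PAIR().
 */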
static int decode_555(AVCodecContext *avctx, GetByteContext *gB, uint16_t *dst, ptrdiff_t stride,
                      int keyframe, int w, int h)
{
    int last_symbol = 0, repeat = 0, prev_avail = 0;

    if (!keyframe) {
        int x, y, endx, endy, t;

#define READ_PAIR(a, b)                 \
    a  = bytestream2_get_byte(gB) << 4; \
    t  = bytestream2_get_byte(gB);      \
    a |= t >> 4;                        \
    b  = (t & 0xF) << 8;                \
    b |= bytestream2_get_byte(gB);      \

        READ_PAIR(x, endx)
        READ_PAIR(y, endy)

        if (endx >= w || endy >= h || x > endx || y > endy)
            return AVERROR_INVALIDDATA;
        dst += x + stride * y;
        w    = endx - x + 1;
        h    = endy - y + 1;
        if (y)
            prev_avail = 1;
    }

    do {
        uint16_t *p = dst;
        do {
            if (repeat-- < 1) {
                int b = bytestream2_get_byte(gB);
                if (b < 128)
                    last_symbol = b << 8 | bytestream2_get_byte(gB);
                else if (b > 129) {
                    repeat = 0;
                    while (b-- > 130) {
                        if (repeat >= (INT_MAX >> 8) - 1) {
                            av_log(avctx, AV_LOG_ERROR, "repeat overflow\n");
                            return AVERROR_INVALIDDATA;
                        }
                        repeat = (repeat << 8) + bytestream2_get_byte(gB) + 1;
                    }
                    if (last_symbol == -2) {
                        int skip = FFMIN((unsigned)repeat, dst + w - p);
                        repeat -= skip;
                        p      += skip;
                    }
                } else
                    last_symbol = 127 - b;
            }
            if (last_symbol >= 0)
                *p = last_symbol;
            else if (last_symbol == -1 && prev_avail)
                *p = *(p - stride);
        } while (++p < dst + w);
        dst        += stride;
        prev_avail  = 1;
    } while (--h);

    return 0;
}

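/*
 * decode_rle() covers the paletted RLE mode: it first reads a canonical
 * Huffman code over an alphabet of 270 - keyframe symbols (explicitly coded
 * lengths, then the remaining symbols filled in lexicographically at the
 * minimal length), where symbols 0-255 are palette indices, 256-266 encode
 * run lengths, and the top symbols map via 267 - b to -1 (copy from the
 * previous row) and -2 (skip). Each pixel is written to both the palette
 * plane and the RGB24 plane. Keyframes are split at kf_slipt into two
 * slices; non-keyframes start with a 12-bit-per-field clip rectangle.
 */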
static int decode_rle(GetBitContext *gb, uint8_t *pal_dst, ptrdiff_t pal_stride,
                      uint8_t *rgb_dst, ptrdiff_t rgb_stride, uint32_t *pal,
                      int keyframe, int kf_slipt, int slice, int w, int h)
{
    uint8_t bits[270] = { 0 };
    uint32_t codes[270];
    VLC vlc;

    int current_length = 0, read_codes = 0, next_code = 0, current_codes = 0;
    int remaining_codes, surplus_codes, i;

    const int alphabet_size = 270 - keyframe;

    int last_symbol = 0, repeat = 0, prev_avail = 0;

    if (!keyframe) {
        int x, y, clipw, cliph;

        x     = get_bits(gb, 12);
        y     = get_bits(gb, 12);
        clipw = get_bits(gb, 12) + 1;
        cliph = get_bits(gb, 12) + 1;

        if (x + clipw > w || y + cliph > h)
            return AVERROR_INVALIDDATA;
        pal_dst += pal_stride * y + x;
        rgb_dst += rgb_stride * y + x * 3;
        w        = clipw;
        h        = cliph;
        if (y)
            prev_avail = 1;
    } else {
        if (slice > 0) {
            pal_dst   += pal_stride * kf_slipt;
            rgb_dst   += rgb_stride * kf_slipt;
            prev_avail = 1;
            h         -= kf_slipt;
        } else
            h = kf_slipt;
    }

    /* read explicit codes */
    do {
        while (current_codes--) {
            int symbol = get_bits(gb, 8);
            if (symbol >= 204 - keyframe)
                symbol += 14 - keyframe;
            else if (symbol > 189)
                symbol = get_bits1(gb) + (symbol << 1) - 190;
            if (bits[symbol])
                return AVERROR_INVALIDDATA;
            bits[symbol]  = current_length;
            codes[symbol] = next_code++;
            read_codes++;
        }
        current_length++;
        next_code <<= 1;
        remaining_codes = (1 << current_length) - next_code;
        current_codes   = get_bits(gb, av_ceil_log2(remaining_codes + 1));
        if (current_length > 22 || current_codes > remaining_codes)
            return AVERROR_INVALIDDATA;
    } while (current_codes != remaining_codes);

    remaining_codes = alphabet_size - read_codes;

    /* determine the minimum length to fit the rest of the alphabet */
    while ((surplus_codes = (2 << current_length) -
                            (next_code << 1) - remaining_codes) < 0) {
        current_length++;
        next_code <<= 1;
    }

    /* add the rest of the symbols lexicographically */
    for (i = 0; i < alphabet_size; i++)
        if (!bits[i]) {
            if (surplus_codes-- == 0) {
                current_length++;
                next_code <<= 1;
            }
            bits[i]  = current_length;
            codes[i] = next_code++;
        }

    if (next_code != 1 << current_length)
        return AVERROR_INVALIDDATA;

    if ((i = init_vlc(&vlc, 9, alphabet_size, bits, 1, 1, codes, 4, 4, 0)) < 0)
        return i;

    /* frame decode */
    do {
        uint8_t *pp = pal_dst;
        uint8_t *rp = rgb_dst;
        do {
            if (repeat-- < 1) {
                int b = get_vlc2(gb, vlc.table, 9, 3);
                if (b < 256)
                    last_symbol = b;
                else if (b < 268) {
                    b -= 256;
                    if (b == 11)
                        b = get_bits(gb, 4) + 10;

                    if (!b)
                        repeat = 0;
                    else
                        repeat = get_bits(gb, b);

                    repeat += (1 << b) - 1;

                    if (last_symbol == -2) {
                        int skip = FFMIN(repeat, pal_dst + w - pp);
                        repeat -= skip;
                        pp     += skip;
                        rp     += skip * 3;
                    }
                } else
                    last_symbol = 267 - b;
            }
            if (last_symbol >= 0) {
                *pp = last_symbol;
                AV_WB24(rp, pal[last_symbol]);
            } else if (last_symbol == -1 && prev_avail) {
                *pp = *(pp - pal_stride);
                memcpy(rp, rp - rgb_stride, 3);
            }
            rp += 3;
        } while (++pp < pal_dst + w);
        pal_dst += pal_stride;
        rgb_dst += rgb_stride;
        prev_avail = 1;
    } while (--h);

    ff_free_vlc(&vlc);
    return 0;
}

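/*
 * decode_wmv9() decodes one rectangle coded as an embedded WMV9 (VC-1 Main
 * profile) intra frame with the regular VC-1 frame-layer code; the RESPIC
 * flags signal a half-resolution rectangle that is upsampled afterwards.
 * The reconstructed YUV planes are then blitted into the RGB output picture,
 * either directly or masked by the palette plane (wmv9_mask).
 */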
static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
                       int x, int y, int w, int h, int wmv9_mask)
{
    MSS2Context *ctx  = avctx->priv_data;
    MSS12Context *c   = &ctx->c;
    VC1Context *v     = avctx->priv_data;
    MpegEncContext *s = &v->s;
    AVFrame *f;
    int ret;

    ff_mpeg_flush(avctx);

    if ((ret = init_get_bits8(&s->gb, buf, buf_size)) < 0)
        return ret;

    s->loop_filter = avctx->skip_loop_filter < AVDISCARD_ALL;

    if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
        av_log(v->s.avctx, AV_LOG_ERROR, "header error\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->pict_type != AV_PICTURE_TYPE_I) {
        av_log(v->s.avctx, AV_LOG_ERROR, "expected I-frame\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;

    if ((ret = ff_mpv_frame_start(s, avctx)) < 0) {
        av_log(v->s.avctx, AV_LOG_ERROR, "ff_mpv_frame_start error\n");
        avctx->pix_fmt = AV_PIX_FMT_RGB24;
        return ret;
    }

    ff_mpeg_er_frame_start(s);

    v->end_mb_x = (w + 15) >> 4;
    s->end_mb_y = (h + 15) >> 4;
    if (v->respic & 1)
        v->end_mb_x = v->end_mb_x + 1 >> 1;
    if (v->respic & 2)
        s->end_mb_y = s->end_mb_y + 1 >> 1;

    ff_vc1_decode_blocks(v);

    if (v->end_mb_x == s->mb_width && s->end_mb_y == s->mb_height) {
        ff_er_frame_end(&s->er);
    } else {
        avpriv_request_sample(v->s.avctx,
                              "disabling error correction due to block count mismatch %dx%d != %dx%d\n",
                              v->end_mb_x, s->end_mb_y, s->mb_width, s->mb_height);
    }

    ff_mpv_frame_end(s);

    f = s->current_picture.f;

    if (v->respic == 3) {
        ctx->dsp.upsample_plane(f->data[0], f->linesize[0], w, h);
        ctx->dsp.upsample_plane(f->data[1], f->linesize[1], w+1 >> 1, h+1 >> 1);
        ctx->dsp.upsample_plane(f->data[2], f->linesize[2], w+1 >> 1, h+1 >> 1);
    } else if (v->respic)
        avpriv_request_sample(v->s.avctx,
                              "Asymmetric WMV9 rectangle subsampling");

    av_assert0(f->linesize[1] == f->linesize[2]);

    if (wmv9_mask != -1)
        ctx->dsp.mss2_blit_wmv9_masked(c->rgb_pic + y * c->rgb_stride + x * 3,
                                       c->rgb_stride, wmv9_mask,
                                       c->pal_pic + y * c->pal_stride + x,
                                       c->pal_stride,
                                       f->data[0], f->linesize[0],
                                       f->data[1], f->data[2], f->linesize[1],
                                       w, h);
    else
        ctx->dsp.mss2_blit_wmv9(c->rgb_pic + y * c->rgb_stride + x * 3,
                                c->rgb_stride,
                                f->data[0], f->linesize[0],
                                f->data[1], f->data[2], f->linesize[1],
                                w, h);

    avctx->pix_fmt = AV_PIX_FMT_RGB24;

    return 0;
}

struct Rectangle {
    int coded, x, y, w, h;
};

#define MAX_WMV9_RECTANGLES 20
#define ARITH2_PADDING 2

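/*
 * mss2_decode_frame() parses the frame header bits (keyframe flag, presence
 * of WMV9 rectangles, motion vector, RLE and RGB555 modes, slice split
 * position), then the arithmetically coded list of WMV9 rectangles, the
 * palette update on keyframes, and finally the screen layer itself via
 * decode_555(), decode_rle() or ff_mss12_decode_rect(), before decoding any
 * WMV9-coded rectangles with decode_wmv9().
 */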
static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    MSS2Context *ctx   = avctx->priv_data;
    MSS12Context *c    = &ctx->c;
    AVFrame *frame     = data;
    GetBitContext gb;
    GetByteContext gB;
    ArithCoder acoder;

    int keyframe, has_wmv9, has_mv, is_rle, is_555, ret;

    struct Rectangle wmv9rects[MAX_WMV9_RECTANGLES], *r;
    int used_rects = 0, i, implicit_rect = 0, av_uninit(wmv9_mask);

    if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
        return ret;

    if (keyframe = get_bits1(&gb))
        skip_bits(&gb, 7);
    has_wmv9 = get_bits1(&gb);
    has_mv   = keyframe ? 0 : get_bits1(&gb);
    is_rle   = get_bits1(&gb);
    is_555   = is_rle && get_bits1(&gb);
    if (c->slice_split > 0)
        ctx->split_position = c->slice_split;
    else if (c->slice_split < 0) {
        if (get_bits1(&gb)) {
            if (get_bits1(&gb)) {
                if (get_bits1(&gb))
                    ctx->split_position = get_bits(&gb, 16);
                else
                    ctx->split_position = get_bits(&gb, 12);
            } else
                ctx->split_position = get_bits(&gb, 8) << 4;
        } else {
            if (keyframe)
                ctx->split_position = avctx->height / 2;
        }
    } else
        ctx->split_position = avctx->height;

    if (c->slice_split && (ctx->split_position < 1 - is_555 ||
                           ctx->split_position > avctx->height - 1))
        return AVERROR_INVALIDDATA;

    align_get_bits(&gb);
    buf      += get_bits_count(&gb) >> 3;
    buf_size -= get_bits_count(&gb) >> 3;

    if (buf_size < 1)
        return AVERROR_INVALIDDATA;

    if (is_555 && (has_wmv9 || has_mv || c->slice_split && ctx->split_position))
        return AVERROR_INVALIDDATA;

    avctx->pix_fmt = is_555 ? AV_PIX_FMT_RGB555 : AV_PIX_FMT_RGB24;
    if (ctx->last_pic->format != avctx->pix_fmt)
        av_frame_unref(ctx->last_pic);

    if (has_wmv9) {
        bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
        arith2_init(&acoder, &gB);

        implicit_rect = !arith2_get_bit(&acoder);

        while (arith2_get_bit(&acoder)) {
            if (used_rects == MAX_WMV9_RECTANGLES)
                return AVERROR_INVALIDDATA;
            r = &wmv9rects[used_rects];
            if (!used_rects)
                r->x = arith2_get_number(&acoder, avctx->width);
            else
                r->x = arith2_get_number(&acoder, avctx->width -
                                         wmv9rects[used_rects - 1].x) +
                       wmv9rects[used_rects - 1].x;
            r->y = arith2_get_number(&acoder, avctx->height);
            r->w = arith2_get_number(&acoder, avctx->width  - r->x) + 1;
            r->h = arith2_get_number(&acoder, avctx->height - r->y) + 1;
            used_rects++;
        }

        if (implicit_rect && used_rects) {
            av_log(avctx, AV_LOG_ERROR, "implicit_rect && used_rects > 0\n");
            return AVERROR_INVALIDDATA;
        }

        if (implicit_rect) {
            wmv9rects[0].x = 0;
            wmv9rects[0].y = 0;
            wmv9rects[0].w = avctx->width;
            wmv9rects[0].h = avctx->height;

            used_rects = 1;
        }
        for (i = 0; i < used_rects; i++) {
            if (!implicit_rect && arith2_get_bit(&acoder)) {
                av_log(avctx, AV_LOG_ERROR, "Unexpected grandchildren\n");
                return AVERROR_INVALIDDATA;
            }
            if (!i) {
                wmv9_mask = arith2_get_bit(&acoder) - 1;
                if (!wmv9_mask)
                    wmv9_mask = arith2_get_number(&acoder, 256);
            }
            wmv9rects[i].coded = arith2_get_number(&acoder, 2);
        }

        buf      += arith2_get_consumed_bytes(&acoder);
        buf_size -= arith2_get_consumed_bytes(&acoder);
        if (buf_size < 1)
            return AVERROR_INVALIDDATA;
    }

    c->mvX = c->mvY = 0;
    if (keyframe && !is_555) {
        if ((i = decode_pal_v2(c, buf, buf_size)) < 0)
            return AVERROR_INVALIDDATA;
        buf      += i;
        buf_size -= i;
    } else if (has_mv) {
        buf      += 4;
        buf_size -= 4;
        if (buf_size < 1)
            return AVERROR_INVALIDDATA;
        c->mvX = AV_RB16(buf - 4) - avctx->width;
        c->mvY = AV_RB16(buf - 2) - avctx->height;
    }

    if (c->mvX < 0 || c->mvY < 0) {
        FFSWAP(uint8_t *, c->pal_pic, c->last_pal_pic);

        if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
            return ret;

        if (ctx->last_pic->data[0]) {
            av_assert0(frame->linesize[0] == ctx->last_pic->linesize[0]);
            c->last_rgb_pic = ctx->last_pic->data[0] +
                              ctx->last_pic->linesize[0] * (avctx->height - 1);
        } else {
            av_log(avctx, AV_LOG_ERROR, "Missing keyframe\n");
            return AVERROR_INVALIDDATA;
        }
    } else {
        if ((ret = ff_reget_buffer(avctx, ctx->last_pic, 0)) < 0)
            return ret;
        if ((ret = av_frame_ref(frame, ctx->last_pic)) < 0)
            return ret;

        c->last_rgb_pic = NULL;
    }
    c->rgb_pic    = frame->data[0] +
                    frame->linesize[0] * (avctx->height - 1);
    c->rgb_stride = -frame->linesize[0];

    frame->key_frame = keyframe;
    frame->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    if (is_555) {
        bytestream2_init(&gB, buf, buf_size);

        if (decode_555(avctx, &gB, (uint16_t *)c->rgb_pic, c->rgb_stride >> 1,
                       keyframe, avctx->width, avctx->height))
            return AVERROR_INVALIDDATA;

        buf_size -= bytestream2_tell(&gB);
    } else {
        if (keyframe) {
            c->corrupted = 0;
            ff_mss12_slicecontext_reset(&ctx->sc[0]);
            if (c->slice_split)
                ff_mss12_slicecontext_reset(&ctx->sc[1]);
        }
        if (is_rle) {
            if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
                return ret;
            if (ret = decode_rle(&gb, c->pal_pic, c->pal_stride,
                                 c->rgb_pic, c->rgb_stride, c->pal, keyframe,
                                 ctx->split_position, 0,
                                 avctx->width, avctx->height))
                return ret;
            align_get_bits(&gb);

            if (c->slice_split)
                if (ret = decode_rle(&gb, c->pal_pic, c->pal_stride,
                                     c->rgb_pic, c->rgb_stride, c->pal, keyframe,
                                     ctx->split_position, 1,
                                     avctx->width, avctx->height))
                    return ret;

            align_get_bits(&gb);
            buf      += get_bits_count(&gb) >> 3;
            buf_size -= get_bits_count(&gb) >> 3;
        } else if (!implicit_rect || wmv9_mask != -1) {
            if (c->corrupted)
                return AVERROR_INVALIDDATA;
            bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
            arith2_init(&acoder, &gB);
            c->keyframe = keyframe;
            if (c->corrupted = ff_mss12_decode_rect(&ctx->sc[0], &acoder, 0, 0,
                                                    avctx->width,
                                                    ctx->split_position))
                return AVERROR_INVALIDDATA;

            buf      += arith2_get_consumed_bytes(&acoder);
            buf_size -= arith2_get_consumed_bytes(&acoder);
            if (c->slice_split) {
                if (buf_size < 1)
                    return AVERROR_INVALIDDATA;
                bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
                arith2_init(&acoder, &gB);
                if (c->corrupted = ff_mss12_decode_rect(&ctx->sc[1], &acoder, 0,
                                                        ctx->split_position,
                                                        avctx->width,
                                                        avctx->height - ctx->split_position))
                    return AVERROR_INVALIDDATA;

                buf      += arith2_get_consumed_bytes(&acoder);
                buf_size -= arith2_get_consumed_bytes(&acoder);
            }
        } else
            memset(c->pal_pic, 0, c->pal_stride * avctx->height);
    }

    if (has_wmv9) {
        for (i = 0; i < used_rects; i++) {
            int x = wmv9rects[i].x;
            int y = wmv9rects[i].y;
            int w = wmv9rects[i].w;
            int h = wmv9rects[i].h;
            if (wmv9rects[i].coded) {
                int WMV9codedFrameSize;
                if (buf_size < 4 || !(WMV9codedFrameSize = AV_RL24(buf)))
                    return AVERROR_INVALIDDATA;
                if (ret = decode_wmv9(avctx, buf + 3, buf_size - 3,
                                      x, y, w, h, wmv9_mask))
                    return ret;
                buf      += WMV9codedFrameSize + 3;
                buf_size -= WMV9codedFrameSize + 3;
            } else {
                uint8_t *dst = c->rgb_pic + y * c->rgb_stride + x * 3;
                if (wmv9_mask != -1) {
                    ctx->dsp.mss2_gray_fill_masked(dst, c->rgb_stride,
                                                   wmv9_mask,
                                                   c->pal_pic + y * c->pal_stride + x,
                                                   c->pal_stride,
                                                   w, h);
                } else {
                    do {
                        memset(dst, 0x80, w * 3);
                        dst += c->rgb_stride;
                    } while (--h);
                }
            }
        }
    }

    if (buf_size)
        av_log(avctx, AV_LOG_WARNING, "buffer not fully consumed\n");

    if (c->mvX < 0 || c->mvY < 0) {
        av_frame_unref(ctx->last_pic);
        ret = av_frame_ref(ctx->last_pic, frame);
        if (ret < 0)
            return ret;
    }

    *got_frame = 1;

    return avpkt->size;
}

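/*
 * The embedded WMV9 rectangles are decoded starting at the frame layer (see
 * decode_wmv9() above), so wmv9_init() fills in by hand the VC-1
 * sequence-header state (Main profile, postprocessing strengths, transform
 * flags, scan tables, ...) that ff_vc1_parse_frame_header() and the block
 * decoder expect to have been parsed from a sequence header.
 */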
static av_cold int wmv9_init(AVCodecContext *avctx)
{
    VC1Context *v = avctx->priv_data;
    int ret;

    v->s.avctx = avctx;

    if ((ret = ff_vc1_init_common(v)) < 0)
        return ret;
    ff_vc1dsp_init(&v->vc1dsp);

    v->profile = PROFILE_MAIN;

    v->zz_8x4     = ff_wmv2_scantableA;
    v->zz_4x8     = ff_wmv2_scantableB;
    v->res_y411   = 0;
    v->res_sprite = 0;

    v->frmrtq_postproc = 7;
    v->bitrtq_postproc = 31;

    v->res_x8     = 0;
    v->multires   = 0;
    v->res_fasttx = 1;

    v->fastuvmc = 0;

    v->extended_mv = 0;

    v->dquant      = 1;
    v->vstransform = 1;

    v->res_transtab = 0;

    v->overlap = 0;

    v->resync_marker = 0;
    v->rangered      = 0;

    v->s.max_b_frames = avctx->max_b_frames = 0;
    v->quantizer_mode = 0;

    v->finterpflag = 0;

    v->res_rtm_flag = 1;

    ff_vc1_init_transposed_scantables(v);

    if ((ret = ff_msmpeg4_decode_init(avctx)) < 0 ||
        (ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
        return ret;

    /* error concealment */
    v->s.me.qpel_put = v->s.qdsp.put_qpel_pixels_tab;
    v->s.me.qpel_avg = v->s.qdsp.avg_qpel_pixels_tab;

    return 0;
}

static av_cold int mss2_decode_end(AVCodecContext *avctx)
{
    MSS2Context *const ctx = avctx->priv_data;

    av_frame_free(&ctx->last_pic);

    ff_mss12_decode_end(&ctx->c);
    av_freep(&ctx->c.pal_pic);
    av_freep(&ctx->c.last_pal_pic);
    ff_vc1_decode_end(avctx);

    return 0;
}

static av_cold int mss2_decode_init(AVCodecContext *avctx)
{
    MSS2Context * const ctx = avctx->priv_data;
    MSS12Context *c = &ctx->c;
    int ret;
    c->avctx = avctx;
    if (ret = ff_mss12_decode_init(c, 1, &ctx->sc[0], &ctx->sc[1]))
        return ret;
    ctx->last_pic   = av_frame_alloc();
    c->pal_stride   = c->mask_stride;
    c->pal_pic      = av_mallocz(c->pal_stride * avctx->height);
    c->last_pal_pic = av_mallocz(c->pal_stride * avctx->height);
    if (!c->pal_pic || !c->last_pal_pic || !ctx->last_pic) {
        mss2_decode_end(avctx);
        return AVERROR(ENOMEM);
    }
    if (ret = wmv9_init(avctx)) {
        mss2_decode_end(avctx);
        return ret;
    }
    ff_mss2dsp_init(&ctx->dsp);
    ff_qpeldsp_init(&ctx->qdsp);

    avctx->pix_fmt = c->free_colours == 127 ? AV_PIX_FMT_RGB555
                                            : AV_PIX_FMT_RGB24;

    return 0;
}

AVCodec ff_mss2_decoder = {
    .name           = "mss2",
    .long_name      = NULL_IF_CONFIG_SMALL("MS Windows Media Video V9 Screen"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSS2,
    .priv_data_size = sizeof(MSS2Context),
    .init           = mss2_decode_init,
    .close          = mss2_decode_end,
    .decode         = mss2_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};