FFmpeg  4.4.5
decklink_dec.cpp
1 /*
2  * Blackmagic DeckLink input
3  * Copyright (c) 2013-2014 Luca Barbato, Deti Fliegl
4  * Copyright (c) 2014 Rafaël Carré
5  * Copyright (c) 2017 Akamai Technologies, Inc.
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
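/*
 * Typical capture invocation (the device name here is only an example):
 *     ffmpeg -f decklink -format_code Hi50 -raw_format uyvy422 \
 *            -i "DeckLink Mini Recorder" out.mkv
 * -format_code can be left out to rely on the mode autodetection implemented
 * below; the option names correspond to the decklink_cctx fields read in
 * ff_decklink_read_header().
 */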
23 
24 #include <atomic>
25 #include <vector>
26 using std::atomic;
27 
28 /* Include internal.h first to avoid conflict between winsock.h (used by
29  * DeckLink headers) and winsock2.h (used by libavformat) in MSVC++ builds */
30 extern "C" {
31 #include "libavformat/internal.h"
32 }
33 
34 #include <DeckLinkAPI.h>
35 
36 extern "C" {
37 #include "config.h"
39 #include "libavformat/avformat.h"
40 #include "libavutil/avassert.h"
41 #include "libavutil/avutil.h"
42 #include "libavutil/common.h"
43 #include "libavutil/internal.h"
44 #include "libavutil/imgutils.h"
45 #include "libavutil/intreadwrite.h"
46 #include "libavutil/time.h"
47 #include "libavutil/timecode.h"
48 #include "libavutil/mathematics.h"
49 #include "libavutil/reverse.h"
50 #include "avdevice.h"
51 #if CONFIG_LIBZVBI
52 #include <libzvbi.h>
53 #endif
54 }
55 
56 #include "decklink_common.h"
57 #include "decklink_dec.h"
58 
59 #define MAX_WIDTH_VANC 1920
60 const BMDDisplayMode AUTODETECT_DEFAULT_MODE = bmdModeNTSC;
61 
62 typedef struct VANCLineNumber {
63  BMDDisplayMode mode;
 64  int vanc_start;
 65  int field0_vanc_end;
 66  int field1_vanc_start;
 67  int vanc_end;
 68 } VANCLineNumber;
 69 
70 /* These VANC line numbers need not be very accurate. In any case
71  * GetBufferForVerticalBlankingLine() will return an error when invalid
72  * ancillary line number was requested. We just need to make sure that the
73  * entire VANC region is covered, while making sure we don't decode VANC of
 74  * another source during switching*/
 75 static VANCLineNumber vanc_line_numbers[] = {
76  /* SD Modes */
77 
78  {bmdModeNTSC, 11, 19, 274, 282},
79  {bmdModeNTSC2398, 11, 19, 274, 282},
80  {bmdModePAL, 7, 22, 320, 335},
81  {bmdModeNTSCp, 11, -1, -1, 39},
82  {bmdModePALp, 7, -1, -1, 45},
83 
84  /* HD 1080 Modes */
85 
86  {bmdModeHD1080p2398, 8, -1, -1, 42},
87  {bmdModeHD1080p24, 8, -1, -1, 42},
88  {bmdModeHD1080p25, 8, -1, -1, 42},
89  {bmdModeHD1080p2997, 8, -1, -1, 42},
90  {bmdModeHD1080p30, 8, -1, -1, 42},
91  {bmdModeHD1080i50, 8, 20, 570, 585},
92  {bmdModeHD1080i5994, 8, 20, 570, 585},
93  {bmdModeHD1080i6000, 8, 20, 570, 585},
94  {bmdModeHD1080p50, 8, -1, -1, 42},
95  {bmdModeHD1080p5994, 8, -1, -1, 42},
96  {bmdModeHD1080p6000, 8, -1, -1, 42},
97 
98  /* HD 720 Modes */
99 
100  {bmdModeHD720p50, 8, -1, -1, 26},
101  {bmdModeHD720p5994, 8, -1, -1, 26},
102  {bmdModeHD720p60, 8, -1, -1, 26},
103 
104  /* For all other modes, for which we don't support VANC */
105  {bmdModeUnknown, 0, -1, -1, -1}
106 };
107 
108 class decklink_allocator : public IDeckLinkMemoryAllocator
109 {
 110 public:
 111  decklink_allocator(): _refs(1) { }
112  virtual ~decklink_allocator() { }
113 
114  // IDeckLinkMemoryAllocator methods
115  virtual HRESULT STDMETHODCALLTYPE AllocateBuffer(unsigned int bufferSize, void* *allocatedBuffer)
116  {
117  void *buf = av_malloc(bufferSize + AV_INPUT_BUFFER_PADDING_SIZE);
118  if (!buf)
119  return E_OUTOFMEMORY;
120  *allocatedBuffer = buf;
121  return S_OK;
122  }
123  virtual HRESULT STDMETHODCALLTYPE ReleaseBuffer(void* buffer)
124  {
125  av_free(buffer);
126  return S_OK;
127  }
128  virtual HRESULT STDMETHODCALLTYPE Commit() { return S_OK; }
129  virtual HRESULT STDMETHODCALLTYPE Decommit() { return S_OK; }
130 
131  // IUnknown methods
132  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
133  virtual ULONG STDMETHODCALLTYPE AddRef(void) { return ++_refs; }
134  virtual ULONG STDMETHODCALLTYPE Release(void)
135  {
136  int ret = --_refs;
137  if (!ret)
138  delete this;
139  return ret;
140  }
141 
142 private:
143  std::atomic<int> _refs;
144 };
145 
146 extern "C" {
147 static void decklink_object_free(void *opaque, uint8_t *data)
148 {
149  IUnknown *obj = (class IUnknown *)opaque;
150  obj->Release();
151 }
152 }
153 
154 static int get_vanc_line_idx(BMDDisplayMode mode)
155 {
156  unsigned int i;
157  for (i = 0; i < FF_ARRAY_ELEMS(vanc_line_numbers); i++) {
158  if (mode == vanc_line_numbers[i].mode)
159  return i;
160  }
161  /* Return the VANC idx for Unknown mode */
162  return i - 1;
163 }
164 
165 static inline void clear_parity_bits(uint16_t *buf, int len) {
166  int i;
167  for (i = 0; i < len; i++)
168  buf[i] &= 0xff;
169 }
170 
171 static int check_vanc_parity_checksum(uint16_t *buf, int len, uint16_t checksum) {
172  int i;
173  uint16_t vanc_sum = 0;
174  for (i = 3; i < len - 1; i++) {
175  uint16_t v = buf[i];
176  int np = v >> 8;
177  int p = av_parity(v & 0xff);
178  if ((!!p ^ !!(v & 0x100)) || (np != 1 && np != 2)) {
179  // Parity check failed
180  return -1;
181  }
182  vanc_sum += v;
183  }
184  vanc_sum &= 0x1ff;
185  vanc_sum |= ((~vanc_sum & 0x100) << 1);
186  if (checksum != vanc_sum) {
187  // Checksum verification failed
188  return -1;
189  }
190  return 0;
191 }
192 
193 /* The 10-bit VANC data is packed in V210, we only need the luma component. */
194 static void extract_luma_from_v210(uint16_t *dst, const uint8_t *src, int width)
195 {
196  int i;
197  for (i = 0; i < width / 3; i++) {
198  *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
199  *dst++ = src[4] + ((src[5] & 3) << 8);
200  *dst++ = (src[6] >> 4) + ((src[7] & 63) << 4);
201  src += 8;
202  }
203 }
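/* v210 packs three 10-bit components into each little-endian 32-bit word
 * (bits 0-9, 10-19 and 20-29), six pixels per 16-byte group in the component
 * order Cb Y Cr / Y Cb Y / Cr Y Cb / Y Cr Y.  The helper above walks the data
 * 8 bytes (two words) at a time and keeps only the luma samples: bits 10-19 of
 * the even word plus bits 0-9 and 20-29 of the odd word. */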
204 
205 static void unpack_v210(uint16_t *dst, const uint8_t *src, int width)
206 {
207  int i;
208  for (i = 0; i < width * 2 / 3; i++) {
209  *dst++ = src[0] + ((src[1] & 3) << 8);
210  *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
211  *dst++ = (src[2] >> 4) + ((src[3] & 63) << 4);
212  src += 4;
213  }
214 }
215 
 216 static uint8_t calc_parity_and_line_offset(int line)
 217 {
218  uint8_t ret = (line < 313) << 5;
219  if (line >= 7 && line <= 22)
220  ret += line;
221  if (line >= 320 && line <= 335)
222  ret += (line - 313);
223  return ret;
224 }
225 
226 static void fill_data_unit_head(int line, uint8_t *tgt)
227 {
228  tgt[0] = 0x02; // data_unit_id
229  tgt[1] = 0x2c; // data_unit_length
230  tgt[2] = calc_parity_and_line_offset(line); // field_parity, line_offset
231  tgt[3] = 0xe4; // framing code
232 }
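/* The teletext buffer assembled by this demuxer follows the DVB/EBU teletext
 * PES layout of ETSI EN 300 472 / EN 301 775: a data_identifier byte (0x10)
 * followed by 46-byte data units, each made of data_unit_id (0x02),
 * data_unit_length (0x2c = 44), a field_parity/line_offset byte, the 0xe4
 * framing code and 42 bytes of teletext payload; 0xff units act as stuffing. */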
233 
234 #if CONFIG_LIBZVBI
235 static uint8_t* teletext_data_unit_from_vbi_data(int line, uint8_t *src, uint8_t *tgt, vbi_pixfmt fmt)
236 {
237  vbi_bit_slicer slicer;
238 
239  vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff, 18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, fmt);
240 
241  if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
242  return tgt;
243 
 244  fill_data_unit_head(line, tgt);
 245 
246  return tgt + 46;
247 }
248 
249 static uint8_t* teletext_data_unit_from_vbi_data_10bit(int line, uint8_t *src, uint8_t *tgt)
250 {
251  uint8_t y[720];
252  uint8_t *py = y;
253  uint8_t *pend = y + 720;
254  /* The 10-bit VBI data is packed in V210, but libzvbi only supports 8-bit,
255  * so we extract the 8 MSBs of the luma component, that is enough for
256  * teletext bit slicing. */
257  while (py < pend) {
258  *py++ = (src[1] >> 4) + ((src[2] & 15) << 4);
259  *py++ = (src[4] >> 2) + ((src[5] & 3 ) << 6);
260  *py++ = (src[6] >> 6) + ((src[7] & 63) << 2);
261  src += 8;
262  }
263  return teletext_data_unit_from_vbi_data(line, y, tgt, VBI_PIXFMT_YUV420);
264 }
265 #endif
266 
 267 static uint8_t* teletext_data_unit_from_op47_vbi_data(int line, uint16_t *py, uint8_t *tgt)
 268 {
269  int i;
270 
271  if (py[0] != 0x255 || py[1] != 0x255 || py[2] != 0x227)
272  return tgt;
273 
 274  fill_data_unit_head(line, tgt);
 275 
276  py += 3;
277  tgt += 4;
278 
279  for (i = 0; i < 42; i++)
280  *tgt++ = ff_reverse[py[i] & 255];
281 
282  return tgt;
283 }
284 
 285 static int linemask_matches(int line, int64_t mask)
 286 {
287  int shift = -1;
288  if (line >= 6 && line <= 22)
289  shift = line - 6;
290  if (line >= 318 && line <= 335)
291  shift = line - 318 + 17;
292  return shift >= 0 && ((1ULL << shift) & mask);
293 }
294 
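/* The next helper parses the Subtitle Distribution Packet of Free TV Australia
 * OP-47 as carried in VANC with DID 0x43 / SDID 0x02: after the 0x51 0x15
 * identifier, a length byte and the 0x02 format code come five descriptor
 * bytes (bit 7 selects the field, bits 0-4 the line number), followed by up to
 * five 45-byte teletext packets, one per used descriptor. */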
295 static uint8_t* teletext_data_unit_from_op47_data(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines)
296 {
297  if (py < pend - 9) {
298  if (py[0] == 0x151 && py[1] == 0x115 && py[3] == 0x102) { // identifier, identifier, format code for WST teletext
299  uint16_t *descriptors = py + 4;
300  int i;
301  py += 9;
302  for (i = 0; i < 5 && py < pend - 45; i++, py += 45) {
303  int line = (descriptors[i] & 31) + (!(descriptors[i] & 128)) * 313;
304  if (line && linemask_matches(line, wanted_lines))
 305  tgt = teletext_data_unit_from_op47_vbi_data(line, py, tgt);
 306  }
307  }
308  }
309  return tgt;
310 }
311 
312 static uint8_t* teletext_data_unit_from_ancillary_packet(uint16_t *py, uint16_t *pend, uint8_t *tgt, int64_t wanted_lines, int allow_multipacket)
313 {
314  uint16_t did = py[0]; // data id
315  uint16_t sdid = py[1]; // secondary data id
316  uint16_t dc = py[2] & 255; // data count
317  py += 3;
318  pend = FFMIN(pend, py + dc);
319  if (did == 0x143 && sdid == 0x102) { // subtitle distribution packet
320  tgt = teletext_data_unit_from_op47_data(py, pend, tgt, wanted_lines);
321  } else if (allow_multipacket && did == 0x143 && sdid == 0x203) { // VANC multipacket
322  py += 2; // priority, line/field
323  while (py < pend - 3) {
324  tgt = teletext_data_unit_from_ancillary_packet(py, pend, tgt, wanted_lines, 0);
325  py += 4 + (py[2] & 255); // ndid, nsdid, ndc, line/field
326  }
327  }
328  return tgt;
329 }
330 
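/* DID 0x61 / SDID 0x01 carries EIA/CEA-708 closed captions wrapped in a
 * SMPTE 334-2 Caption Distribution Packet: the 0x9669 identifier, a length
 * byte, a frame-rate byte, a flags byte, a 16-bit header sequence counter, a
 * 0x72 cc_data section (marker bits plus a 5-bit cc_count and cc_count 3-byte
 * triplets) and a 0x74 footer whose sequence counter must match the header,
 * ending in a checksum that makes all CDP bytes sum to zero modulo 256. */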
331 static uint8_t *vanc_to_cc(AVFormatContext *avctx, uint16_t *buf, size_t words,
332  unsigned &cc_count)
333 {
334  size_t i, len = (buf[5] & 0xff) + 6 + 1;
335  uint8_t cdp_sum, rate;
336  uint16_t hdr, ftr;
337  uint8_t *cc;
338  uint16_t *cdp = &buf[6]; // CDP follows
339  if (cdp[0] != 0x96 || cdp[1] != 0x69) {
340  av_log(avctx, AV_LOG_WARNING, "Invalid CDP header 0x%.2x 0x%.2x\n", cdp[0], cdp[1]);
341  return NULL;
342  }
343 
344  len -= 7; // remove VANC header and checksum
345 
346  if (cdp[2] != len) {
347  av_log(avctx, AV_LOG_WARNING, "CDP len %d != %zu\n", cdp[2], len);
348  return NULL;
349  }
350 
351  cdp_sum = 0;
352  for (i = 0; i < len - 1; i++)
353  cdp_sum += cdp[i];
354  cdp_sum = cdp_sum ? 256 - cdp_sum : 0;
355  if (cdp[len - 1] != cdp_sum) {
356  av_log(avctx, AV_LOG_WARNING, "CDP checksum invalid 0x%.4x != 0x%.4x\n", cdp_sum, cdp[len-1]);
357  return NULL;
358  }
359 
360  rate = cdp[3];
361  if (!(rate & 0x0f)) {
362  av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
363  return NULL;
364  }
365  rate >>= 4;
366  if (rate > 8) {
367  av_log(avctx, AV_LOG_WARNING, "CDP frame rate invalid (0x%.2x)\n", rate);
368  return NULL;
369  }
370 
371  if (!(cdp[4] & 0x43)) /* ccdata_present | caption_service_active | reserved */ {
372  av_log(avctx, AV_LOG_WARNING, "CDP flags invalid (0x%.2x)\n", cdp[4]);
373  return NULL;
374  }
375 
376  hdr = (cdp[5] << 8) | cdp[6];
377  if (cdp[7] != 0x72) /* ccdata_id */ {
378  av_log(avctx, AV_LOG_WARNING, "Invalid ccdata_id 0x%.2x\n", cdp[7]);
379  return NULL;
380  }
381 
382  cc_count = cdp[8];
383  if (!(cc_count & 0xe0)) {
384  av_log(avctx, AV_LOG_WARNING, "Invalid cc_count 0x%.2x\n", cc_count);
385  return NULL;
386  }
387 
388  cc_count &= 0x1f;
389  if ((len - 13) < cc_count * 3) {
390  av_log(avctx, AV_LOG_WARNING, "Invalid cc_count %d (> %zu)\n", cc_count * 3, len - 13);
391  return NULL;
392  }
393 
394  if (cdp[len - 4] != 0x74) /* footer id */ {
395  av_log(avctx, AV_LOG_WARNING, "Invalid footer id 0x%.2x\n", cdp[len-4]);
396  return NULL;
397  }
398 
399  ftr = (cdp[len - 3] << 8) | cdp[len - 2];
400  if (ftr != hdr) {
401  av_log(avctx, AV_LOG_WARNING, "Header 0x%.4x != Footer 0x%.4x\n", hdr, ftr);
402  return NULL;
403  }
404 
405  cc = (uint8_t *)av_malloc(cc_count * 3);
406  if (cc == NULL) {
407  av_log(avctx, AV_LOG_WARNING, "CC - av_malloc failed for cc_count = %d\n", cc_count);
408  return NULL;
409  }
410 
411  for (size_t i = 0; i < cc_count; i++) {
412  cc[3*i + 0] = cdp[9 + 3*i+0] /* & 3 */;
413  cc[3*i + 1] = cdp[9 + 3*i+1];
414  cc[3*i + 2] = cdp[9 + 3*i+2];
415  }
416 
417  cc_count *= 3;
418  return cc;
419 }
420 
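/* get_metadata() walks one VANC line unpacked to 16-bit words.  Every packet
 * starts with the 0x000 0x3ff 0x3ff ancillary data flag, then DID, SDID, data
 * count, user data words and a checksum.  DID 0x43 with SDID 0x02/0x03 is
 * handed to the OP-47 teletext path, DID 0x61 with SDID 0x01 to vanc_to_cc();
 * everything else is only logged at debug level and skipped. */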
421 static uint8_t *get_metadata(AVFormatContext *avctx, uint16_t *buf, size_t width,
422  uint8_t *tgt, size_t tgt_size, AVPacket *pkt)
423 {
424  decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
425  uint16_t *max_buf = buf + width;
426 
427  while (buf < max_buf - 6) {
428  int len;
429  uint16_t did = buf[3] & 0xFF; // data id
430  uint16_t sdid = buf[4] & 0xFF; // secondary data id
431  /* Check for VANC header */
432  if (buf[0] != 0 || buf[1] != 0x3ff || buf[2] != 0x3ff) {
433  return tgt;
434  }
435 
436  len = (buf[5] & 0xff) + 6 + 1;
437  if (len > max_buf - buf) {
438  av_log(avctx, AV_LOG_WARNING, "Data Count (%d) > data left (%zu)\n",
439  len, max_buf - buf);
440  return tgt;
441  }
442 
443  if (did == 0x43 && (sdid == 0x02 || sdid == 0x03) && cctx->teletext_lines &&
444  width == 1920 && tgt_size >= 1920) {
445  if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
446  av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
447  goto skip_packet;
448  }
449  tgt = teletext_data_unit_from_ancillary_packet(buf + 3, buf + len, tgt, cctx->teletext_lines, 1);
450  } else if (did == 0x61 && sdid == 0x01) {
451  unsigned int data_len;
452  uint8_t *data;
453  if (check_vanc_parity_checksum(buf, len, buf[len - 1]) < 0) {
454  av_log(avctx, AV_LOG_WARNING, "VANC parity or checksum incorrect\n");
455  goto skip_packet;
456  }
457  clear_parity_bits(buf, len);
458  data = vanc_to_cc(avctx, buf, width, data_len);
459  if (data) {
460  if (av_packet_add_side_data(pkt, AV_PKT_DATA_A53_CC, data, data_len) < 0)
461  av_free(data);
462  }
463  } else {
464  av_log(avctx, AV_LOG_DEBUG, "Unknown meta data DID = 0x%.2x SDID = 0x%.2x\n",
465  did, sdid);
466  }
467 skip_packet:
468  buf += len;
469  }
470 
471  return tgt;
472 }
473 
 474 static void avpacket_queue_init(AVFormatContext *avctx, AVPacketQueue *q)
 475 {
 476  struct decklink_cctx *ctx = (struct decklink_cctx *)avctx->priv_data;
 477  memset(q, 0, sizeof(AVPacketQueue));
 478  pthread_mutex_init(&q->mutex, NULL);
 479  pthread_cond_init(&q->cond, NULL);
 480  q->avctx = avctx;
481  q->max_q_size = ctx->queue_size;
482 }
483 
 484 static void avpacket_queue_flush(AVPacketQueue *q)
 485 {
486  PacketList *pkt, *pkt1;
 487 
 488  pthread_mutex_lock(&q->mutex);
489  for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
490  pkt1 = pkt->next;
491  av_packet_unref(&pkt->pkt);
492  av_freep(&pkt);
493  }
494  q->last_pkt = NULL;
495  q->first_pkt = NULL;
496  q->nb_packets = 0;
497  q->size = 0;
 498  pthread_mutex_unlock(&q->mutex);
 499 }
500 
 501 static void avpacket_queue_end(AVPacketQueue *q)
 502 {
 503  avpacket_queue_flush(q);
 504  pthread_mutex_destroy(&q->mutex);
 505  pthread_cond_destroy(&q->cond);
 506 }
507 
508 static unsigned long long avpacket_queue_size(AVPacketQueue *q)
509 {
510  unsigned long long size;
 511  pthread_mutex_lock(&q->mutex);
 512  size = q->size;
 513  pthread_mutex_unlock(&q->mutex);
514  return size;
515 }
516 
 517 static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
 518 {
519  PacketList *pkt1;
520 
521  // Drop Packet if queue size is > maximum queue size
522  if (avpacket_queue_size(q) > (uint64_t)q->max_q_size) {
 523  av_packet_unref(pkt);
 524  av_log(q->avctx, AV_LOG_WARNING, "Decklink input buffer overrun!\n");
525  return -1;
526  }
527  /* ensure the packet is reference counted */
528  if (av_packet_make_refcounted(pkt) < 0) {
 529  av_packet_unref(pkt);
 530  return -1;
531  }
532 
533  pkt1 = (PacketList *)av_malloc(sizeof(PacketList));
534  if (!pkt1) {
 535  av_packet_unref(pkt);
 536  return -1;
537  }
538  av_packet_move_ref(&pkt1->pkt, pkt);
539  pkt1->next = NULL;
540 
 541  pthread_mutex_lock(&q->mutex);
 542 
543  if (!q->last_pkt) {
544  q->first_pkt = pkt1;
545  } else {
546  q->last_pkt->next = pkt1;
547  }
548 
549  q->last_pkt = pkt1;
550  q->nb_packets++;
551  q->size += pkt1->pkt.size + sizeof(*pkt1);
552 
 553  pthread_cond_signal(&q->cond);
 554 
 555  pthread_mutex_unlock(&q->mutex);
 556  return 0;
557 }
558 
 559 static int avpacket_queue_get(AVPacketQueue *q, AVPacket *pkt, int block)
 560 {
561  PacketList *pkt1;
562  int ret;
563 
 564  pthread_mutex_lock(&q->mutex);
 565 
566  for (;; ) {
567  pkt1 = q->first_pkt;
568  if (pkt1) {
569  q->first_pkt = pkt1->next;
570  if (!q->first_pkt) {
571  q->last_pkt = NULL;
572  }
573  q->nb_packets--;
574  q->size -= pkt1->pkt.size + sizeof(*pkt1);
575  *pkt = pkt1->pkt;
576  av_free(pkt1);
577  ret = 1;
578  break;
579  } else if (!block) {
580  ret = 0;
581  break;
582  } else {
583  pthread_cond_wait(&q->cond, &q->mutex);
584  }
585  }
 586  pthread_mutex_unlock(&q->mutex);
 587  return ret;
588 }
589 
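/* handle_klv() collects VANC packets with DID 0x44 / SDID 0x04.  Each fragment
 * begins with a one-byte metadata identifier (MID) and a two-byte packet
 * sequence counter (PSC); fragments with consecutive PSCs are reassembled per
 * MID, concatenated in MID order and queued as a single packet on the
 * AV_CODEC_ID_SMPTE_KLV data stream, sharing the video frame's pts. */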
590 static void handle_klv(AVFormatContext *avctx, decklink_ctx *ctx, IDeckLinkVideoInputFrame *videoFrame, int64_t pts)
591 {
592  const uint8_t KLV_DID = 0x44;
593  const uint8_t KLV_IN_VANC_SDID = 0x04;
594 
595  struct KLVPacket
596  {
597  uint16_t sequence_counter;
598  std::vector<uint8_t> data;
599  };
600 
601  size_t total_size = 0;
602  std::vector<std::vector<KLVPacket>> klv_packets(256);
603 
604  IDeckLinkVideoFrameAncillaryPackets *packets = nullptr;
605  if (videoFrame->QueryInterface(IID_IDeckLinkVideoFrameAncillaryPackets, (void**)&packets) != S_OK)
606  return;
607 
608  IDeckLinkAncillaryPacketIterator *it = nullptr;
609  if (packets->GetPacketIterator(&it) != S_OK) {
610  packets->Release();
611  return;
612  }
613 
614  IDeckLinkAncillaryPacket *packet = nullptr;
615  while (it->Next(&packet) == S_OK) {
616  uint8_t *data = nullptr;
617  uint32_t size = 0;
618 
619  if (packet->GetDID() == KLV_DID && packet->GetSDID() == KLV_IN_VANC_SDID) {
620  av_log(avctx, AV_LOG_DEBUG, "Found KLV VANC packet on line: %d\n", packet->GetLineNumber());
621 
622  if (packet->GetBytes(bmdAncillaryPacketFormatUInt8, (const void**) &data, &size) == S_OK) {
623  // MID and PSC
624  if (size > 3) {
625  uint8_t mid = data[0];
626  uint16_t psc = data[1] << 8 | data[2];
627 
628  av_log(avctx, AV_LOG_DEBUG, "KLV with MID: %d and PSC: %d\n", mid, psc);
629 
630  auto& list = klv_packets[mid];
631  uint16_t expected_psc = list.size() + 1;
632 
633  if (psc == expected_psc) {
634  uint32_t data_len = size - 3;
635  total_size += data_len;
636 
637  KLVPacket packet{ psc };
638  packet.data.resize(data_len);
639  memcpy(packet.data.data(), data + 3, data_len);
640 
641  list.push_back(std::move(packet));
642  } else {
643  av_log(avctx, AV_LOG_WARNING, "Out of order PSC: %d for MID: %d\n", psc, mid);
644 
645  if (!list.empty()) {
646  for (auto& klv : list)
647  total_size -= klv.data.size();
648 
649  list.clear();
650  }
651  }
652  }
653  }
654  }
655 
656  packet->Release();
657  }
658 
659  it->Release();
660  packets->Release();
661 
662  if (total_size > 0) {
663  std::vector<uint8_t> klv;
664  klv.reserve(total_size);
665 
666  for (size_t i = 0; i < klv_packets.size(); ++i) {
667  auto& list = klv_packets[i];
668 
669  if (list.empty())
670  continue;
671 
672  av_log(avctx, AV_LOG_DEBUG, "Joining MID: %d\n", (int)i);
673 
674  for (auto& packet : list)
675  klv.insert(klv.end(), packet.data.begin(), packet.data.end());
676  }
677 
678  AVPacket klv_packet = { 0 };
679  klv_packet.pts = pts;
680  klv_packet.dts = pts;
681  klv_packet.flags |= AV_PKT_FLAG_KEY;
682  klv_packet.stream_index = ctx->klv_st->index;
683  klv_packet.data = klv.data();
684  klv_packet.size = klv.size();
685 
686  if (avpacket_queue_put(&ctx->queue, &klv_packet) < 0) {
687  ++ctx->dropped;
688  }
689  }
690 }
691 
692 class decklink_input_callback : public IDeckLinkInputCallback
693 {
694 public:
 695  decklink_input_callback(AVFormatContext *_avctx);
 696  ~decklink_input_callback();
 697 
698  virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
699  virtual ULONG STDMETHODCALLTYPE AddRef(void);
700  virtual ULONG STDMETHODCALLTYPE Release(void);
701  virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
702  virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
703 
704 private:
705  std::atomic<int> _refs;
 706  AVFormatContext *avctx;
 707  decklink_ctx *ctx;
 708  int no_video;
 709  int64_t initial_audio_pts;
 710  int64_t initial_video_pts;
 711 };
712 
 713 decklink_input_callback::decklink_input_callback(AVFormatContext *_avctx) : _refs(1)
 714 {
715  avctx = _avctx;
716  decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
717  ctx = (struct decklink_ctx *)cctx->ctx;
718  no_video = 0;
 719  initial_audio_pts = initial_video_pts = AV_NOPTS_VALUE;
 720 }
721 
 722 decklink_input_callback::~decklink_input_callback()
 723 {
724 }
725 
 726 ULONG decklink_input_callback::AddRef(void)
 727 {
728  return ++_refs;
729 }
730 
 731 ULONG decklink_input_callback::Release(void)
 732 {
733  int ret = --_refs;
734  if (!ret)
735  delete this;
736  return ret;
737 }
738 
739 static int64_t get_pkt_pts(IDeckLinkVideoInputFrame *videoFrame,
740  IDeckLinkAudioInputPacket *audioFrame,
741  int64_t wallclock,
742  int64_t abs_wallclock,
743  DecklinkPtsSource pts_src,
744  AVRational time_base, int64_t *initial_pts,
745  int copyts)
746 {
 747  int64_t pts = AV_NOPTS_VALUE;
 748  BMDTimeValue bmd_pts;
749  BMDTimeValue bmd_duration;
750  HRESULT res = E_INVALIDARG;
751  switch (pts_src) {
752  case PTS_SRC_AUDIO:
753  if (audioFrame)
754  res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);
755  break;
756  case PTS_SRC_VIDEO:
757  if (videoFrame)
758  res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);
759  break;
760  case PTS_SRC_REFERENCE:
761  if (videoFrame)
762  res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);
763  break;
764  case PTS_SRC_WALLCLOCK:
765  /* fall through */
 766  case PTS_SRC_ABS_WALLCLOCK:
 767  {
768  /* MSVC does not support compound literals like AV_TIME_BASE_Q
769  * in C++ code (compiler error C4576) */
770  AVRational timebase;
771  timebase.num = 1;
772  timebase.den = AV_TIME_BASE;
773  if (pts_src == PTS_SRC_WALLCLOCK)
774  pts = av_rescale_q(wallclock, timebase, time_base);
775  else
776  pts = av_rescale_q(abs_wallclock, timebase, time_base);
777  break;
778  }
779  }
780  if (res == S_OK)
781  pts = bmd_pts / time_base.num;
782 
783  if (!copyts) {
784  if (pts != AV_NOPTS_VALUE && *initial_pts == AV_NOPTS_VALUE)
785  *initial_pts = pts;
786  if (*initial_pts != AV_NOPTS_VALUE)
787  pts -= *initial_pts;
788  }
789 
790  return pts;
791 }
792 
793 static int get_bmd_timecode(AVFormatContext *avctx, AVTimecode *tc, AVRational frame_rate, BMDTimecodeFormat tc_format, IDeckLinkVideoInputFrame *videoFrame)
794 {
795  IDeckLinkTimecode *timecode;
796  int ret = AVERROR(ENOENT);
797 #if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
798  int hfr = (tc_format == bmdTimecodeRP188HighFrameRate);
799 #else
800  int hfr = 0;
801 #endif
802  if (videoFrame->GetTimecode(tc_format, &timecode) == S_OK) {
803  uint8_t hh, mm, ss, ff;
804  if (timecode->GetComponents(&hh, &mm, &ss, &ff) == S_OK) {
805  int flags = (timecode->GetFlags() & bmdTimecodeIsDropFrame) ? AV_TIMECODE_FLAG_DROPFRAME : 0;
806  if (!hfr && av_cmp_q(frame_rate, av_make_q(30, 1)) == 1)
807  ff = ff << 1 | !!(timecode->GetFlags() & bmdTimecodeFieldMark);
808  ret = av_timecode_init_from_components(tc, frame_rate, flags, hh, mm, ss, ff, avctx);
809  }
810  timecode->Release();
811  }
812  return ret;
813 }
814 
815 static int get_frame_timecode(AVFormatContext *avctx, decklink_ctx *ctx, AVTimecode *tc, IDeckLinkVideoInputFrame *videoFrame)
816 {
817  AVRational frame_rate = ctx->video_st->r_frame_rate;
818  int ret;
819  /* 50/60 fps content has alternating VITC1 and VITC2 timecode (see SMPTE ST
820  * 12-2, section 7), so the native ordering of RP188Any (HFR, VITC1, LTC,
821  * VITC2) would not work because LTC might not contain the field flag.
822  * Therefore we query the types manually. */
823  if (ctx->tc_format == bmdTimecodeRP188Any && av_cmp_q(frame_rate, av_make_q(30, 1)) == 1) {
824 #if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
825  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188HighFrameRate, videoFrame);
826  if (ret == AVERROR(ENOENT))
827 #endif
828  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188VITC1, videoFrame);
829  if (ret == AVERROR(ENOENT))
830  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188VITC2, videoFrame);
831  if (ret == AVERROR(ENOENT))
832  ret = get_bmd_timecode(avctx, tc, frame_rate, bmdTimecodeRP188LTC, videoFrame);
833  } else {
834  ret = get_bmd_timecode(avctx, tc, frame_rate, ctx->tc_format, videoFrame);
835  }
836  return ret;
837 }
838 
 839 HRESULT decklink_input_callback::VideoInputFrameArrived(
 840  IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
841 {
842  void *frameBytes;
843  void *audioFrameBytes;
844  BMDTimeValue frameTime;
845  BMDTimeValue frameDuration;
846  int64_t wallclock = 0, abs_wallclock = 0;
847  struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
848 
849  if (ctx->autodetect) {
850  if (videoFrame && !(videoFrame->GetFlags() & bmdFrameHasNoInputSource) &&
851  ctx->bmd_mode == bmdModeUnknown)
852  {
 853  ctx->bmd_mode = AUTODETECT_DEFAULT_MODE;
 854  }
855  return S_OK;
856  }
857 
858  // Drop the frames till system's timestamp aligns with the configured value.
859  if (0 == ctx->frameCount && cctx->timestamp_align) {
860  AVRational remainder = av_make_q(av_gettime() % cctx->timestamp_align, 1000000);
861  AVRational frame_duration = av_inv_q(ctx->video_st->r_frame_rate);
862  if (av_cmp_q(remainder, frame_duration) > 0) {
863  ++ctx->dropped;
864  return S_OK;
865  }
866  }
867 
868  ctx->frameCount++;
 869  if (ctx->audio_pts_source == PTS_SRC_WALLCLOCK || ctx->video_pts_source == PTS_SRC_WALLCLOCK)
 870  wallclock = av_gettime_relative();
 871  if (ctx->audio_pts_source == PTS_SRC_ABS_WALLCLOCK || ctx->video_pts_source == PTS_SRC_ABS_WALLCLOCK)
 872  abs_wallclock = av_gettime();
873 
874  // Handle Video Frame
875  if (videoFrame) {
876  AVPacket pkt = { 0 };
877  if (ctx->frameCount % 25 == 0) {
878  unsigned long long qsize = avpacket_queue_size(&ctx->queue);
 879  av_log(avctx, AV_LOG_DEBUG,
 880  "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
881  ctx->frameCount,
882  videoFrame->GetRowBytes() * videoFrame->GetHeight(),
883  (double)qsize / 1024 / 1024);
884  }
885 
886  videoFrame->GetBytes(&frameBytes);
887  videoFrame->GetStreamTime(&frameTime, &frameDuration,
 888  ctx->video_st->time_base.den);
 889 
890  if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
891  if (ctx->draw_bars && videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
892  unsigned bars[8] = {
893  0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
894  0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
895  int width = videoFrame->GetWidth();
896  int height = videoFrame->GetHeight();
897  unsigned *p = (unsigned *)frameBytes;
898 
899  for (int y = 0; y < height; y++) {
900  for (int x = 0; x < width; x += 2)
901  *p++ = bars[(x * 8) / width];
902  }
903  }
904 
905  if (!no_video) {
906  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected "
907  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
908  }
909  no_video = 1;
910  } else {
911  if (no_video) {
912  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
913  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
914  }
915  no_video = 0;
916 
917  // Handle Timecode (if requested)
918  if (ctx->tc_format) {
919  AVTimecode tcr;
920  if (get_frame_timecode(avctx, ctx, &tcr, videoFrame) >= 0) {
921  char tcstr[AV_TIMECODE_STR_SIZE];
922  const char *tc = av_timecode_make_string(&tcr, tcstr, 0);
923  if (tc) {
924  AVDictionary* metadata_dict = NULL;
925  uint8_t* packed_metadata;
926 
927  if (av_cmp_q(ctx->video_st->r_frame_rate, av_make_q(60, 1)) < 1) {
928  uint32_t tc_data = av_timecode_get_smpte_from_framenum(&tcr, 0);
929  int size = sizeof(uint32_t) * 4;
930  uint32_t *sd = (uint32_t *)av_packet_new_side_data(&pkt, AV_PKT_DATA_S12M_TIMECODE, size);
931 
932  if (sd) {
933  *sd = 1; // one TC
934  *(sd + 1) = tc_data; // TC
935  }
936  }
937 
938  if (av_dict_set(&metadata_dict, "timecode", tc, 0) >= 0) {
939  buffer_size_t metadata_len;
940  packed_metadata = av_packet_pack_dictionary(metadata_dict, &metadata_len);
941  av_dict_free(&metadata_dict);
942  if (packed_metadata) {
943  if (av_packet_add_side_data(&pkt, AV_PKT_DATA_STRINGS_METADATA, packed_metadata, metadata_len) < 0)
944  av_freep(&packed_metadata);
945  else if (!ctx->tc_seen)
 946  ctx->tc_seen = 1;
 947  }
948  }
949  }
950  } else {
951  av_log(avctx, AV_LOG_DEBUG, "Unable to find timecode.\n");
952  }
953  }
954  }
955 
956  if (ctx->tc_format && cctx->wait_for_tc && !ctx->tc_seen) {
957 
958  av_log(avctx, AV_LOG_WARNING, "No TC detected yet. wait_for_tc set. Dropping. \n");
959  av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - "
960  "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
961  return S_OK;
962  }
963 
964  pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts, cctx->copyts);
965  pkt.dts = pkt.pts;
966 
967  pkt.duration = frameDuration;
968  //To be made sure it still applies
 969  pkt.flags |= AV_PKT_FLAG_KEY;
 970  pkt.stream_index = ctx->video_st->index;
 971  pkt.data = (uint8_t *)frameBytes;
972  pkt.size = videoFrame->GetRowBytes() *
973  videoFrame->GetHeight();
974  //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
975 
976  if (!no_video) {
977  IDeckLinkVideoFrameAncillary *vanc;
978  AVPacket txt_pkt = { 0 };
979  uint8_t txt_buf0[3531]; // 35 * 46 bytes decoded teletext lines + 1 byte data_identifier + 1920 bytes OP47 decode buffer
980  uint8_t *txt_buf = txt_buf0;
981 
982  if (ctx->enable_klv) {
983  handle_klv(avctx, ctx, videoFrame, pkt.pts);
984  }
985 
986  if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
987  int i;
988  BMDPixelFormat vanc_format = vanc->GetPixelFormat();
989  txt_buf[0] = 0x10; // data_identifier - EBU_data
990  txt_buf++;
991 #if CONFIG_LIBZVBI
992  if (ctx->bmd_mode == bmdModePAL && ctx->teletext_lines &&
993  (vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
994  int64_t line_mask = 1;
995  av_assert0(videoFrame->GetWidth() == 720);
996  for (i = 6; i < 336; i++, line_mask <<= 1) {
997  uint8_t *buf;
998  if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
999  if (vanc_format == bmdFormat8BitYUV)
1000  txt_buf = teletext_data_unit_from_vbi_data(i, buf, txt_buf, VBI_PIXFMT_UYVY);
1001  else
1002  txt_buf = teletext_data_unit_from_vbi_data_10bit(i, buf, txt_buf);
1003  }
1004  if (i == 22)
1005  i = 317;
1006  }
1007  }
1008 #endif
1009  if (vanc_format == bmdFormat10BitYUV && videoFrame->GetWidth() <= MAX_WIDTH_VANC) {
1010  int idx = get_vanc_line_idx(ctx->bmd_mode);
1011  for (i = vanc_line_numbers[idx].vanc_start; i <= vanc_line_numbers[idx].vanc_end; i++) {
1012  uint8_t *buf;
1013  if (vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
1014  uint16_t vanc[MAX_WIDTH_VANC];
1015  size_t vanc_size = videoFrame->GetWidth();
1016  if (ctx->bmd_mode == bmdModeNTSC && videoFrame->GetWidth() * 2 <= MAX_WIDTH_VANC) {
1017  vanc_size = vanc_size * 2;
1018  unpack_v210(vanc, buf, videoFrame->GetWidth());
1019  } else {
1020  extract_luma_from_v210(vanc, buf, videoFrame->GetWidth());
1021  }
1022  txt_buf = get_metadata(avctx, vanc, vanc_size,
1023  txt_buf, sizeof(txt_buf0) - (txt_buf - txt_buf0), &pkt);
1024  }
1025  if (i == vanc_line_numbers[idx].field0_vanc_end)
 1026  i = vanc_line_numbers[idx].field1_vanc_start - 1;
 1027  }
1028  }
1029  vanc->Release();
1030  if (txt_buf - txt_buf0 > 1) {
1031  int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
1032  while (stuffing_units--) {
1033  memset(txt_buf, 0xff, 46);
1034  txt_buf[1] = 0x2c; // data_unit_length
1035  txt_buf += 46;
1036  }
1037  txt_pkt.pts = pkt.pts;
1038  txt_pkt.dts = pkt.dts;
1039  txt_pkt.stream_index = ctx->teletext_st->index;
1040  txt_pkt.data = txt_buf0;
1041  txt_pkt.size = txt_buf - txt_buf0;
1042  if (avpacket_queue_put(&ctx->queue, &txt_pkt) < 0) {
1043  ++ctx->dropped;
1044  }
1045  }
1046  }
1047  }
1048 
 1049  pkt.buf = av_buffer_create(pkt.data, pkt.size, decklink_object_free, videoFrame, 0);
 1050  if (pkt.buf)
1051  videoFrame->AddRef();
1052 
1053  if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
1054  ++ctx->dropped;
1055  }
1056  }
1057 
1058  // Handle Audio Frame
1059  if (audioFrame) {
1060  AVPacket pkt = { 0 };
1061  BMDTimeValue audio_pts;
1062 
1063  //hack among hacks
1064  pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codecpar->channels * (ctx->audio_depth / 8);
1065  audioFrame->GetBytes(&audioFrameBytes);
1066  audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
1067  pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->audio_pts_source, ctx->audio_st->time_base, &initial_audio_pts, cctx->copyts);
1068  pkt.dts = pkt.pts;
1069 
1070  //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
 1071  pkt.flags |= AV_PKT_FLAG_KEY;
 1072  pkt.stream_index = ctx->audio_st->index;
 1073  pkt.data = (uint8_t *)audioFrameBytes;
1074 
1075  if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
1076  ++ctx->dropped;
1077  }
1078  }
1079 
1080  return S_OK;
1081 }
1082 
 1083 HRESULT decklink_input_callback::VideoInputFormatChanged(
 1084  BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
1085  BMDDetectedVideoInputFormatFlags formatFlags)
1086 {
1087  struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
1088  ctx->bmd_mode = mode->GetDisplayMode();
1089  // check the C context member to make sure we set both raw_format and bmd_mode with data from the same format change callback
1090  if (!cctx->raw_format)
1091  ctx->raw_format = (formatFlags & bmdDetectedVideoInputRGB444) ? bmdFormat8BitARGB : bmdFormat8BitYUV;
1092  return S_OK;
1093 }
1094 
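/* decklink_autodetect() enables video input in a throwaway default mode with
 * format detection turned on, starts the streams, and polls for up to three
 * seconds until VideoInputFormatChanged() has stored a real display mode.
 * The detected mode is then written back into the format_code option as a
 * four-character code so the normal setup path can apply it. */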
1095 static int decklink_autodetect(struct decklink_cctx *cctx) {
1096  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1097  DECKLINK_BOOL autodetect_supported = false;
1098  int i;
1099 
1100  if (ctx->attr->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &autodetect_supported) != S_OK)
1101  return -1;
1102  if (autodetect_supported == false)
1103  return -1;
1104 
1105  ctx->autodetect = 1;
1106  ctx->bmd_mode = bmdModeUnknown;
1107  if (ctx->dli->EnableVideoInput(AUTODETECT_DEFAULT_MODE,
1108  bmdFormat8BitYUV,
1109  bmdVideoInputEnableFormatDetection) != S_OK) {
1110  return -1;
1111  }
1112 
1113  if (ctx->dli->StartStreams() != S_OK) {
1114  return -1;
1115  }
1116 
1117  // 3 second timeout
1118  for (i = 0; i < 30; i++) {
1119  av_usleep(100000);
1120  /* Sometimes VideoInputFrameArrived is called without the
1121  * bmdFrameHasNoInputSource flag before VideoInputFormatChanged.
1122  * So don't break for bmd_mode == AUTODETECT_DEFAULT_MODE. */
1123  if (ctx->bmd_mode != bmdModeUnknown &&
1124  ctx->bmd_mode != AUTODETECT_DEFAULT_MODE)
1125  break;
1126  }
1127 
1128  ctx->dli->PauseStreams();
1129  ctx->dli->FlushStreams();
1130  ctx->autodetect = 0;
1131  if (ctx->bmd_mode != bmdModeUnknown) {
1132  cctx->format_code = (char *)av_mallocz(5);
1133  if (!cctx->format_code)
1134  return -1;
1135  AV_WB32(cctx->format_code, ctx->bmd_mode);
1136  return 0;
1137  } else {
1138  return -1;
1139  }
1140 
1141 }
1142 
1143 extern "C" {
1144 
 1145 av_cold int ff_decklink_read_close(AVFormatContext *avctx)
 1146 {
1147  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1148  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1149 
1150  if (ctx->dli) {
1151  ctx->dli->StopStreams();
1152  ctx->dli->DisableVideoInput();
1153  ctx->dli->DisableAudioInput();
1154  }
1155 
1156  ff_decklink_cleanup(avctx);
1157  avpacket_queue_end(&ctx->queue);
1158 
1159  av_freep(&cctx->ctx);
1160 
1161  return 0;
1162 }
1163 
 1164 av_cold int ff_decklink_read_header(AVFormatContext *avctx)
 1165 {
1166  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1167  struct decklink_ctx *ctx;
1168  class decklink_allocator *allocator;
 1169  class decklink_input_callback *input_callback;
 1170  AVStream *st;
1171  HRESULT result;
1172  int ret;
1173 
1174  ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
1175  if (!ctx)
1176  return AVERROR(ENOMEM);
1177  ctx->list_devices = cctx->list_devices;
1178  ctx->list_formats = cctx->list_formats;
1179  ctx->enable_klv = cctx->enable_klv;
1180  ctx->teletext_lines = cctx->teletext_lines;
1181  ctx->preroll = cctx->preroll;
1182  ctx->duplex_mode = cctx->duplex_mode;
1183  if (cctx->tc_format > 0 && (unsigned int)cctx->tc_format < FF_ARRAY_ELEMS(decklink_timecode_format_map))
1184  ctx->tc_format = decklink_timecode_format_map[cctx->tc_format];
1185  if (cctx->video_input > 0 && (unsigned int)cctx->video_input < FF_ARRAY_ELEMS(decklink_video_connection_map))
1186  ctx->video_input = decklink_video_connection_map[cctx->video_input];
1187  if (cctx->audio_input > 0 && (unsigned int)cctx->audio_input < FF_ARRAY_ELEMS(decklink_audio_connection_map))
1188  ctx->audio_input = decklink_audio_connection_map[cctx->audio_input];
1189  ctx->audio_pts_source = cctx->audio_pts_source;
1190  ctx->video_pts_source = cctx->video_pts_source;
1191  ctx->draw_bars = cctx->draw_bars;
1192  ctx->audio_depth = cctx->audio_depth;
1193  if (cctx->raw_format > 0 && (unsigned int)cctx->raw_format < FF_ARRAY_ELEMS(decklink_raw_format_map))
1194  ctx->raw_format = decklink_raw_format_map[cctx->raw_format];
1195  cctx->ctx = ctx;
1196 
1197  /* Check audio channel option for valid values: 2, 8 or 16 */
1198  switch (cctx->audio_channels) {
1199  case 2:
1200  case 8:
1201  case 16:
1202  break;
1203  default:
1204  av_log(avctx, AV_LOG_ERROR, "Value of channels option must be one of 2, 8 or 16\n");
1205  return AVERROR(EINVAL);
1206  }
1207 
1208  /* Check audio bit depth option for valid values: 16 or 32 */
1209  switch (cctx->audio_depth) {
1210  case 16:
1211  case 32:
1212  break;
1213  default:
1214  av_log(avctx, AV_LOG_ERROR, "Value for audio bit depth option must be either 16 or 32\n");
1215  return AVERROR(EINVAL);
1216  }
1217 
1218  /* List available devices. */
1219  if (ctx->list_devices) {
1220  ff_decklink_list_devices_legacy(avctx, 1, 0);
1221  return AVERROR_EXIT;
1222  }
1223 
1224  ret = ff_decklink_init_device(avctx, avctx->url);
1225  if (ret < 0)
1226  return ret;
1227 
1228  /* Get input device. */
1229  if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
1230  av_log(avctx, AV_LOG_ERROR, "Could not open input device from '%s'\n",
1231  avctx->url);
1232  ret = AVERROR(EIO);
1233  goto error;
1234  }
1235 
1236  if (ff_decklink_set_configs(avctx, DIRECTION_IN) < 0) {
1237  av_log(avctx, AV_LOG_ERROR, "Could not set input configuration\n");
1238  ret = AVERROR(EIO);
1239  goto error;
1240  }
1241 
1242  /* List supported formats. */
1243  if (ctx->list_formats) {
 1244  ff_decklink_list_formats(avctx, DIRECTION_IN);
 1245  ret = AVERROR_EXIT;
1246  goto error;
1247  }
1248 
 1249  input_callback = new decklink_input_callback(avctx);
 1250  ret = (ctx->dli->SetCallback(input_callback) == S_OK ? 0 : AVERROR_EXTERNAL);
1251  input_callback->Release();
1252  if (ret < 0) {
1253  av_log(avctx, AV_LOG_ERROR, "Cannot set input callback\n");
1254  goto error;
1255  }
1256 
1257  allocator = new decklink_allocator();
1258  ret = (ctx->dli->SetVideoInputFrameMemoryAllocator(allocator) == S_OK ? 0 : AVERROR_EXTERNAL);
1259  allocator->Release();
1260  if (ret < 0) {
1261  av_log(avctx, AV_LOG_ERROR, "Cannot set custom memory allocator\n");
1262  goto error;
1263  }
1264 
1265  if (!cctx->format_code) {
1266  if (decklink_autodetect(cctx) < 0) {
1267  av_log(avctx, AV_LOG_ERROR, "Cannot Autodetect input stream or No signal\n");
1268  ret = AVERROR(EIO);
1269  goto error;
1270  }
1271  av_log(avctx, AV_LOG_INFO, "Autodetected the input mode\n");
1272  }
1273  if (ctx->raw_format == (BMDPixelFormat)0)
1274  ctx->raw_format = bmdFormat8BitYUV;
1275  if (ff_decklink_set_format(avctx, DIRECTION_IN) < 0) {
1276  av_log(avctx, AV_LOG_ERROR, "Could not set format code %s for %s\n",
1277  cctx->format_code ? cctx->format_code : "(unset)", avctx->url);
1278  ret = AVERROR(EIO);
1279  goto error;
1280  }
1281 
1282 #if !CONFIG_LIBZVBI
1283  if (ctx->teletext_lines && ctx->bmd_mode == bmdModePAL) {
1284  av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing SD PAL teletext, please recompile FFmpeg.\n");
1285  ret = AVERROR(ENOSYS);
1286  goto error;
1287  }
1288 #endif
1289 
1290  /* Setup streams. */
1291  st = avformat_new_stream(avctx, NULL);
1292  if (!st) {
1293  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1294  ret = AVERROR(ENOMEM);
1295  goto error;
1296  }
1297  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
1298  st->codecpar->codec_id = cctx->audio_depth == 32 ? AV_CODEC_ID_PCM_S32LE : AV_CODEC_ID_PCM_S16LE;
1299  st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
1300  st->codecpar->channels = cctx->audio_channels;
1301  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1302  ctx->audio_st=st;
1303 
1304  st = avformat_new_stream(avctx, NULL);
1305  if (!st) {
1306  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1307  ret = AVERROR(ENOMEM);
1308  goto error;
1309  }
1310  st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
1311  st->codecpar->width = ctx->bmd_width;
1312  st->codecpar->height = ctx->bmd_height;
1313 
1314  st->time_base.den = ctx->bmd_tb_den;
1315  st->time_base.num = ctx->bmd_tb_num;
1316  st->r_frame_rate = av_make_q(st->time_base.den, st->time_base.num);
1317 
1318  switch(ctx->raw_format) {
1319  case bmdFormat8BitYUV:
1320  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1321  st->codecpar->format = AV_PIX_FMT_UYVY422;
1322  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
1323  break;
1324  case bmdFormat10BitYUV:
1325  st->codecpar->codec_id = AV_CODEC_ID_V210;
1326  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
1327  break;
1328  case bmdFormat8BitARGB:
1329  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1330  st->codecpar->format = AV_PIX_FMT_0RGB;
1331  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1332  break;
1333  case bmdFormat8BitBGRA:
1334  st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
1335  st->codecpar->format = AV_PIX_FMT_BGR0;
1336  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
1337  break;
1338  case bmdFormat10BitRGB:
1339  st->codecpar->codec_id = AV_CODEC_ID_R210;
1340  st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 30, st->time_base.den, st->time_base.num);
1341  break;
1342  default:
1343  char fourcc_str[AV_FOURCC_MAX_STRING_SIZE] = {0};
1344  av_fourcc_make_string(fourcc_str, ctx->raw_format);
1345  av_log(avctx, AV_LOG_ERROR, "Raw Format %s not supported\n", fourcc_str);
1346  ret = AVERROR(EINVAL);
1347  goto error;
1348  }
1349 
1350  switch (ctx->bmd_field_dominance) {
1351  case bmdUpperFieldFirst:
1352  st->codecpar->field_order = AV_FIELD_TT;
1353  break;
1354  case bmdLowerFieldFirst:
1355  st->codecpar->field_order = AV_FIELD_BB;
1356  break;
1357  case bmdProgressiveFrame:
1358  case bmdProgressiveSegmentedFrame:
1359  st->codecpar->field_order = AV_FIELD_PROGRESSIVE;
1360  break;
1361  }
1362 
1363  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1364 
1365  ctx->video_st=st;
1366 
1367  if (ctx->enable_klv) {
1368  st = avformat_new_stream(avctx, NULL);
1369  if (!st) {
1370  ret = AVERROR(ENOMEM);
1371  goto error;
1372  }
1373  st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
1374  st->time_base.den = ctx->bmd_tb_den;
1375  st->time_base.num = ctx->bmd_tb_num;
1376  st->codecpar->codec_id = AV_CODEC_ID_SMPTE_KLV;
1377  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1378  ctx->klv_st = st;
1379  }
1380 
1381  if (ctx->teletext_lines) {
1382  st = avformat_new_stream(avctx, NULL);
1383  if (!st) {
1384  av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
1385  ret = AVERROR(ENOMEM);
1386  goto error;
1387  }
1388  st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
1389  st->time_base.den = ctx->bmd_tb_den;
1390  st->time_base.num = ctx->bmd_tb_num;
1391  st->codecpar->codec_id = AV_CODEC_ID_DVB_TELETEXT;
1392  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
1393  ctx->teletext_st = st;
1394  }
1395 
1396  av_log(avctx, AV_LOG_VERBOSE, "Using %d input audio channels\n", ctx->audio_st->codecpar->channels);
1397  result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, cctx->audio_depth == 32 ? bmdAudioSampleType32bitInteger : bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->channels);
1398 
1399  if (result != S_OK) {
1400  av_log(avctx, AV_LOG_ERROR, "Cannot enable audio input\n");
1401  ret = AVERROR(EIO);
1402  goto error;
1403  }
1404 
1405  result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
1406  ctx->raw_format,
1407  bmdVideoInputFlagDefault);
1408 
1409  if (result != S_OK) {
1410  av_log(avctx, AV_LOG_ERROR, "Cannot enable video input\n");
1411  ret = AVERROR(EIO);
1412  goto error;
1413  }
1414 
1415  avpacket_queue_init (avctx, &ctx->queue);
1416 
1417  if (ctx->dli->StartStreams() != S_OK) {
1418  av_log(avctx, AV_LOG_ERROR, "Cannot start input stream\n");
1419  ret = AVERROR(EIO);
1420  goto error;
1421  }
1422 
1423  return 0;
1424 
1425 error:
1426  ff_decklink_cleanup(avctx);
1427  return ret;
1428 }
1429 
 1430 int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt)
 1431 {
1432  struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
1433  struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
1434 
1435  avpacket_queue_get(&ctx->queue, pkt, 1);
1436 
1437  if (ctx->tc_format && !(av_dict_get(ctx->video_st->metadata, "timecode", NULL, 0))) {
 1438  buffer_size_t size;
 1439  const uint8_t *side_metadata = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
 1440  if (side_metadata) {
1441  if (av_packet_unpack_dictionary(side_metadata, size, &ctx->video_st->metadata) < 0)
1442  av_log(avctx, AV_LOG_ERROR, "Unable to set timecode\n");
1443  }
1444  }
1445 
1446  return 0;
1447 }
1448 
 1449 int ff_decklink_list_input_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list)
 1450 {
1451  return ff_decklink_list_devices(avctx, device_list, 1, 0);
1452 }
1453 
1454 } /* extern "C" */