47 s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w);
55 int min_width = FFMIN(w, 32);
58 for (i = 0; i < min_width; i++) {
65 s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
68 const uint16_t *src16 = (const uint16_t *)src;
69 uint16_t *dst16 = (uint16_t *)dst;
70 for (i = 0; i < min_width; i++) {
71 const int temp = src16[i];
72 dst16[i] = temp - left;
77 s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->n - 1, w - 32);
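The fragments above are the scalar head of sub_left_prediction: the first min_width samples are differenced against a carried-in left value, and the dsp routines (diff_bytes / diff_int16) handle the aligned remainder of the row. As a rough standalone sketch, assuming plain 8-bit samples and no FFmpeg headers (left_predict8 is an illustrative name, not a library function), left prediction amounts to:

    #include <stdint.h>

    /* Minimal sketch of 8-bit left prediction: dst[i] = src[i] - previous sample,
     * with unsigned wraparound (mod 256). Returns the last source sample so the
     * caller can carry it over as the new "left". Illustration only, not the dsp
     * entry point. */
    static int left_predict8(uint8_t *dst, const uint8_t *src, int w, int left)
    {
        for (int i = 0; i < w; i++) {
            dst[i] = (uint8_t)(src[i] - left);
            left   = src[i];
        }
        return left;
    }

For bit depths above 8 the same idea is applied to uint16_t samples, which is what the diff_int16 calls with the s->n - 1 mask do for the bulk of the row.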
84 int *red, int *green, int *blue,
89 int min_width = FFMIN(w, 8);
95 for (i = 0; i < min_width; i++) {
96 const int rt = src[i * 4 + R];
97 const int gt = src[i * 4 + G];
98 const int bt = src[i * 4 + B];
99 const int at = src[i * 4 + A];
100 dst[i * 4 + R] = rt - r;
101 dst[i * 4 + G] = gt - g;
102 dst[i * 4 + B] = bt - b;
103 dst[i * 4 + A] = at - a;
110 s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32);
112 *red   = src[(w - 1) * 4 + R];
113 *green = src[(w - 1) * 4 + G];
114 *blue  = src[(w - 1) * 4 + B];
120 int *red, int *green, int *blue)
128 const int rt = src[i * 3 + 0];
129 const int gt = src[i * 3 + 1];
130 const int bt = src[i * 3 + 2];
131 dst[i * 3 + 0] = rt - r;
132 dst[i * 3 + 1] = gt - g;
133 dst[i * 3 + 2] = bt - b;
139 s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
141 *red   = src[(w - 1) * 3 + 0];
142 *green = src[(w - 1) * 3 + 1];
143 *blue  = src[(w - 1) * 3 + 2];
149 s->llvidencdsp.sub_median_pred(dst, src1, src2, w, left, left_top);
151 s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w, left, left_top);
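Both calls select the median predictor: each sample is predicted from its left neighbour, the sample above, and their gradient, and only the difference is coded. A scalar sketch of the 8-bit case follows; sub_median8 and mid_pred3 are illustrative stand-ins for the llvidencdsp routine, with src1 playing the role of the row above and src2 the row being coded, matching how encode_frame calls it further down.

    #include <stdint.h>

    /* Median of three values, as FFmpeg's mid_pred() computes it. */
    static int mid_pred3(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b */
        if (b > c) b = c;                        /* b = min(b, c) */
        return a > b ? a : b;                    /* max(a, min(b, c)) == median */
    }

    /* dst[i] = cur[i] - median(left, top, left + top - topleft), mod 256.
     * left/left_top carry the prediction state across calls. */
    static void sub_median8(uint8_t *dst, const uint8_t *top, const uint8_t *cur,
                            int w, int *left, int *left_top)
    {
        int l = *left, lt = *left_top;
        for (int i = 0; i < w; i++) {
            const int t = top[i];
            dst[i] = (uint8_t)(cur[i] - mid_pred3(l, t, (l + t - lt) & 0xFF));
            lt = t;           /* topleft for the next column */
            l  = cur[i];      /* left for the next column */
        }
        *left     = l;
        *left_top = lt;
    }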
161 for (i = 0; i < n;) {
165 for (; i < n && len[i] == val && repeat < 255; i++)
168 av_assert0(val < 32 && val > 0 && repeat < 256 && repeat > 0);
171 buf[index++] = repeat;
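store_table run-length codes the table of Huffman code lengths, which is why the assert above bounds val to 5 bits and repeat to one byte. Based on the loop excerpted here, a hedged sketch of the packing: runs of up to 7 identical lengths fit in a single byte (length in the low 5 bits, run count in the top 3), while longer runs are stored as a length byte followed by a repeat byte. pack_len_table is an illustrative helper name.

    #include <assert.h>
    #include <stdint.h>

    /* Sketch of run-length packing a table of code lengths (values 1..31).
     * Mirrors the loop above; the caller provides a large-enough buffer. */
    static int pack_len_table(uint8_t *buf, const uint8_t *len, int n)
    {
        int index = 0;
        for (int i = 0; i < n;) {
            const int val = len[i];
            int repeat = 0;
            for (; i < n && len[i] == val && repeat < 255; i++)
                repeat++;
            assert(val > 0 && val < 32 && repeat > 0);
            if (repeat > 7) {
                buf[index++] = (uint8_t)val;       /* long run: length, then count */
                buf[index++] = (uint8_t)repeat;
            } else {
                buf[index++] = (uint8_t)(val | (repeat << 5)); /* short run: packed */
            }
        }
        return index; /* bytes written */
    }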
187 count = 1 + s->alpha + 2*s->chroma;
189 for (i = 0; i < count; i++) {
215 #define STATS_OUT_SIZE 21*MAX_N*3 + 4
225 #if FF_API_CODED_FRAME
231 #if FF_API_PRIVATE_OPT
238 s->bps = desc->comp[0].depth;
240 s->chroma = desc->nb_components > 2;
298 s->bitstream_bpp = 32;
301 s->bitstream_bpp = 24;
312 #if FF_API_PRIVATE_OPT
322 "context=1 is not compatible with "
323 "2 pass huffyuv encoding\n");
331 "Error: YV12 is not supported by huffyuv; use "
332 "vcodec=ffvhuff or format=422p\n");
335 #if FF_API_PRIVATE_OPT
338 "Error: per-frame huffman tables are not supported "
339 "by huffyuv; use vcodec=ffvhuff\n");
342 if (s->version > 2) {
344 "Error: ver>2 is not supported "
345 "by huffyuv; use vcodec=ffvhuff\n");
349 if (s->interlaced != (s->height > 288))
351 "using huffyuv 2.2.0 or newer interlacing flag\n");
355 av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
356 "Use vstrict=-2 / -strict -2 to use it anyway.\n");
360 if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
362 "Error: RGB is incompatible with median predictor\n");
370 if (s->version < 3) {
374 ((uint8_t*)avctx->extradata)[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
381 s->avctx->extradata_size = 4;
386 for (i = 0; i < 4; i++)
387 for (j = 0; j < s->vlc_n; j++)
391 for (i = 0; i < 4; i++) {
394 for (j = 0; j < s->vlc_n; j++) {
395 s->stats[i][j] += strtol(p, &next, 0);
396 if (next == p) return -1;
400 if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
403 for (i = 0; i < 4; i++)
404 for (j = 0; j < s->vlc_n; j++) {
405 int d = FFMIN(j, s->vlc_n - j);
407 s->stats[i][j] = 100000000 / (d*d + 1);
414 s->avctx->extradata_size += ret;
417 for (i = 0; i < 4; i++) {
418 int pels = s->width * s->height / (i ? 40 : 10);
419 for (j = 0; j < s->vlc_n; j++) {
420 int d = FFMIN(j, s->vlc_n - j);
421 s->stats[i][j] = pels/(d*d + 1);
425 for (i = 0; i < 4; i++)
426 for (j = 0; j < s->vlc_n; j++)
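When no pass-1 statistics are supplied, the tables above are seeded with a symmetric prior that heavily favours residuals near zero: d is the wrap-around distance of symbol j from 0, and the seeded count decays as 1/(d*d + 1). A tiny standalone illustration (the table size here is an arbitrary example, not the encoder's actual vlc_n):

    #include <stdio.h>

    #define FFMIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        const int vlc_n = 256;                  /* example size; the real one depends on bit depth */
        for (int j = 0; j < 6; j++) {
            const int d = FFMIN(j, vlc_n - j);  /* distance from 0, wrapping past vlc_n */
            printf("j=%d seed=%d\n", j, 100000000 / (d * d + 1));
        }
        return 0;
    }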
453 int y1 = y[2 * i + 1];\
460 for (i = 0; i < count; i++) {
471 for (i = 0; i < count; i++) {
483 for (i = 0; i < count; i++) {
504 int y0 = s->temp[0][width-1];
506 int y0 = s->temp16[0][width-1] & mask;
508 int y0 = s->temp16[0][width-1];
510 s->stats[plane][y0]++;
512 s->stats[plane][y0>>2]++;
514 put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
516 put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
517 put_bits(&s->pb, 2, y0&3);
520 int y0 = s->temp[0][2 * i];\
521 int y1 = s->temp[0][2 * i + 1];
523 int y0 = s->temp16[0][2 * i] & mask;\
524 int y1 = s->temp16[0][2 * i + 1] & mask;
526 int y0 = s->temp16[0][2 * i];\
527 int y1 = s->temp16[0][2 * i + 1];
529 s->stats[plane][y0]++;\
530 s->stats[plane][y1]++;
532 s->stats[plane][y0>>2]++;\
533 s->stats[plane][y1>>2]++;
535 put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
536 put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
538 put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
539 put_bits(&s->pb, 2, y0&3);\
540 put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
541 put_bits(&s->pb, 2, y1&3);
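For samples wider than the VLC tables can cover, the macros above split each residual: the high bits are sent through the per-plane code tables (s->len / s->bits) and the two least-significant bits are appended raw, which is why the direct path further down is guarded by the bps <= 14 check. A trivial standalone sketch of that split; the sample value is made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint16_t y0 = 0x2B57;   /* arbitrary 16-bit residual for illustration */
        const unsigned hi = y0 >> 2;  /* looked up in s->len[plane][hi] / s->bits[plane][hi] */
        const unsigned lo = y0 & 3;   /* written as 2 raw bits right after the code */
        printf("0x%04X -> vlc symbol %u + raw low bits %u\n", y0, hi, lo);
        return 0;
    }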
545 for (i = 0; i < count; i++) {
558 for (i = 0; i < count; i++) {
569 for (i = 0; i < count; i++) {
578 } else if (s->bps <= 14) {
581 for (i = 0; i < count; i++) {
594 for (i = 0; i < count; i++) {
605 for (i = 0; i < count; i++) {
616 for (i = 0; i < count; i++) {
629 for (i = 0; i < count; i++) {
640 for (i = 0; i < count; i++) {
666 int y0 = s->temp[0][2 * i];\
667 int y1 = s->temp[0][2 * i + 1];
672 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
673 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
678 for (i = 0; i < count; i++) {
687 for (i = 0; i < count; i++) {
693 for (i = 0; i < count; i++) {
712 int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
713 int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
714 int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
715 int a = s->temp[0][planes * i + A];
725 put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
726 put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
727 put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
729 put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
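The macro above decorrelates packed RGB(A) before entropy coding: green is coded as-is, while blue and red are coded as their differences from green modulo 256 (alpha, when present, is coded untouched). A minimal sketch of that transform and its inverse, assuming plain 8-bit channels; the helper names are illustrative.

    #include <stdint.h>

    /* Forward: keep g, replace b and r by their offsets from g (mod 256). */
    static void decorrelate_rgb(uint8_t *r, uint8_t *g, uint8_t *b)
    {
        *b = (uint8_t)(*b - *g);
        *r = (uint8_t)(*r - *g);
    }

    /* Inverse, as a decoder would undo it. */
    static void recombine_rgb(uint8_t *r, uint8_t *g, uint8_t *b)
    {
        *b = (uint8_t)(*b + *g);
        *r = (uint8_t)(*r + *g);
    }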
733 for (i = 0; i < count; i++) {
738 for (i = 0; i < count; i++) {
744 for (i = 0; i < count; i++) {
753 const AVFrame *pict, int *got_packet)
756 const int width = s->width;
757 const int width2 = s->width >> 1;
759 const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
760 const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
761 const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
762 const AVFrame * const p = pict;
763 int i, j, size = 0, ret;
773 for (i = 0; i < 4; i++)
774 for (j = 0; j < s->vlc_n; j++)
775 s->stats[i][j] >>= 1;
782 int lefty, leftu, leftv, y, cy;
796 int lefttopy, lefttopu, lefttopv;
813 lefttopy = p->data[0][3];
814 lefttopu = p->data[1][1];
815 lefttopv = p->data[2][1];
816 s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
817 s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
818 s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
822 for (; y < height; y++, cy++) {
825 if (s->bitstream_bpp == 12) {
828 s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
838 s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
839 s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
840 s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
845 for (cy = y = 1; y < height; y++, cy++) {
849 if (s->bitstream_bpp == 12) {
852 if (s->predictor == PLANE && s->interlaced < y) {
853 s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
868 if (s->predictor == PLANE && s->interlaced < cy) {
869 s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
870 s->llvidencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
871 s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
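With the PLANE predictor, every row past the first (and past the interlacing offset) is simply differenced against the row one (field) line above it, so diff_bytes is a plain byte-wise subtraction of two rows. A scalar stand-in for what the llvidencdsp call computes, with an illustrative name:

    #include <stdint.h>

    /* dst[i] = src1[i] - src2[i] (mod 256); here src1 is the current row and
     * src2 the reference row one fake_*stride above it. */
    static void diff_rows(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w)
    {
        for (int i = 0; i < w; i++)
            dst[i] = (uint8_t)(src1[i] - src2[i]);
    }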
888 const int fake_stride = -fake_ystride;
890 int leftr, leftg, leftb, lefta;
898 &leftr, &leftg, &leftb, &lefta);
901 for (y = 1; y < s->height; y++) {
903 if (s->predictor == PLANE && s->interlaced < y) {
904 s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
906 &leftr, &leftg, &leftb, &lefta);
909 &leftr, &leftg, &leftb, &lefta);
916 const int fake_stride = -fake_ystride;
918 int leftr, leftg, leftb;
926 &leftr, &leftg, &leftb);
929 for (y = 1; y < s->height; y++) {
931 if (s->predictor == PLANE && s->interlaced < y) {
932 s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
935 &leftr, &leftg, &leftb);
938 &leftr, &leftg, &leftb);
942 } else if (s->version > 2) {
944 for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
948 int fake_stride = fake_ystride;
950 if (s->chroma && (plane == 1 || plane == 2)) {
951 w >>= s->chroma_h_shift;
952 h >>= s->chroma_v_shift;
953 fake_stride = plane == 1 ? fake_ustride : fake_vstride;
970 lefttop = p->data[plane][0];
980 for (y = 1; y < h; y++) {
983 if (s->predictor == PLANE && s->interlaced < y) {
1009 for (i = 0; i < 4; i++) {
1010 for (j = 0; j < s->vlc_n; j++) {
1011 snprintf(p, end - p, "%"PRIu64" ", s->stats[i][j]);
1027 s->picture_number++;
1048 #define OFFSET(x) offsetof(HYuvContext, x)
1049 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1051 #define COMMON_OPTIONS \
1052 { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism", \
1053 OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 1 }, \
1055 { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \
1056 { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT }, INT_MIN, INT_MAX, VE, "pred" }, \
1057 { "plane", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE }, INT_MIN, INT_MAX, VE, "pred" }, \
1058 { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }, \
1104 #if CONFIG_FFVHUFF_ENCODER