#define BITSTREAM_READER_LE

/* tgq_decode_block(): coefficient loop */
for (i = 1; i < 64;) {
    ...
    for (j = 0; j < value; j++)   /* run of `value` zero coefficients in scan order */
        ...
    ...
}
block[0] += 128 << 4;             /* add back the 128 sample bias, in the IDCT's fixed-point scale */
/* tgq_dconly(): fill one 8x8 block with a single DC level */
static void tgq_dconly(TgqContext *s, unsigned char *dst,
                       ptrdiff_t dst_stride, int dc)
{
    ...
    for (j = 0; j < 8; j++)
        memset(dst + j * dst_stride, level, 8);
}

/* tgq_idct_put_mb_dconly(): render all six blocks of a macroblock from DC values only */
static void tgq_idct_put_mb_dconly(TgqContext *s, AVFrame *frame,
                                   int mb_x, int mb_y, const int8_t *dc)
/* tgq_decode_mb(): the mode byte selects a fully coded or a DC-only macroblock;
 * for DC-only macroblocks the mode value is the number of DC bytes that follow. */
mode = bytestream2_get_byte(&s->gb);
if (mode > 12) {
    /* fully coded macroblock: six 8x8 coefficient blocks follow */
    for (i = 0; i < 6; i++) {
        ...
    }
} else {
    if (mode == 3) {
        memset(dc, bytestream2_get_byte(&s->gb), 4);   /* one DC shared by the four luma blocks */
        dc[4] = bytestream2_get_byte(&s->gb);
        dc[5] = bytestream2_get_byte(&s->gb);
    } else if (mode == 6) {
        bytestream2_get_buffer(&s->gb, dc, 6);         /* one DC byte per block */
    } else if (mode == 12) {
        for (i = 0; i < 6; i++) {
            dc[i] = bytestream2_get_byte(&s->gb);      /* one DC byte per block */
            ...
        }
    }
}
/* tgq_calculate_qtable(): derive the 8x8 dequantisation table from the frame quantiser */
const int a = (14 * (100 - quant)) / 100 + 1;
const int b = (11 * (100 - quant)) / 100 + 4;
for (j = 0; j < 8; j++)
    for (i = 0; i < 8; i++)
        s->qtable[j * 8 + i] = ((a * (j + i) / (7 + 7) + b) *
                                ff_inv_aanscales[j * 8 + i]) >> (14 - 4);
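As a worked example of the two linear terms above (plain arithmetic, not quoted from the source): for quant = 50, a = (14 * 50) / 100 + 1 = 8 and b = (11 * 50) / 100 + 4 = 9, so the unscaled entry grows linearly with j + i from b = 9 at the DC position to a + b = 17 at the bottom-right coefficient, before being scaled by the inverse AAN table.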
static int tgq_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    ...
    /* guess the header endianness from the 32-bit payload-size field at offset 4 */
    big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
    if (big_endian) {
        s->width  = bytestream2_get_be16u(&s->gb);
        s->height = bytestream2_get_be16u(&s->gb);
    } else {
        s->width  = bytestream2_get_le16u(&s->gb);
        s->height = bytestream2_get_le16u(&s->gb);
    }
    ...
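As a worked illustration of that probe (the header bytes below are invented, not taken from a real TGQ file): a big-endian header that stores the payload size 0x00001234 at offset 4 is read by AV_RL32() as 0x34120000, which exceeds 0x000FFFFF, so the big-endian path is taken.

#include "libavutil/intreadwrite.h"

uint8_t hdr[8] = { 0, 0, 0, 0, 0x00, 0x00, 0x12, 0x34 }; /* hypothetical header, size field at offset 4 */
int big_endian = AV_RL32(&hdr[4]) > 0x000FFFFF;          /* 0x34120000 > 0x000FFFFF -> 1 */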
const uint16_t ff_inv_aanscales[64]
AAN (Arai, Agui and Nakajima) (I)DCT tables.
avcodec.h
Libavcodec external API header.
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
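A minimal sketch of the GetByteContext calls listed above, assuming a caller-supplied buffer buf/size (the surrounding function and its field layout are invented for the example):

#include "bytestream.h"

static void parse_example(const uint8_t *buf, int size)
{
    GetByteContext gb;
    uint8_t dc[6];

    bytestream2_init(&gb, buf, size);            /* bounds-checked reader over buf */
    if (bytestream2_get_bytes_left(&gb) < 8)     /* never read past the end */
        return;
    bytestream2_skip(&gb, 2);                    /* skip a 2-byte field */
    bytestream2_get_buffer(&gb, dc, sizeof(dc)); /* copy out 6 bytes */
}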
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
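A sketch of the usual decoder pattern around ff_get_buffer(), assuming an intra-only codec like this one; the helper function itself is invented for illustration:

#include "avcodec.h"
#include "internal.h"

static int get_output_frame(AVCodecContext *avctx, AVFrame *frame)
{
    int ret = ff_get_buffer(avctx, frame, 0);  /* allocates frame->data/linesize */
    if (ret < 0)
        return ret;
    frame->key_frame = 1;                      /* every frame is intra coded */
    frame->pict_type = AV_PICTURE_TYPE_I;
    return 0;
}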
void ff_ea_idct_put_c(uint8_t *dest, ptrdiff_t linesize, int16_t *block)
static void tgq_calculate_qtable(TgqContext *s, int quant)
static void tgq_dconly(TgqContext *s, unsigned char *dst, ptrdiff_t dst_stride, int dc)
static int tgq_decode_block(TgqContext *s, int16_t block[64], GetBitContext *gb)
static av_cold int tgq_decode_init(AVCodecContext *avctx)
static void tgq_idct_put_mb_dconly(TgqContext *s, AVFrame *frame, int mb_x, int mb_y, const int8_t *dc)
static void tgq_idct_put_mb(TgqContext *s, int16_t(*block)[64], AVFrame *frame, int mb_x, int mb_y)
static int tgq_decode_mb(TgqContext *s, AVFrame *frame, int mb_y, int mb_x)
static int tgq_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
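Piecing the entries above together, the rough call flow (a summary drawn from the fragments, not code from the file) is:

/* tgq_decode_frame()
 *   -> tgq_calculate_qtable()           once per frame, from the frame quantiser
 *   -> ff_get_buffer()                  obtain the output AVFrame
 *   -> tgq_decode_mb() per macroblock
 *        fully coded path:  tgq_decode_block() for each of the six blocks, then tgq_idct_put_mb()
 *        DC-only path (modes 3, 6, 12): read DC bytes, then tgq_idct_put_mb_dconly() -> tgq_dconly()
 */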
get_bits.h
bitstream reader API header.
static int get_sbits(GetBitContext *s, int n)
static void skip_bits(GetBitContext *s, int n)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
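A minimal sketch of the bitstream-reader calls listed above; the input buffer, the peeked value and the field widths are invented for the example:

#include "get_bits.h"

static int read_example(const uint8_t *buf, int size)
{
    GetBitContext gb;
    int ret = init_get_bits8(&gb, buf, size); /* byte-sized init, returns < 0 on error */
    if (ret < 0)
        return ret;
    if (show_bits(&gb, 3) == 0) {             /* peek 3 bits without consuming them */
        skip_bits(&gb, 3);
        return get_sbits(&gb, 8);             /* signed 8-bit value, as used for DC levels */
    }
    skip_bits(&gb, 2);
    return get_bits(&gb, 6);                  /* unsigned 6-bit value */
}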
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
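For illustration, the usual way a decoder context declares its IDCT coefficient blocks with this macro; the exact field declaration is an assumption, mirroring the six 8x8 blocks the fragments above operate on:

DECLARE_ALIGNED(16, int16_t, block)[6][64];   /* six 8x8 coefficient blocks, 16-byte aligned */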
AV_PICTURE_TYPE_I
Intra.
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation, enum idct_permutation_type perm_type)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
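A sketch of how a decoder typically applies it right after parsing the header width and height; ret, s and avctx are the surrounding decoder's variables, and the error handling shown is generic rather than quoted from the file:

ret = ff_set_dimensions(avctx, s->width, s->height);
if (ret < 0)
    return ret;   /* dimensions rejected as invalid */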
internal.h
common internal API header
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
const uint8_t ff_zigzag_direct[64]
AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
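Since the output is planar 4:2:0, a 16x16 macroblock at (mb_x, mb_y) covers a 16x16 luma area and two 8x8 chroma areas. A hedged sketch of the plane addressing, using the AVFrame data/linesize fields listed below; the helper is hypothetical:

/* Hypothetical helper: per-plane destinations for macroblock (mb_x, mb_y) in a YUV420P frame. */
static void mb_pointers(AVFrame *frame, int mb_x, int mb_y,
                        uint8_t **y, uint8_t **u, uint8_t **v)
{
    *y = frame->data[0] + mb_y * 16 * frame->linesize[0] + mb_x * 16;
    *u = frame->data[1] + mb_y *  8 * frame->linesize[1] + mb_x *  8;
    *v = frame->data[2] + mb_y *  8 * frame->linesize[2] + mb_x *  8;
}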
AVCodecContext
main external API structure.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int width
picture width / height.
const char * name
Name of the codec implementation.
AVFrame
This structure describes decoded (raw) audio or video data.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int key_frame
1 -> keyframe, 0 -> not
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
enum AVPictureType pict_type
Picture type of the frame.
AVPacket
This structure stores compressed data.
AVRational
Rational number (pair of numerator and denominator).