#define FREEZE_INTERVAL 128

/* Arbitrary limit; allowing insanely large frame sizes leads to strange
   problems, so keep it at a reasonable value. */
#define MAX_FRAME_SIZE 32768

/* avctx->trellis is clipped to this to prevent data type overflows and
   undefined behavior; larger values are insanely slow anyway. */
#define MAX_TRELLIS 16
    for (i = 0; i < 2; i++) {
#if FF_API_OLD_ENCODE_AUDIO
        int frontier = 1 << avctx->trellis;
        for (i = 0; i < 2; i++) {
               "allowed. Using %d instead of %d\n", new_frame_size,
               "allowed. Using %d instead of %d\n", new_trellis,
#if FF_API_OLD_ENCODE_AUDIO
      35,   72,  110,  150,  190,  233,  276,  323,
     370,  422,  473,  530,  587,  650,  714,  786,
     858,  940, 1023, 1121, 1219, 1339, 1458, 1612,
    1765, 1980, 2195, 2557, 2919
                                  int *xlow, int *xhigh)
    *xlow  = xout1 + xout2 >> 14;
    *xhigh = xout1 - xout2 >> 14;
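    /* The QMF analysis filter splits each pair of input samples into a low
     * subband sample (sum of the two partial filter outputs) and a high
     * subband sample (their difference), both scaled down by 14 bits. */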
    int diff = av_clip_int16(xhigh - state->s_predictor);
    int pred = 141 * state->scale_factor >> 8;
    return ((diff ^ (diff >> (sizeof(diff)*8-1))) < pred) + 2*(diff >= 0);
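    /* Equivalent to: diff >= 0 ? (diff < pred) + 2 : diff >= -pred
     * The sign of the prediction error selects the upper bit of the 2-bit
     * high-band code; comparing its magnitude against 141/256 of the scale
     * factor selects between the two quantizer levels. */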
    int diff = av_clip_int16(xlow - state->s_predictor);
    int limit = diff ^ (diff >> (sizeof(diff)*8-1));
    limit = limit + 1 << 10;
    if (limit > low_quant[8] * state->scale_factor)
    while (i < 29 && limit > low_quant[i] * state->scale_factor)
    return (diff < 0 ? (i < 2 ? 63 : 33) : 61) - i;
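    /* The absolute prediction error is compared against scaled entries of
     * low_quant[] to find its decision level i; the sign of diff then picks
     * which half of the 6-bit low-band code space the result falls into. */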
    int frontier = 1 << trellis;
    int pathn[2] = {0, 0}, froze = -1;
    for (i = 0; i < 2; i++) {
        nodes_next[i] = c->nodep_buf[i] + frontier;
        nodes[i][0] = c->node_buf[i] + frontier;
        nodes[i][0]->ssd = 0;
        nodes[i][0]->path = 0;
        nodes[i][0]->state = c->band[i];
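    /* Trellis search state: each band keeps up to `frontier` = 1 << trellis
     * candidate nodes per sample pair; node_buf/nodep_buf act as double
     * buffers for the current and next candidate sets, and the search starts
     * from a single node seeded with the current band state. */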
    for (i = 0; i < nb_samples >> 1; i++) {
        int heap_pos[2] = {0, 0};
        for (j = 0; j < 2; j++) {
            next[j] = c->node_buf[j] + frontier*(i & 1);
            memset(nodes_next[j], 0, frontier * sizeof(**nodes_next));
        for (j = 0; j < frontier && nodes[0][j]; j++) {
            /* Only k >> 2 affects the future adaptive state, so testing
             * small steps within those big steps makes no sense. */
            int range = j < frontier/2 ? 4 : 0;
            int ilow = encode_low(&cur_node->state, xlow);
            for (k = ilow - range; k <= ilow + range && k <= 63; k += 4) {
                int decoded, dec_diff, pos;
                decoded = av_clip((cur_node->state.scale_factor *
                                  ff_g722_low_inv_quant6[k] >> 10)
                                + cur_node->state.s_predictor, -16384, 16383);
                dec_diff = xlow - decoded;
#define STORE_NODE(index, UPDATE, VALUE)\
                ssd = cur_node->ssd + dec_diff*dec_diff;\
                /* check for wraparound of the 32-bit ssd accumulator */\
                if (ssd < cur_node->ssd)\
                if (heap_pos[index] < frontier) {\
                    pos = heap_pos[index]++;\
                    assert(pathn[index] < FREEZE_INTERVAL * frontier);\
                    node = nodes_next[index][pos] = next[index]++;\
                    node->path = pathn[index]++;\
                    /* heap full: try to replace one of the leaf nodes, varying which leaf is probed */\
                    pos = (frontier>>1) + (heap_pos[index] & ((frontier>>1) - 1));\
                    if (ssd >= nodes_next[index][pos]->ssd)\
                    node = nodes_next[index][pos];\
                node->state = cur_node->state;\
                c->paths[index][node->path].value = VALUE;\
                c->paths[index][node->path].prev = cur_node->path;\
                /* sift the inserted node up to restore the min-heap order */\
                    int parent = (pos - 1) >> 1;\
                    if (nodes_next[index][parent]->ssd <= ssd)\
                    FFSWAP(struct TrellisNode*, nodes_next[index][parent],\
                                                nodes_next[index][pos]);\
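/* What STORE_NODE is doing, in isolation: per band it maintains a bounded
 * min-heap of at most `frontier` candidate nodes keyed by accumulated squared
 * error (ssd), with the best candidate at the root. The standalone sketch
 * below is not part of g722enc.c; the names Cand and heap_insert are made up
 * for illustration, but the insertion logic mirrors the macro. */
#include <stdint.h>

typedef struct Cand { uint32_t ssd; } Cand;

static void heap_insert(Cand **heap, int frontier, int *heap_len, int *probe,
                        Cand *cand)
{
    int pos;
    if (*heap_len < frontier) {
        /* still room: append at the end of the heap */
        pos = (*heap_len)++;
    } else {
        /* heap full: probe one leaf slot (leaves occupy the upper half of
         * the array) and only accept the candidate if it beats that leaf */
        pos = (frontier >> 1) + ((*probe)++ & ((frontier >> 1) - 1));
        if (cand->ssd >= heap[pos]->ssd)
            return;
    }
    heap[pos] = cand;
    /* sift up: swap with the parent while the parent has a larger ssd */
    while (pos > 0) {
        int parent = (pos - 1) >> 1;
        if (heap[parent]->ssd <= cand->ssd)
            break;
        Cand *tmp = heap[parent];
        heap[parent] = heap[pos];
        heap[pos] = tmp;
        pos = parent;
    }
}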
        for (j = 0; j < frontier && nodes[1][j]; j++) {
            /* No initial guess via encode_high here: the high band only has
             * four possible codes, so all of them are tried. */
            for (ihigh = 0; ihigh < 4; ihigh++) {
                int dhigh, decoded, dec_diff, pos;
                dhigh = cur_node->state.scale_factor *
                        ff_g722_high_inv_quant[ihigh] >> 10;
                decoded = av_clip(dhigh + cur_node->state.s_predictor,
                                  -16384, 16383);
                dec_diff = xhigh - decoded;
        for (j = 0; j < 2; j++) {
            if (nodes[j][0]->ssd > (1 << 16)) {
                for (k = 1; k < frontier && nodes[j][k]; k++)
                    nodes[j][k]->ssd -= nodes[j][0]->ssd;
                nodes[j][0]->ssd = 0;
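            /* Rebasing every node's ssd against the current best keeps the
             * relative ordering intact while preventing the 32-bit error
             * accumulators from overflowing on long frames. */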
            p[0] = &c->paths[0][nodes[0][0]->path];
            p[1] = &c->paths[1][nodes[1][0]->path];
            for (j = i; j > froze; j--) {
                dst[j] = p[1]->value << 6 | p[0]->value;
            pathn[0] = pathn[1] = 0;
            memset(nodes[0] + 1, 0, (frontier - 1)*sizeof(**nodes));
            memset(nodes[1] + 1, 0, (frontier - 1)*sizeof(**nodes));
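            /* Once FREEZE_INTERVAL sample pairs have been processed, the best
             * path so far is committed: the prev links are walked backwards
             * from the best node of each band, emitting one output byte
             * (high code << 6 | low code) per pair, and the path buffers are
             * reset for the next run. */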
    for (j = i; j > froze; j--) {
        dst[j] = p[1]->value << 6 | p[0]->value;
    c->band[0] = nodes[0][0]->state;
    c->band[1] = nodes[1][0]->state;
    int xlow, xhigh, ilow, ihigh;
    *dst = ihigh << 6 | ilow;
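    /* Output byte layout (64 kbit/s mode): the 2-bit high-band code occupies
     * bits 6-7 and the 6-bit low-band code bits 0-5, one byte per pair of
     * input samples. */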
                             const AVFrame *frame, int *got_packet_ptr)
    const int16_t *samples = (const int16_t *)frame->data[0];
    /* handle a last frame with an odd number of samples */
    if (nb_samples < frame->nb_samples) {