FFmpeg 2.6.9
vf_pp7.c
/*
 * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Postprocessing filter - 7
 *
 * Originally written by Michael Niedermayer for the MPlayer
 * project, and ported by Arwa Arif for FFmpeg.
 */

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
#include "vf_pp7.h"

enum mode {
    MODE_HARD,
    MODE_SOFT,
    MODE_MEDIUM
};

#define OFFSET(x) offsetof(PP7Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption pp7_options[] = {
    { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 64, FLAGS },
    { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_MEDIUM}, 0, 2, FLAGS, "mode" },
    { "hard",   "hard thresholding",   0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD},   INT_MIN, INT_MAX, FLAGS, "mode" },
    { "soft",   "soft thresholding",   0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT},   INT_MIN, INT_MAX, FLAGS, "mode" },
    { "medium", "medium thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_MEDIUM}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(pp7);

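/* 8x8 ordered-dither matrix (each value 0..63 appears once): filter() adds
 * dither[y & 7][x & 7] to the requantized value before the final >> 6, so
 * rounding error is spread spatially rather than accumulating per pixel. */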
DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
    {  0, 48, 12, 60,  3, 51, 15, 63, },
    { 32, 16, 44, 28, 35, 19, 47, 31, },
    {  8, 56,  4, 52, 11, 59,  7, 55, },
    { 40, 24, 36, 20, 43, 27, 39, 23, },
    {  2, 50, 14, 62,  1, 49, 13, 61, },
    { 34, 18, 46, 30, 33, 17, 45, 29, },
    { 10, 58,  6, 54,  9, 57,  5, 53, },
    { 42, 26, 38, 22, 41, 25, 37, 21, },
};

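/* Normalization constants: N0 = SN0^2 = 4, N1 = SN1^2 = 5, N2 = SN2^2 = 10
 * (SN1 = sqrt(5), SN2 = sqrt(10)); N = 1 << 16 is the fixed-point scale
 * used for the factor[] and thres[] tables below. */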
#define N0 4
#define N1 5
#define N2 10
#define SN0 2
#define SN1 2.2360679775
#define SN2 3.16227766017
#define N (1 << 16)

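/* Fixed-point (N-scaled) weights for the 16 transform coefficients: the
 * requantize functions multiply every kept coefficient by factor[i] and
 * round the weighted sum back down with (a + (1 << 11)) >> 12. */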
static const int factor[16] = {
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N1 * N0), N / (N1 * N1), N / (N1 * N0), N / (N1 * N2),
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N2 * N0), N / (N2 * N1), N / (N2 * N0), N / (N2 * N2),
};

static const int thres[16] = {
    N / (SN0 * SN0), N / (SN0 * SN2), N / (SN0 * SN0), N / (SN0 * SN2),
    N / (SN2 * SN0), N / (SN2 * SN2), N / (SN2 * SN0), N / (SN2 * SN2),
    N / (SN0 * SN0), N / (SN0 * SN2), N / (SN0 * SN0), N / (SN0 * SN2),
    N / (SN2 * SN0), N / (SN2 * SN2), N / (SN2 * SN0), N / (SN2 * SN2),
};

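/* Precompute, for every qp in 0..98, the dead-zone threshold applied to each
 * of the 16 coefficients by the requantize functions. */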
static void init_thres2(PP7Context *p)
{
    int qp, i;
    int bias = 0; //FIXME

    for (qp = 0; qp < 99; qp++) {
        for (i = 0; i < 16; i++) {
            p->thres2[qp][i] = ((i&1) ? SN2 : SN0) * ((i&4) ? SN2 : SN0) * FFMAX(1, qp) * (1<<2) - 1 - bias;
        }
    }
}

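/* First (vertical) transform pass: for 4 adjacent columns, fold the 7-pixel
 * window src[0..6*stride] into 4 coefficients, stored contiguously per
 * column in dst. */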
static inline void dctA_c(int16_t *dst, uint8_t *src, int stride)
{
    int i;

    for (i = 0; i < 4; i++) {
        int s0 = src[0 * stride] + src[6 * stride];
        int s1 = src[1 * stride] + src[5 * stride];
        int s2 = src[2 * stride] + src[4 * stride];
        int s3 = src[3 * stride];
        int s  = s3 + s3;
        s3 = s - s0;
        s0 = s + s0;
        s  = s2 + s1;
        s2 = s2 - s1;
        dst[0] = s0 + s;
        dst[2] = s0 - s;
        dst[1] = 2 * s3 + s2;
        dst[3] = s3 - 2 * s2;
        src++;
        dst += 4;
    }
}

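/* Second (horizontal) pass: the same butterfly applied across 7 columns of
 * dctA output, producing the 4x4 coefficient block handed to the
 * requantize callback. */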
static void dctB_c(int16_t *dst, int16_t *src)
{
    int i;

    for (i = 0; i < 4; i++) {
        int s0 = src[0 * 4] + src[6 * 4];
        int s1 = src[1 * 4] + src[5 * 4];
        int s2 = src[2 * 4] + src[4 * 4];
        int s3 = src[3 * 4];
        int s  = s3 + s3;
        s3 = s - s0;
        s0 = s + s0;
        s  = s2 + s1;
        s2 = s2 - s1;
        dst[0 * 4] = s0 + s;
        dst[2 * 4] = s0 - s;
        dst[1 * 4] = 2 * s3 + s2;
        dst[3 * 4] = s3 - 2 * s2;
        src++;
        dst++;
    }
}

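/* Hard thresholding: coefficients whose magnitude does not exceed the per-qp
 * threshold are dropped, the rest are kept unchanged; the weighted sum is
 * then rounded back down by 12 bits. */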
static int hardthresh_c(PP7Context *p, int16_t *src, int qp)
{
    int i;
    int a;

    a = src[0] * factor[0];
    for (i = 1; i < 16; i++) {
        unsigned int threshold1 = p->thres2[qp][i];
        unsigned int threshold2 = threshold1 << 1;
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2)
            a += level * factor[i];
    }
    return (a + (1 << 11)) >> 12;
}

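/* Medium thresholding: like hard thresholding, but coefficients between one
 * and two times the threshold are ramped in linearly instead of being kept
 * at full amplitude. */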
static int mediumthresh_c(PP7Context *p, int16_t *src, int qp)
{
    int i;
    int a;

    a = src[0] * factor[0];
    for (i = 1; i < 16; i++) {
        unsigned int threshold1 = p->thres2[qp][i];
        unsigned int threshold2 = threshold1 << 1;
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2) {
            if (((unsigned)(level + 2 * threshold1)) > 2 * threshold2)
                a += level * factor[i];
            else {
                if (level > 0)
                    a += 2 * (level - (int)threshold1) * factor[i];
                else
                    a += 2 * (level + (int)threshold1) * factor[i];
            }
        }
    }
    return (a + (1 << 11)) >> 12;
}

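/* Soft thresholding: coefficients above the threshold are shrunk toward zero
 * by the threshold value ("soft shrinkage"); the rest are dropped. */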
static int softthresh_c(PP7Context *p, int16_t *src, int qp)
{
    int i;
    int a;

    a = src[0] * factor[0];
    for (i = 1; i < 16; i++) {
        unsigned int threshold1 = p->thres2[qp][i];
        unsigned int threshold2 = threshold1 << 1;
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2) {
            if (level > 0)
                a += (level - (int)threshold1) * factor[i];
            else
                a += (level + (int)threshold1) * factor[i];
        }
    }
    return (a + (1 << 11)) >> 12;
}

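/* Filter one plane: copy it into the internally padded buffer with 8 pixels
 * of mirrored border on every side, then, for each output pixel, transform
 * its neighbourhood, requantize the coefficients with that block's qp, and
 * write back the dithered, clipped result. */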
static void filter(PP7Context *p, uint8_t *dst, uint8_t *src,
                   int dst_stride, int src_stride,
                   int width, int height,
                   uint8_t *qp_store, int qp_stride, int is_luma)
{
    int x, y;
    const int stride = is_luma ? p->temp_stride : ((width + 16 + 15) & (~15));
    uint8_t *p_src = p->src + 8 * stride;
    int16_t *block = (int16_t *)p->src;
    int16_t *temp  = (int16_t *)(p->src + 32);

    if (!src || !dst) return;
    for (y = 0; y < height; y++) {
        int index = 8 + 8 * stride + y * stride;
        memcpy(p_src + index, src + y * src_stride, width);
        for (x = 0; x < 8; x++) {
            p_src[index - x - 1]     = p_src[index + x];
            p_src[index + width + x] = p_src[index + width - x - 1];
        }
    }
    for (y = 0; y < 8; y++) {
        memcpy(p_src + (7 - y) * stride, p_src + (y + 8) * stride, stride);
        memcpy(p_src + (height + 8 + y) * stride, p_src + (height - y + 7) * stride, stride);
    }
    //FIXME (try edge emu)

    for (y = 0; y < height; y++) {
        for (x = -8; x < 0; x += 4) {
            const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
            uint8_t *src = p_src + index;
            int16_t *tp = temp + 4 * x;

            dctA_c(tp + 4 * 8, src, stride);
        }
        for (x = 0; x < width; ) {
            const int qps = 3 + is_luma;
            int qp;
            int end = FFMIN(x + 8, width);

            if (p->qp)
                qp = p->qp;
            else {
                qp = qp_store[(FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
                qp = ff_norm_qscale(qp, p->qscale_type);
            }
            for (; x < end; x++) {
                const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
                uint8_t *src = p_src + index;
                int16_t *tp = temp + 4 * x;
                int v;

                if ((x & 3) == 0)
                    dctA_c(tp + 4 * 8, src, stride);

                p->dctB(block, tp);

                v = p->requantize(p, block, qp);
                v = (v + dither[y & 7][x & 7]) >> 6;
                if ((unsigned)v > 255)
                    v = (-v) >> 31;
                dst[x + y * dst_stride] = v;
            }
        }
    }
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum PixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_NONE
    };
    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}

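/* Per-link setup: remember the chroma subsampling, allocate the padded
 * scratch buffer, build the threshold tables and pick the requantize and
 * dctB implementations (C, optionally replaced by x86-optimized versions). */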
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    PP7Context *pp7 = ctx->priv;
    const int h = FFALIGN(inlink->h + 16, 16);
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    pp7->hsub = desc->log2_chroma_w;
    pp7->vsub = desc->log2_chroma_h;

    pp7->temp_stride = FFALIGN(inlink->w + 16, 16);
    pp7->src = av_malloc_array(pp7->temp_stride, (h + 8) * sizeof(uint8_t));

    if (!pp7->src)
        return AVERROR(ENOMEM);

    init_thres2(pp7);

    switch (pp7->mode) {
    case 0: pp7->requantize = hardthresh_c; break;
    case 1: pp7->requantize = softthresh_c; break;
    default:
    case 2: pp7->requantize = mediumthresh_c; break;
    }

    pp7->dctB = dctB_c;

    if (ARCH_X86)
        ff_pp7_init_x86(pp7);

    return 0;
}

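/* Per-frame entry point: fetch the frame's qp table unless a constant qp was
 * forced, then (unless the filter is timeline-disabled) run filter() on the
 * luma and both chroma planes, allocating a padded output frame when
 * in-place processing is not possible, and forward the result. */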
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    PP7Context *pp7 = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = in;

    int qp_stride = 0;
    uint8_t *qp_table = NULL;

    if (!pp7->qp)
        qp_table = av_frame_get_qp_table(in, &qp_stride, &pp7->qscale_type);

    if (!ctx->is_disabled) {
        const int cw = FF_CEIL_RSHIFT(inlink->w, pp7->hsub);
        const int ch = FF_CEIL_RSHIFT(inlink->h, pp7->vsub);

        /* get a new frame if in-place is not possible or if the dimensions
         * are not a multiple of 8 */
        if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
            const int aligned_w = FFALIGN(inlink->w, 8);
            const int aligned_h = FFALIGN(inlink->h, 8);

            out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
            if (!out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
            out->width  = in->width;
            out->height = in->height;
        }

        if (qp_table || pp7->qp) {

            filter(pp7, out->data[0], in->data[0], out->linesize[0], in->linesize[0],
                   inlink->w, inlink->h, qp_table, qp_stride, 1);
            filter(pp7, out->data[1], in->data[1], out->linesize[1], in->linesize[1],
                   cw, ch, qp_table, qp_stride, 0);
            filter(pp7, out->data[2], in->data[2], out->linesize[2], in->linesize[2],
                   cw, ch, qp_table, qp_stride, 0);
            emms_c();
        }
    }

    if (in != out) {
        if (in->data[3])
            av_image_copy_plane(out->data[3], out->linesize[3],
                                in ->data[3], in ->linesize[3],
                                inlink->w, inlink->h);
        av_frame_free(&in);
    }
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    PP7Context *pp7 = ctx->priv;
    av_freep(&pp7->src);
}

static const AVFilterPad pp7_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad pp7_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_pp7 = {
    .name            = "pp7",
    .description     = NULL_IF_CONFIG_SMALL("Apply Postprocessing 7 filter."),
    .priv_size       = sizeof(PP7Context),
    .uninit          = uninit,
    .query_formats   = query_formats,
    .inputs          = pp7_inputs,
    .outputs         = pp7_outputs,
    .priv_class      = &pp7_class,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};