f_reverse.c

/*
 * Copyright (c) 2015 Derek Buitenhuis
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define DEFAULT_LENGTH 300
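
/* Shared context for the reverse/areverse filters: input frames and their
 * original pts values are buffered here so they can be sent out in reverse
 * order once the input ends. */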
typedef struct ReverseContext {
    int nb_frames;
    AVFrame **frames;
    unsigned int frames_size;
    unsigned int pts_size;
    int64_t *pts;
    int flush_idx;
} ReverseContext;
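
/* Pre-allocate room for DEFAULT_LENGTH buffered frames and timestamps. */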
static av_cold int init(AVFilterContext *ctx)
{
    ReverseContext *s = ctx->priv;

    s->pts = av_fast_realloc(NULL, &s->pts_size,
                             DEFAULT_LENGTH * sizeof(*(s->pts)));
    if (!s->pts)
        return AVERROR(ENOMEM);

    s->frames = av_fast_realloc(NULL, &s->frames_size,
                                DEFAULT_LENGTH * sizeof(*(s->frames)));
    if (!s->frames) {
        av_freep(&s->pts);
        return AVERROR(ENOMEM);
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ReverseContext *s = ctx->priv;

    av_freep(&s->pts);
    av_freep(&s->frames);
}
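
/* Store each incoming frame and its pts, growing the buffers with
 * av_fast_realloc() as needed; nothing is output until the input is
 * exhausted. */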
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ReverseContext *s = ctx->priv;
    void *ptr;

    if (s->nb_frames + 1 > s->pts_size / sizeof(*(s->pts))) {
        ptr = av_fast_realloc(s->pts, &s->pts_size, s->pts_size * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->pts = ptr;
    }

    if (s->nb_frames + 1 > s->frames_size / sizeof(*(s->frames))) {
        ptr = av_fast_realloc(s->frames, &s->frames_size, s->frames_size * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->frames = ptr;
    }

    s->frames[s->nb_frames] = in;
    s->pts[s->nb_frames]    = in->pts;
    s->nb_frames++;

    return 0;
}

#if CONFIG_REVERSE_FILTER
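/* On EOF of the input, emit the buffered frames from last to first while
 * reassigning the buffered pts values in their original, increasing order. */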
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts = s->pts[s->flush_idx++];
        ret = ff_filter_frame(outlink, out);
        s->nb_frames--;
    }

    return ret;
}

static const AVFilterPad reverse_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad reverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_reverse = {
    .name        = "reverse",
    .description = NULL_IF_CONFIG_SMALL("Reverse a clip."),
    .priv_size   = sizeof(ReverseContext),
    .init        = init,
    .uninit      = uninit,
    .inputs      = reverse_inputs,
    .outputs     = reverse_outputs,
};
#endif /* CONFIG_REVERSE_FILTER */

#if CONFIG_AREVERSE_FILTER
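/* The audio variant accepts any channel count, sample format and sample rate. */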
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    ret = ff_set_common_formats(ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO));
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
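
/* Reverse the samples of every plane in place (planar sample formats). */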
static void reverse_samples_planar(AVFrame *out)
{
    for (int p = 0; p < out->channels; p++) {
        switch (out->format) {
        case AV_SAMPLE_FMT_U8P: {
            uint8_t *dst = (uint8_t *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(uint8_t, dst[i], dst[j]);
            }
            break;
        case AV_SAMPLE_FMT_S16P: {
            int16_t *dst = (int16_t *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(int16_t, dst[i], dst[j]);
            }
            break;
        case AV_SAMPLE_FMT_S32P: {
            int32_t *dst = (int32_t *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(int32_t, dst[i], dst[j]);
            }
            break;
        case AV_SAMPLE_FMT_FLTP: {
            float *dst = (float *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(float, dst[i], dst[j]);
            }
            break;
        case AV_SAMPLE_FMT_DBLP: {
            double *dst = (double *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(double, dst[i], dst[j]);
            }
            break;
        }
    }
}
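
/* Reverse interleaved samples in place, swapping one whole sample per channel
 * at a time so the channel interleaving is preserved. */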
static void reverse_samples_packed(AVFrame *out)
{
    const int channels = out->channels;

    switch (out->format) {
    case AV_SAMPLE_FMT_U8: {
        uint8_t *dst = (uint8_t *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(uint8_t, dst[i * channels + p], dst[j * channels + p]);
        }
        break;
    case AV_SAMPLE_FMT_S16: {
        int16_t *dst = (int16_t *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(int16_t, dst[i * channels + p], dst[j * channels + p]);
        }
        break;
    case AV_SAMPLE_FMT_S32: {
        int32_t *dst = (int32_t *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(int32_t, dst[i * channels + p], dst[j * channels + p]);
        }
        break;
    case AV_SAMPLE_FMT_FLT: {
        float *dst = (float *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(float, dst[i * channels + p], dst[j * channels + p]);
        }
        break;
    case AV_SAMPLE_FMT_DBL: {
        double *dst = (double *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(double, dst[i * channels + p], dst[j * channels + p]);
        }
        break;
    }
}
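
/* Same flush logic as the video request_frame(), except each buffered frame
 * also has its samples reversed before being passed downstream. */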
static int areverse_request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts = s->pts[s->flush_idx++];

        if (av_sample_fmt_is_planar(out->format))
            reverse_samples_planar(out);
        else
            reverse_samples_packed(out);
        ret = ff_filter_frame(outlink, out);
        s->nb_frames--;
    }

    return ret;
}

static const AVFilterPad areverse_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad areverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = areverse_request_frame,
    },
    { NULL }
};

AVFilter ff_af_areverse = {
    .name          = "areverse",
    .description   = NULL_IF_CONFIG_SMALL("Reverse an audio clip."),
    .query_formats = query_formats,
    .priv_size     = sizeof(ReverseContext),
    .init          = init,
    .uninit        = uninit,
    .inputs        = areverse_inputs,
    .outputs       = areverse_outputs,
};
#endif /* CONFIG_AREVERSE_FILTER */