  1. /*
  2. * Copyright (c) 2018 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
#include <float.h>

#include "libavutil/eval.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "libavutil/xga_font_data.h"

#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
typedef struct GraphMonitorContext {
    const AVClass *class;

    int w, h;              ///< output frame size ("size"/"s" option)
    float opacity;         ///< background opacity, 0..1 ("opacity"/"o" option)
    int mode;              ///< 0 = full, 1 = compact (see "mode" option consts)
    int flags;             ///< bitmask of MODE_* items to draw ("flags"/"f" option)
    AVRational frame_rate; ///< output frame rate ("rate"/"r" option)

    int64_t pts;           ///< pts of the last frame sent downstream

    /* Pre-computed RGBA drawing colors, filled in config_output().
     * Only the components set there are non-zero; priv data is
     * zero-initialized, so the rest stays 0. */
    uint8_t white[4];
    uint8_t yellow[4];
    uint8_t red[4];
    uint8_t green[4];
    uint8_t bg[4];
} GraphMonitorContext;
/* Per-link items selectable through the "flags" option; each bit
 * enables one statistic drawn by draw_items(). */
enum {
    MODE_QUEUE = 1 << 0, /* number of queued frames on the link */
    MODE_FCIN  = 1 << 1, /* frame_count_in of the link */
    MODE_FCOUT = 1 << 2, /* frame_count_out of the link */
    MODE_PTS   = 1 << 3, /* current link pts */
    MODE_TIME  = 1 << 4, /* current link pts as wall-clock-style time */
    MODE_TB    = 1 << 5, /* link time base */
    MODE_FMT   = 1 << 6, /* pixel/sample format of the link */
    MODE_SIZE  = 1 << 7, /* video size or audio channel count */
    MODE_RATE  = 1 << 8, /* video frame rate or audio sample rate */
};
  57. #define OFFSET(x) offsetof(GraphMonitorContext, x)
  58. #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
  59. static const AVOption graphmonitor_options[] = {
  60. { "size", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
  61. { "s", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
  62. { "opacity", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
  63. { "o", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF },
  64. { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
  65. { "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
  66. { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, VF, "mode" },
  67. { "compact", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, VF, "mode" },
  68. { "flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
  69. { "f", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
  70. { "queue", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_QUEUE}, 0, 0, VF, "flags" },
  71. { "frame_count_in", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCOUT}, 0, 0, VF, "flags" },
  72. { "frame_count_out", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCIN}, 0, 0, VF, "flags" },
  73. { "pts", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_PTS}, 0, 0, VF, "flags" },
  74. { "time", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TIME}, 0, 0, VF, "flags" },
  75. { "timebase", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TB}, 0, 0, VF, "flags" },
  76. { "format", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FMT}, 0, 0, VF, "flags" },
  77. { "size", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_SIZE}, 0, 0, VF, "flags" },
  78. { "rate", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_RATE}, 0, 0, VF, "flags" },
  79. { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
  80. { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
  81. { NULL }
  82. };
  83. static int query_formats(AVFilterContext *ctx)
  84. {
  85. AVFilterLink *outlink = ctx->outputs[0];
  86. static const enum AVPixelFormat pix_fmts[] = {
  87. AV_PIX_FMT_RGBA,
  88. AV_PIX_FMT_NONE
  89. };
  90. int ret;
  91. AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
  92. if ((ret = ff_formats_ref(fmts_list, &outlink->in_formats)) < 0)
  93. return ret;
  94. return 0;
  95. }
  96. static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
  97. {
  98. int bg = AV_RN32(s->bg);
  99. for (int i = 0; i < out->height; i++)
  100. for (int j = 0; j < out->width; j++)
  101. AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg);
  102. }
  103. static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
  104. {
  105. const uint8_t *font;
  106. int font_height;
  107. int i;
  108. font = avpriv_cga_font, font_height = 8;
  109. if (y + 8 >= pic->height ||
  110. x + strlen(txt) * 8 >= pic->width)
  111. return;
  112. for (i = 0; txt[i]; i++) {
  113. int char_y, mask;
  114. uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*4;
  115. for (char_y = 0; char_y < font_height; char_y++) {
  116. for (mask = 0x80; mask; mask >>= 1) {
  117. if (font[txt[i] * font_height + char_y] & mask) {
  118. p[0] = color[0];
  119. p[1] = color[1];
  120. p[2] = color[2];
  121. }
  122. p += 4;
  123. }
  124. p += pic->linesize[0] - 8 * 4;
  125. }
  126. }
  127. }
  128. static int filter_have_queued(AVFilterContext *filter)
  129. {
  130. for (int j = 0; j < filter->nb_inputs; j++) {
  131. AVFilterLink *l = filter->inputs[j];
  132. size_t frames = ff_inlink_queued_frames(l);
  133. if (frames)
  134. return 1;
  135. }
  136. for (int j = 0; j < filter->nb_outputs; j++) {
  137. AVFilterLink *l = filter->outputs[j];
  138. size_t frames = ff_inlink_queued_frames(l);
  139. if (frames)
  140. return 1;
  141. }
  142. return 0;
  143. }
/**
 * Render the per-link statistics selected by the "flags" option as one
 * horizontal text line starting at (xpos, ypos). Each enabled item is
 * appended after the previous one; xpos advances 8 pixels per drawn
 * character (8x8 font).
 *
 * @param ctx    graphmonitor filter instance (options and colors)
 * @param out    destination RGBA frame
 * @param xpos   starting x position in pixels
 * @param ypos   y position of the line in pixels
 * @param l      filtergraph link whose stats are drawn
 * @param frames number of frames currently queued on the link
 */
static void draw_items(AVFilterContext *ctx, AVFrame *out,
                       int xpos, int ypos,
                       AVFilterLink *l,
                       size_t frames)
{
    GraphMonitorContext *s = ctx->priv;
    char buffer[1024] = { 0 };

    /* NOTE(review): for a link that is neither video nor audio, the
     * MODE_FMT/MODE_SIZE/MODE_RATE branches leave `buffer` holding the
     * previously formatted item, which is then drawn again — presumably
     * only video/audio links occur in practice; verify. */
    if (s->flags & MODE_FMT) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                     av_get_pix_fmt_name(l->format));
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                     av_get_sample_fmt_name(l->format));
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_SIZE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | size: %dx%d", l->w, l->h);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | channels: %d", l->channels);
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_RATE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | fps: %d/%d", l->frame_rate.num, l->frame_rate.den);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | samplerate: %d", l->sample_rate);
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_TB) {
        snprintf(buffer, sizeof(buffer)-1, " | tb: %d/%d", l->time_base.num, l->time_base.den);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_QUEUE) {
        snprintf(buffer, sizeof(buffer)-1, " | queue: ");
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
        snprintf(buffer, sizeof(buffer)-1, "%"SIZE_SPECIFIER, frames);
        /* queue-depth color coding: 0 white, 1-9 green, 10-49 yellow, >=50 red */
        drawtext(out, xpos, ypos, buffer, frames > 0 ? frames >= 10 ? frames >= 50 ? s->red : s->yellow : s->green : s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_FCIN) {
        snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_FCOUT) {
        snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_PTS) {
        snprintf(buffer, sizeof(buffer)-1, " | pts: %s", av_ts2str(l->current_pts_us));
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_TIME) {
        snprintf(buffer, sizeof(buffer)-1, " | time: %s", av_ts2timestr(l->current_pts_us, &AV_TIME_BASE_Q));
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
}
/**
 * Allocate one output frame, paint the background and draw one text
 * section per filter in the graph: the filter's instance and class
 * names, then one line per input and output link with its statistics.
 *
 * @param ctx the graphmonitor filter instance
 * @param pts pts (in outlink time base) to set on the produced frame
 * @return result of ff_filter_frame(), or AVERROR(ENOMEM) on allocation
 *         failure
 */
static int create_frame(AVFilterContext *ctx, int64_t pts)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int xpos, ypos = 0;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    clear_image(s, out, outlink);

    for (int i = 0; i < ctx->graph->nb_filters; i++) {
        AVFilterContext *filter = ctx->graph->filters[i];
        char buffer[1024] = { 0 };

        /* compact mode: skip filters that have nothing queued */
        if (s->mode && !filter_have_queued(filter))
            continue;

        /* header line: instance name, then filter class name */
        xpos = 0;
        drawtext(out, xpos, ypos, filter->name, s->white);
        xpos += strlen(filter->name) * 8 + 10;
        drawtext(out, xpos, ypos, filter->filter->name, s->white);
        ypos += 10;
        for (int j = 0; j < filter->nb_inputs; j++) {
            AVFilterLink *l = filter->inputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            /* compact mode: skip idle links */
            if (s->mode && !frames)
                continue;

            xpos = 10;
            snprintf(buffer, sizeof(buffer)-1, "in%d: ", j);
            drawtext(out, xpos, ypos, buffer, s->white);
            xpos += strlen(buffer) * 8;
            drawtext(out, xpos, ypos, l->src->name, s->white);
            xpos += strlen(l->src->name) * 8 + 10;
            draw_items(ctx, out, xpos, ypos, l, frames);
            ypos += 10;
        }

        ypos += 2; /* small gap between the input and output sections */
        for (int j = 0; j < filter->nb_outputs; j++) {
            AVFilterLink *l = filter->outputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if (s->mode && !frames)
                continue;

            xpos = 10;
            snprintf(buffer, sizeof(buffer)-1, "out%d: ", j);
            drawtext(out, xpos, ypos, buffer, s->white);
            xpos += strlen(buffer) * 8;
            drawtext(out, xpos, ypos, l->dst->name, s->white);
            xpos += strlen(l->dst->name) * 8 + 10;
            draw_items(ctx, out, xpos, ypos, l, frames);
            ypos += 10;
        }
        ypos += 5; /* gap before the next filter's section */
    }

    out->pts = pts;
    s->pts = pts; /* remember last emitted pts; used by activate() */
    return ff_filter_frame(outlink, out);
}
/**
 * Filter activation callback. Consumes (and discards) one queued input
 * frame, and when its rescaled pts advances past the last emitted frame
 * and the output wants a frame, produces a new stats frame.
 *
 * Note: the FF_FILTER_FORWARD_* macros may return from this function.
 */
static int activate(AVFilterContext *ctx)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = AV_NOPTS_VALUE;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (ff_inlink_queued_frames(inlink)) {
        AVFrame *frame = NULL;
        int ret;

        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            /* only the timestamp is used; the frame data is dropped */
            pts = frame->pts;
            av_frame_free(&frame);
        }
    }

    if (pts != AV_NOPTS_VALUE) {
        pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
        /* emit at most one frame per output time-base tick */
        if (s->pts < pts && ff_outlink_frame_wanted(outlink))
            return create_frame(ctx, pts);
    }

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
  296. static int config_output(AVFilterLink *outlink)
  297. {
  298. GraphMonitorContext *s = outlink->src->priv;
  299. s->bg[3] = 255 * s->opacity;
  300. s->white[0] = s->white[1] = s->white[2] = 255;
  301. s->yellow[0] = s->yellow[1] = 255;
  302. s->red[0] = 255;
  303. s->green[1] = 255;
  304. outlink->w = s->w;
  305. outlink->h = s->h;
  306. outlink->sample_aspect_ratio = (AVRational){1,1};
  307. outlink->frame_rate = s->frame_rate;
  308. outlink->time_base = av_inv_q(s->frame_rate);
  309. return 0;
  310. }
#if CONFIG_GRAPHMONITOR_FILTER

AVFILTER_DEFINE_CLASS(graphmonitor);

/* Video variant: one video input (timestamps only are used), one video output. */
static const AVFilterPad graphmonitor_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad graphmonitor_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_graphmonitor = {
    .name = "graphmonitor",
    .description = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size = sizeof(GraphMonitorContext),
    .priv_class = &graphmonitor_class,
    .query_formats = query_formats,
    .activate = activate,
    .inputs = graphmonitor_inputs,
    .outputs = graphmonitor_outputs,
};

#endif // CONFIG_GRAPHMONITOR_FILTER
#if CONFIG_AGRAPHMONITOR_FILTER

/* Audio variant: identical behavior and options, but takes an audio
 * input; the output is still video. */
#define agraphmonitor_options graphmonitor_options
AVFILTER_DEFINE_CLASS(agraphmonitor);

static const AVFilterPad agraphmonitor_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad agraphmonitor_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_agraphmonitor = {
    .name = "agraphmonitor",
    .description = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size = sizeof(GraphMonitorContext),
    .priv_class = &agraphmonitor_class,
    .query_formats = query_formats,
    .activate = activate,
    .inputs = agraphmonitor_inputs,
    .outputs = agraphmonitor_outputs,
};

#endif // CONFIG_AGRAPHMONITOR_FILTER