lookahead.c
/*
 * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdlib.h>

#include "vpx_config.h"
#include "lookahead.h"
#include "vp8/common/extend.h"

#define MAX_LAG_BUFFERS (CONFIG_REALTIME_ONLY ? 1 : 25)
struct lookahead_ctx {
  unsigned int max_sz;         /* Absolute size of the queue */
  unsigned int sz;             /* Number of buffers currently in the queue */
  unsigned int read_idx;       /* Read index */
  unsigned int write_idx;      /* Write index */
  struct lookahead_entry *buf; /* Buffer list */
};
/* Return the buffer at the given absolute index and increment the index */
static struct lookahead_entry *pop(struct lookahead_ctx *ctx,
                                   unsigned int *idx) {
  unsigned int index = *idx;
  struct lookahead_entry *buf = ctx->buf + index;

  assert(index < ctx->max_sz);
  if (++index >= ctx->max_sz) index -= ctx->max_sz;
  *idx = index;
  return buf;
}
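
/* Release every frame buffer in the queue and free the context itself.
 * A NULL context is tolerated, so this also serves as the cleanup path
 * when vp8_lookahead_init() fails partway through allocation.
 */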
void vp8_lookahead_destroy(struct lookahead_ctx *ctx) {
  if (ctx) {
    if (ctx->buf) {
      unsigned int i;

      for (i = 0; i < ctx->max_sz; ++i) {
        vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img);
      }
      free(ctx->buf);
    }
    free(ctx);
  }
}
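
/* Allocate and initialize a lookahead queue for frames of the given
 * dimensions. The requested depth is clamped to [1, MAX_LAG_BUFFERS] and one
 * extra slot is added so the last frame can be kept; buffer dimensions are
 * rounded up to multiples of 16. Returns NULL on allocation failure.
 */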
struct lookahead_ctx *vp8_lookahead_init(unsigned int width,
                                         unsigned int height,
                                         unsigned int depth) {
  struct lookahead_ctx *ctx = NULL;
  unsigned int i;

  /* Clamp the lookahead queue depth */
  if (depth < 1) {
    depth = 1;
  } else if (depth > MAX_LAG_BUFFERS) {
    depth = MAX_LAG_BUFFERS;
  }

  /* Keep last frame in lookahead buffer by increasing depth by 1. */
  depth += 1;

  /* Align the buffer dimensions */
  width = (width + 15) & ~15;
  height = (height + 15) & ~15;

  /* Allocate the lookahead structures */
  ctx = calloc(1, sizeof(*ctx));
  if (ctx) {
    ctx->max_sz = depth;
    ctx->buf = calloc(depth, sizeof(*ctx->buf));
    if (!ctx->buf) goto bail;
    for (i = 0; i < depth; ++i) {
      if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img, width, height,
                                      VP8BORDERINPIXELS)) {
        goto bail;
      }
    }
  }
  return ctx;
bail:
  vp8_lookahead_destroy(ctx);
  return NULL;
}
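
/* Copy the source frame into the next write slot and record its timestamps
 * and flags. Returns 1 without copying if the queue has already reached its
 * nominal depth. When an active map is supplied and the conditions noted
 * inside the function hold, only the active macroblock regions are copied;
 * otherwise the whole frame is copied and border-extended.
 */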
int vp8_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
                       int64_t ts_start, int64_t ts_end, unsigned int flags,
                       unsigned char *active_map) {
  struct lookahead_entry *buf;
  int row, col, active_end;
  int mb_rows = (src->y_height + 15) >> 4;
  int mb_cols = (src->y_width + 15) >> 4;

  if (ctx->sz + 2 > ctx->max_sz) return 1;
  ctx->sz++;
  buf = pop(ctx, &ctx->write_idx);

  /* Only do this partial copy if the following conditions are all met:
   * 1. Lookahead queue has a size of 1.
   * 2. Active map is provided.
   * 3. This is not a key frame, golden frame, or altref frame.
   */
  if (ctx->max_sz == 1 && active_map && !flags) {
    for (row = 0; row < mb_rows; ++row) {
      col = 0;

      while (1) {
        /* Find the first active macroblock in this row. */
        for (; col < mb_cols; ++col) {
          if (active_map[col]) break;
        }

        /* No more active macroblocks in this row. */
        if (col == mb_cols) break;

        /* Find the end of the active region in this row. */
        active_end = col;
        for (; active_end < mb_cols; ++active_end) {
          if (!active_map[active_end]) break;
        }

        /* Only copy this active region. */
        vp8_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
                                            16, (active_end - col) << 4);

        /* Start again from the end of this active region. */
        col = active_end;
      }

      active_map += mb_cols;
    }
  } else {
    vp8_copy_and_extend_frame(src, &buf->img);
  }
  buf->ts_start = ts_start;
  buf->ts_end = ts_end;
  buf->flags = flags;
  return 0;
}
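
/* Return the next frame for encoding and remove it from the queue. NULL is
 * returned when the queue is empty, or when it has not yet filled to its
 * nominal depth and drain is not set.
 */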
struct lookahead_entry *vp8_lookahead_pop(struct lookahead_ctx *ctx,
                                          int drain) {
  struct lookahead_entry *buf = NULL;

  assert(ctx != NULL);
  if (ctx->sz && (drain || ctx->sz == ctx->max_sz - 1)) {
    buf = pop(ctx, &ctx->read_idx);
    ctx->sz--;
  }
  return buf;
}
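
/* Return a buffer in the queue without removing it. PEEK_FORWARD looks
 * `index` entries past the read position; PEEK_BACKWARD (index must be 1)
 * returns the most recently popped entry, which is retained in the extra
 * slot reserved by vp8_lookahead_init().
 */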
struct lookahead_entry *vp8_lookahead_peek(struct lookahead_ctx *ctx,
                                           unsigned int index, int direction) {
  struct lookahead_entry *buf = NULL;

  if (direction == PEEK_FORWARD) {
    assert(index < ctx->max_sz - 1);
    if (index < ctx->sz) {
      index += ctx->read_idx;
      if (index >= ctx->max_sz) index -= ctx->max_sz;
      buf = ctx->buf + index;
    }
  } else if (direction == PEEK_BACKWARD) {
    assert(index == 1);

    if (ctx->read_idx == 0) {
      index = ctx->max_sz - 1;
    } else {
      index = ctx->read_idx - index;
    }
    buf = ctx->buf + index;
  }
  return buf;
}
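
/* Return the number of frames currently held in the lookahead queue. */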
unsigned int vp8_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
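
/* Illustrative usage sketch (not part of the encoder; error handling and the
 * surrounding encoder state are omitted, and the variable names below are
 * hypothetical):
 *
 *   struct lookahead_ctx *la = vp8_lookahead_init(width, height, lag_frames);
 *
 *   if (la && !vp8_lookahead_push(la, &raw, ts_start, ts_end, flags, NULL)) {
 *     struct lookahead_entry *e = vp8_lookahead_pop(la, flush);
 *     if (e) {
 *       // encode e->img using e->ts_start, e->ts_end and e->flags
 *     }
 *   }
 *   vp8_lookahead_destroy(la);
 */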