apr_rmm.c

/* Copyright 2000-2005 The Apache Software Foundation or its licensors, as
 * applicable.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr_general.h"
#include "apr_rmm.h"
#include "apr_errno.h"
#include "apr_lib.h"
#include "apr_strings.h"

/* The RMM region is made up of two doubly-linked lists of blocks: the
 * list of used blocks, and the list of free blocks (either list may
 * be empty). The base pointer, rmm->base, points at the beginning of
 * the shmem region in use. Each block is addressable by an
 * apr_rmm_off_t value, which represents the offset from the base
 * pointer. The term "address" is used here to mean such a value; an
 * "offset from rmm->base".
 *
 * The RMM region contains exactly one "rmm_hdr_block_t" structure,
 * the "header block", which is always stored at the base pointer.
 * The firstused field in this structure is the address of the first
 * block in the "used blocks" list; the firstfree field is the address
 * of the first block in the "free blocks" list.
 *
 * Each block is prefixed by an "rmm_block_t" structure, followed by
 * the caller-usable region represented by the block. The next and
 * prev fields of the structure are zero if the block is at the end or
 * beginning of the linked list respectively, or otherwise hold the
 * address of the next and previous blocks in the list. ("Address 0",
 * i.e. rmm->base, is *not* a valid address for a block, since the
 * header block is always stored at that address.)
 *
 * At creation, the RMM region is initialized to hold a single block
 * on the free list representing the entire available shm segment
 * (minus the header block); subsequent allocation and deallocation of
 * blocks involves splitting blocks and coalescing adjacent blocks,
 * and switching them between the free and used lists as
 * appropriate. */
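
/* Layout sketch: immediately after apr_rmm_init() the region consists of
 * the header block followed by a single free block covering the rest of
 * the segment:
 *
 *   offset 0                    rmm_hdr_block_t
 *                                 abssize   = size
 *                                 firstused = 0
 *                                 firstfree = RMM_HDR_BLOCK_SIZE
 *   offset RMM_HDR_BLOCK_SIZE   rmm_block_t
 *                                 size = size - RMM_HDR_BLOCK_SIZE
 *                                 prev = 0, next = 0
 *                               ...caller-usable space follows the
 *                               rmm_block_t header of each block.
 *
 * A successful apr_rmm_malloc()/apr_rmm_calloc() returns the offset of
 * the usable space, i.e. the block's offset plus RMM_BLOCK_SIZE.
 */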

typedef struct rmm_block_t {
    apr_size_t size;
    apr_rmm_off_t prev;
    apr_rmm_off_t next;
} rmm_block_t;

/* Always at our apr_rmm_off(0):
 */
typedef struct rmm_hdr_block_t {
    apr_size_t abssize;
    apr_rmm_off_t /* rmm_block_t */ firstused;
    apr_rmm_off_t /* rmm_block_t */ firstfree;
} rmm_hdr_block_t;

#define RMM_HDR_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_hdr_block_t)))
#define RMM_BLOCK_SIZE (APR_ALIGN_DEFAULT(sizeof(rmm_block_t)))

struct apr_rmm_t {
    apr_pool_t *p;
    rmm_hdr_block_t *base;
    apr_size_t size;
    apr_anylock_t lock;
};

static apr_rmm_off_t find_block_by_offset(apr_rmm_t *rmm, apr_rmm_off_t next,
                                          apr_rmm_off_t find, int includes)
{
    apr_rmm_off_t prev = 0;

    while (next) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);

        if (find == next)
            return next;

        /* Overshot? */
        if (find < next)
            return includes ? prev : 0;

        prev = next;
        next = blk->next;
    }
    return includes ? prev : 0;
}

static apr_rmm_off_t find_block_of_size(apr_rmm_t *rmm, apr_size_t size)
{
    apr_rmm_off_t next = rmm->base->firstfree;
    apr_rmm_off_t best = 0;
    apr_rmm_off_t bestsize = 0;

    while (next) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + next);

        if (blk->size == size)
            return next;

        if (blk->size >= size) {
            /* XXX: suboptimal algorithm
             * We need the most thorough best-fit logic, since we can
             * never grow our rmm, we are SOL when we hit the wall.
             */
            if (!bestsize || (blk->size < bestsize)) {
                bestsize = blk->size;
                best = next;
            }
        }

        next = blk->next;
    }

    if (bestsize > RMM_BLOCK_SIZE + size) {
        struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + best);
        struct rmm_block_t *new = (rmm_block_t*)((char*)rmm->base + best + size);

        new->size = blk->size - size;
        new->next = blk->next;
        new->prev = best;

        blk->size = size;
        blk->next = best + size;

        if (new->next) {
            blk = (rmm_block_t*)((char*)rmm->base + new->next);
            blk->prev = best + size;
        }
    }

    return best;
}
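
/* For example (figures illustrative only): if the best-fit free block holds
 * 128 bytes and the padded request is 64 bytes, the block is split in place:
 * the first 64 bytes keep the offset "best" and are returned, while a new
 * free block of 64 bytes is created at best + 64 and linked into the free
 * list right after it (the caller then moves "best" to the used list via
 * move_block()). If the remainder would not exceed RMM_BLOCK_SIZE, the whole
 * block is handed out unsplit instead.
 */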

static void move_block(apr_rmm_t *rmm, apr_rmm_off_t this, int free)
{
    struct rmm_block_t *blk = (rmm_block_t*)((char*)rmm->base + this);

    /* close the gap */
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        prev->next = blk->next;
    }
    else {
        if (free) {
            rmm->base->firstused = blk->next;
        }
        else {
            rmm->base->firstfree = blk->next;
        }
    }
    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        next->prev = blk->prev;
    }

    /* now find it in the other list, pushing it to the head if required */
    if (free) {
        blk->prev = find_block_by_offset(rmm, rmm->base->firstfree, this, 1);
        if (!blk->prev) {
            blk->next = rmm->base->firstfree;
            rmm->base->firstfree = this;
        }
    }
    else {
        blk->prev = find_block_by_offset(rmm, rmm->base->firstused, this, 1);
        if (!blk->prev) {
            blk->next = rmm->base->firstused;
            rmm->base->firstused = this;
        }
    }

    /* and open it up */
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        if (free && (blk->prev + prev->size == this)) {
            /* Collapse us into our predecessor */
            prev->size += blk->size;
            this = blk->prev;
            blk = prev;
        }
        else {
            blk->next = prev->next;
            prev->next = this;
        }
    }

    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        if (free && (this + blk->size == blk->next)) {
            /* Collapse us into our successor */
            blk->size += next->size;
            blk->next = next->next;
            if (blk->next) {
                next = (rmm_block_t*)((char*)rmm->base + blk->next);
                next->prev = this;
            }
        }
        else {
            next->prev = this;
        }
    }
}

APU_DECLARE(apr_status_t) apr_rmm_init(apr_rmm_t **rmm, apr_anylock_t *lock,
                                       void *base, apr_size_t size,
                                       apr_pool_t *p)
{
    apr_status_t rv;
    rmm_block_t *blk;
    apr_anylock_t nulllock;

    if (!lock) {
        nulllock.type = apr_anylock_none;
        nulllock.lock.pm = NULL;
        lock = &nulllock;
    }
    if ((rv = APR_ANYLOCK_LOCK(lock)) != APR_SUCCESS)
        return rv;

    (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
    (*rmm)->p = p;
    (*rmm)->base = base;
    (*rmm)->size = size;
    (*rmm)->lock = *lock;

    (*rmm)->base->abssize = size;
    (*rmm)->base->firstused = 0;
    (*rmm)->base->firstfree = RMM_HDR_BLOCK_SIZE;

    blk = (rmm_block_t *)((char*)base + (*rmm)->base->firstfree);

    blk->size = size - (*rmm)->base->firstfree;
    blk->prev = 0;
    blk->next = 0;

    return APR_ANYLOCK_UNLOCK(lock);
}

APU_DECLARE(apr_status_t) apr_rmm_destroy(apr_rmm_t *rmm)
{
    apr_status_t rv;
    rmm_block_t *blk;

    if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
        return rv;
    }
    /* Blast it all --- no going back :) */
    if (rmm->base->firstused) {
        apr_rmm_off_t this = rmm->base->firstused;
        do {
            blk = (rmm_block_t *)((char*)rmm->base + this);
            this = blk->next;
            blk->next = blk->prev = 0;
        } while (this);
        rmm->base->firstused = 0;
    }
    if (rmm->base->firstfree) {
        apr_rmm_off_t this = rmm->base->firstfree;
        do {
            blk = (rmm_block_t *)((char*)rmm->base + this);
            this = blk->next;
            blk->next = blk->prev = 0;
        } while (this);
        rmm->base->firstfree = 0;
    }
    rmm->base->abssize = 0;
    rmm->size = 0;

    return APR_ANYLOCK_UNLOCK(&rmm->lock);
}

APU_DECLARE(apr_status_t) apr_rmm_attach(apr_rmm_t **rmm, apr_anylock_t *lock,
                                         void *base, apr_pool_t *p)
{
    apr_anylock_t nulllock;

    if (!lock) {
        nulllock.type = apr_anylock_none;
        nulllock.lock.pm = NULL;
        lock = &nulllock;
    }

    /* sanity would be good here */
    (*rmm) = (apr_rmm_t *)apr_pcalloc(p, sizeof(apr_rmm_t));
    (*rmm)->p = p;
    (*rmm)->base = base;
    (*rmm)->size = (*rmm)->base->abssize;
    (*rmm)->lock = *lock;

    return APR_SUCCESS;
}

APU_DECLARE(apr_status_t) apr_rmm_detach(apr_rmm_t *rmm)
{
    /* A noop until we introduce locked/refcounts */
    return APR_SUCCESS;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_malloc(apr_rmm_t *rmm, apr_size_t reqsize)
{
    apr_rmm_off_t this;

    reqsize = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;

    APR_ANYLOCK_LOCK(&rmm->lock);

    this = find_block_of_size(rmm, reqsize);

    if (this) {
        move_block(rmm, this, 0);
        this += RMM_BLOCK_SIZE;
    }

    APR_ANYLOCK_UNLOCK(&rmm->lock);
    return this;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_calloc(apr_rmm_t *rmm, apr_size_t reqsize)
{
    apr_rmm_off_t this;

    reqsize = APR_ALIGN_DEFAULT(reqsize) + RMM_BLOCK_SIZE;

    APR_ANYLOCK_LOCK(&rmm->lock);

    this = find_block_of_size(rmm, reqsize);

    if (this) {
        move_block(rmm, this, 0);
        this += RMM_BLOCK_SIZE;
        memset((char*)rmm->base + this, 0, reqsize - RMM_BLOCK_SIZE);
    }

    APR_ANYLOCK_UNLOCK(&rmm->lock);
    return this;
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_realloc(apr_rmm_t *rmm, void *entity,
                                           apr_size_t reqsize)
{
    apr_rmm_off_t this;
    apr_rmm_off_t old;
    struct rmm_block_t *blk;
    apr_size_t oldsize;

    if (!entity) {
        return apr_rmm_malloc(rmm, reqsize);
    }

    reqsize = APR_ALIGN_DEFAULT(reqsize);

    old = apr_rmm_offset_get(rmm, entity);

    if ((this = apr_rmm_malloc(rmm, reqsize)) == 0) {
        return 0;
    }

    blk = (rmm_block_t*)((char*)rmm->base + old - RMM_BLOCK_SIZE);
    oldsize = blk->size;

    memcpy(apr_rmm_addr_get(rmm, this),
           apr_rmm_addr_get(rmm, old), oldsize < reqsize ? oldsize : reqsize);

    apr_rmm_free(rmm, old);

    return this;
}

APU_DECLARE(apr_status_t) apr_rmm_free(apr_rmm_t *rmm, apr_rmm_off_t this)
{
    apr_status_t rv;
    struct rmm_block_t *blk;

    /* A little sanity check is always healthy, especially here.
     * If we really cared, we could make this compile-time
     */
    if (this < RMM_HDR_BLOCK_SIZE + RMM_BLOCK_SIZE) {
        return APR_EINVAL;
    }

    this -= RMM_BLOCK_SIZE;

    blk = (rmm_block_t*)((char*)rmm->base + this);

    if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) != APR_SUCCESS) {
        return rv;
    }
    if (blk->prev) {
        struct rmm_block_t *prev = (rmm_block_t*)((char*)rmm->base + blk->prev);
        if (prev->next != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }
    else {
        if (rmm->base->firstused != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }

    if (blk->next) {
        struct rmm_block_t *next = (rmm_block_t*)((char*)rmm->base + blk->next);
        if (next->prev != this) {
            APR_ANYLOCK_UNLOCK(&rmm->lock);
            return APR_EINVAL;
        }
    }

    /* Ok, it remained [apparently] sane, so unlink it
     */
    move_block(rmm, this, 1);

    return APR_ANYLOCK_UNLOCK(&rmm->lock);
}

APU_DECLARE(void *) apr_rmm_addr_get(apr_rmm_t *rmm, apr_rmm_off_t entity)
{
    /* debug-sanity checking here would be good
     */
    return (void*)((char*)rmm->base + entity);
}

APU_DECLARE(apr_rmm_off_t) apr_rmm_offset_get(apr_rmm_t *rmm, void* entity)
{
    /* debug, or always, sanity checking here would be good
     * since the primitive is apr_rmm_off_t, I don't mind penalizing
     * inverse conversions for safety, unless someone can prove that
     * there is no choice in some cases.
     */
    return ((char*)entity - (char*)rmm->base);
}

APU_DECLARE(apr_size_t) apr_rmm_overhead_get(int n)
{
    /* overhead per block is at most APR_ALIGN_DEFAULT(1) wasted bytes
     * for alignment overhead, plus the size of the rmm_block_t
     * structure. */
    return RMM_HDR_BLOCK_SIZE + n * (RMM_BLOCK_SIZE + APR_ALIGN_DEFAULT(1));
}
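
/* A minimal usage sketch, assuming the backing region is an ordinary
 * pool-allocated buffer (in practice it would usually be a shared memory
 * segment shared between processes); the function name is hypothetical.
 * Passing a NULL lock to apr_rmm_init() selects no locking,
 * apr_rmm_malloc() returns an offset rather than a pointer, and
 * apr_rmm_addr_get() converts that offset back to an address:
 *
 *     static apr_status_t example_rmm_usage(apr_pool_t *p)
 *     {
 *         apr_rmm_t *rmm;
 *         apr_rmm_off_t off;
 *         apr_status_t rv;
 *         apr_size_t size = 8192 + apr_rmm_overhead_get(64);
 *         void *base = apr_pcalloc(p, size);
 *
 *         rv = apr_rmm_init(&rmm, NULL, base, size, p);
 *         if (rv != APR_SUCCESS)
 *             return rv;
 *
 *         off = apr_rmm_malloc(rmm, 100);
 *         if (off) {
 *             char *mem = apr_rmm_addr_get(rmm, off);
 *             memset(mem, 0, 100);
 *             apr_rmm_free(rmm, off);
 *         }
 *         return apr_rmm_destroy(rmm);
 *     }
 */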