# maxmemory.tcl -- tests for the Redis maxmemory directive and eviction policies.
  1. start_server {tags {"maxmemory"}} {
  2. test "Without maxmemory small integers are shared" {
  3. r config set maxmemory 0
  4. r set a 1
  5. assert {[r object refcount a] > 1}
  6. }
  7. test "With maxmemory and non-LRU policy integers are still shared" {
  8. r config set maxmemory 1073741824
  9. r config set maxmemory-policy allkeys-random
  10. r set a 1
  11. assert {[r object refcount a] > 1}
  12. }
  13. test "With maxmemory and LRU policy integers are not shared" {
  14. r config set maxmemory 1073741824
  15. r config set maxmemory-policy allkeys-lru
  16. r set a 1
  17. r config set maxmemory-policy volatile-lru
  18. r set b 1
  19. assert {[r object refcount a] == 1}
  20. assert {[r object refcount b] == 1}
  21. r config set maxmemory 0
  22. }
  23. foreach policy {
  24. allkeys-random allkeys-lru allkeys-lfu volatile-lru volatile-lfu volatile-random volatile-ttl
  25. } {
  26. test "maxmemory - is the memory limit honoured? (policy $policy)" {
  27. # make sure to start with a blank instance
  28. r flushall
  29. # Get the current memory limit and calculate a new limit.
  30. # We just add 100k to the current memory size so that it is
  31. # fast for us to reach that limit.
  32. set used [s used_memory]
  33. set limit [expr {$used+100*1024}]
  34. r config set maxmemory $limit
  35. r config set maxmemory-policy $policy
  36. # Now add keys until the limit is almost reached.
  37. set numkeys 0
  38. while 1 {
  39. r setex [randomKey] 10000 x
  40. incr numkeys
  41. if {[s used_memory]+4096 > $limit} {
  42. assert {$numkeys > 10}
  43. break
  44. }
  45. }
  46. # If we add the same number of keys already added again, we
  47. # should still be under the limit.
  48. for {set j 0} {$j < $numkeys} {incr j} {
  49. r setex [randomKey] 10000 x
  50. }
  51. assert {[s used_memory] < ($limit+4096)}
  52. }
  53. }
  54. foreach policy {
  55. allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl
  56. } {
  57. test "maxmemory - only allkeys-* should remove non-volatile keys ($policy)" {
  58. # make sure to start with a blank instance
  59. r flushall
  60. # Get the current memory limit and calculate a new limit.
  61. # We just add 100k to the current memory size so that it is
  62. # fast for us to reach that limit.
  63. set used [s used_memory]
  64. set limit [expr {$used+100*1024}]
  65. r config set maxmemory $limit
  66. r config set maxmemory-policy $policy
  67. # Now add keys until the limit is almost reached.
  68. set numkeys 0
  69. while 1 {
  70. r set [randomKey] x
  71. incr numkeys
  72. if {[s used_memory]+4096 > $limit} {
  73. assert {$numkeys > 10}
  74. break
  75. }
  76. }
  77. # If we add the same number of keys already added again and
  78. # the policy is allkeys-* we should still be under the limit.
  79. # Otherwise we should see an error reported by Redis.
  80. set err 0
  81. for {set j 0} {$j < $numkeys} {incr j} {
  82. if {[catch {r set [randomKey] x} e]} {
  83. if {[string match {*used memory*} $e]} {
  84. set err 1
  85. }
  86. }
  87. }
  88. if {[string match allkeys-* $policy]} {
  89. assert {[s used_memory] < ($limit+4096)}
  90. } else {
  91. assert {$err == 1}
  92. }
  93. }
  94. }
  95. foreach policy {
  96. volatile-lru volatile-lfu volatile-random volatile-ttl
  97. } {
  98. test "maxmemory - policy $policy should only remove volatile keys." {
  99. # make sure to start with a blank instance
  100. r flushall
  101. # Get the current memory limit and calculate a new limit.
  102. # We just add 100k to the current memory size so that it is
  103. # fast for us to reach that limit.
  104. set used [s used_memory]
  105. set limit [expr {$used+100*1024}]
  106. r config set maxmemory $limit
  107. r config set maxmemory-policy $policy
  108. # Now add keys until the limit is almost reached.
  109. set numkeys 0
  110. while 1 {
  111. # Odd keys are volatile
  112. # Even keys are non volatile
  113. if {$numkeys % 2} {
  114. r setex "key:$numkeys" 10000 x
  115. } else {
  116. r set "key:$numkeys" x
  117. }
  118. if {[s used_memory]+4096 > $limit} {
  119. assert {$numkeys > 10}
  120. break
  121. }
  122. incr numkeys
  123. }
  124. # Now we add the same number of volatile keys already added.
  125. # We expect Redis to evict only volatile keys in order to make
  126. # space.
  127. set err 0
  128. for {set j 0} {$j < $numkeys} {incr j} {
  129. catch {r setex "foo:$j" 10000 x}
  130. }
  131. # We should still be under the limit.
  132. assert {[s used_memory] < ($limit+4096)}
  133. # However all our non volatile keys should be here.
  134. for {set j 0} {$j < $numkeys} {incr j 2} {
  135. assert {[r exists "key:$j"]}
  136. }
  137. }
  138. }
  139. }
  140. proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline} {
  141. start_server {tags {"maxmemory"}} {
  142. start_server {} {
  143. set slave_pid [s process_id]
  144. test "$test_name" {
  145. set slave [srv 0 client]
  146. set slave_host [srv 0 host]
  147. set slave_port [srv 0 port]
  148. set master [srv -1 client]
  149. set master_host [srv -1 host]
  150. set master_port [srv -1 port]
  151. # add 100 keys of 100k (10MB total)
  152. for {set j 0} {$j < 100} {incr j} {
  153. $master setrange "key:$j" 100000 asdf
  154. }
  155. # make sure master doesn't disconnect slave because of timeout
  156. $master config set repl-timeout 1200 ;# 20 minutes (for valgrind and slow machines)
  157. $master config set maxmemory-policy allkeys-random
  158. $master config set client-output-buffer-limit "replica 100000000 100000000 300"
  159. $master config set repl-backlog-size [expr {10*1024}]
  160. $slave slaveof $master_host $master_port
  161. wait_for_condition 50 100 {
  162. [s 0 master_link_status] eq {up}
  163. } else {
  164. fail "Replication not started."
  165. }
  166. # measure used memory after the slave connected and set maxmemory
  167. set orig_used [s -1 used_memory]
  168. set orig_client_buf [s -1 mem_clients_normal]
  169. set orig_mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
  170. set orig_used_no_repl [expr {$orig_used - $orig_mem_not_counted_for_evict}]
  171. set limit [expr {$orig_used - $orig_mem_not_counted_for_evict + 20*1024}]
  172. if {$limit_memory==1} {
  173. $master config set maxmemory $limit
  174. }
  175. # put the slave to sleep
  176. set rd_slave [redis_deferring_client]
  177. exec kill -SIGSTOP $slave_pid
  178. # send some 10mb worth of commands that don't increase the memory usage
  179. if {$pipeline == 1} {
  180. set rd_master [redis_deferring_client -1]
  181. for {set k 0} {$k < $cmd_count} {incr k} {
  182. $rd_master setrange key:0 0 [string repeat A $payload_len]
  183. }
  184. for {set k 0} {$k < $cmd_count} {incr k} {
  185. #$rd_master read
  186. }
  187. } else {
  188. for {set k 0} {$k < $cmd_count} {incr k} {
  189. $master setrange key:0 0 [string repeat A $payload_len]
  190. }
  191. }
  192. set new_used [s -1 used_memory]
  193. set slave_buf [s -1 mem_clients_slaves]
  194. set client_buf [s -1 mem_clients_normal]
  195. set mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
  196. set used_no_repl [expr {$new_used - $mem_not_counted_for_evict}]
  197. set delta [expr {($used_no_repl - $client_buf) - ($orig_used_no_repl - $orig_client_buf)}]
  198. assert {[$master dbsize] == 100}
  199. assert {$slave_buf > 2*1024*1024} ;# some of the data may have been pushed to the OS buffers
  200. set delta_max [expr {$cmd_count / 2}] ;# 1 byte unaccounted for, with 1M commands will consume some 1MB
  201. assert {$delta < $delta_max && $delta > -$delta_max}
  202. $master client kill type slave
  203. set killed_used [s -1 used_memory]
  204. set killed_slave_buf [s -1 mem_clients_slaves]
  205. set killed_mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
  206. set killed_used_no_repl [expr {$killed_used - $killed_mem_not_counted_for_evict}]
  207. set delta_no_repl [expr {$killed_used_no_repl - $used_no_repl}]
  208. assert {$killed_slave_buf == 0}
  209. assert {$delta_no_repl > -$delta_max && $delta_no_repl < $delta_max}
  210. }
  211. # unfreeze slave process (after the 'test' succeeded or failed, but before we attempt to terminate the server
  212. exec kill -SIGCONT $slave_pid
  213. }
  214. }
  215. }
  216. # test that slave buffer are counted correctly
  217. # we wanna use many small commands, and we don't wanna wait long
  218. # so we need to use a pipeline (redis_deferring_client)
  219. # that may cause query buffer to fill and induce eviction, so we disable it
  220. test_slave_buffers {slave buffer are counted correctly} 1000000 10 0 1
  221. # test that slave buffer don't induce eviction
  222. # test again with fewer (and bigger) commands without pipeline, but with eviction
  223. test_slave_buffers "replica buffer don't induce eviction" 100000 100 1 0