# rdb.tcl

tags {"rdb external:skip"} {
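
# RDB persistence tests: loading an RDB with all the value encodings, server startup
# with a missing, empty, unreadable or corrupted RDB file, stream encoding round-trips,
# BGSAVE behavior (FLUSHALL abort, change counter reset), client handling while loading,
# RDB load info fields, and copy-on-write reporting from the saving child.
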
set server_path [tmpdir "server.rdb-encoding-test"]

# Copy RDB with different encodings in server path
exec cp tests/assets/encodings.rdb $server_path

start_server [list overrides [list "dir" $server_path "dbfilename" "encodings.rdb"]] {
    test "RDB encoding loading test" {
        r select 0
        csvdump r
    } {"0","compressible","string","aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
  10. "0","hash","hash","a","1","aa","10","aaa","100","b","2","bb","20","bbb","200","c","3","cc","30","ccc","300","ddd","400","eee","5000000000",
  11. "0","hash_zipped","hash","a","1","b","2","c","3",
  12. "0","list","list","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000",
  13. "0","list_zipped","list","1","2","3","a","b","c","100000","6000000000",
  14. "0","number","string","10"
  15. "0","set","set","1","100000","2","3","6000000000","a","b","c",
  16. "0","set_zipped_1","set","1","2","3","4",
  17. "0","set_zipped_2","set","100000","200000","300000","400000",
  18. "0","set_zipped_3","set","1000000000","2000000000","3000000000","4000000000","5000000000","6000000000",
  19. "0","string","string","Hello World"
  20. "0","zset","zset","a","1","b","2","c","3","aa","10","bb","20","cc","30","aaa","100","bbb","200","ccc","300","aaaa","1000","cccc","123456789","bbbb","5000000000",
  21. "0","zset_zipped","zset","a","1","b","2","c","3",
}
}
set server_path [tmpdir "server.rdb-startup-test"]

start_server [list overrides [list "dir" $server_path] keep_persistence true] {
    test {Server started empty with non-existing RDB file} {
        r debug digest
    } {0000000000000000000000000000000000000000}
    # Save an RDB file, needed for the next test.
    r save
}

start_server [list overrides [list "dir" $server_path] keep_persistence true] {
    test {Server started empty with empty RDB file} {
        r debug digest
    } {0000000000000000000000000000000000000000}
}

start_server [list overrides [list "dir" $server_path] keep_persistence true] {
    test {Test RDB stream encoding} {
        for {set j 0} {$j < 1000} {incr j} {
            if {rand() < 0.9} {
                r xadd stream * foo abc
            } else {
                r xadd stream * bar $j
            }
        }
        r xgroup create stream mygroup 0
        set records [r xreadgroup GROUP mygroup Alice COUNT 2 STREAMS stream >]
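        # $records has the form {{stream {{id1 {field value}} {id2 {field value}}}}}.
        # Delete the second entry that was read and ack only the first one, so the
        # saved stream carries a consumer group whose PEL still references a deleted entry.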
        r xdel stream [lindex [lindex [lindex [lindex $records 0] 1] 1] 0]
        r xack stream mygroup [lindex [lindex [lindex [lindex $records 0] 1] 0] 0]
        set digest [r debug digest]
        r config set sanitize-dump-payload no
        r debug reload
        set newdigest [r debug digest]
        assert {$digest eq $newdigest}
    }

    test {Test RDB stream encoding - sanitize dump} {
        r config set sanitize-dump-payload yes
        r debug reload
        set newdigest [r debug digest]
        assert {$digest eq $newdigest}
    }

    # delete the stream, maybe valgrind will find something
    r del stream
}
# Helper function to start a server and kill it, just to check the error
# logged.
set defaults {}
proc start_server_and_kill_it {overrides code} {
    upvar defaults defaults srv srv server_path server_path
    set config [concat $defaults $overrides]
    set srv [start_server [list overrides $config keep_persistence true]]
    uplevel 1 $code
    kill_server $srv
}

# Make the RDB file unreadable (0222 is write-only)
file attributes [file join $server_path dump.rdb] -permissions 0222

# Detect root account (it is able to read the file even with 0222 perm)
set isroot 0
catch {
    open [file join $server_path dump.rdb]
    set isroot 1
}

# Now make sure the server aborted with an error
if {!$isroot} {
    start_server_and_kill_it [list "dir" $server_path] {
        test {Server should not start if RDB file can't be open} {
            wait_for_condition 50 100 {
                [string match {*Fatal error loading*} \
                    [exec tail -1 < [dict get $srv stdout]]]
            } else {
                fail "Server started even if RDB was unreadable!"
            }
        }
    }
}

# Fix permissions of the RDB file.
file attributes [file join $server_path dump.rdb] -permissions 0666

# Corrupt its CRC64 checksum.
set filesize [file size [file join $server_path dump.rdb]]
set fd [open [file join $server_path dump.rdb] r+]
fconfigure $fd -translation binary
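# The last 8 bytes of an RDB file are its CRC64 footer, so overwriting them is
# enough to make the checksum verification fail at load time.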
seek $fd -8 end
puts -nonewline $fd "foobar00"; # Corrupt the checksum
close $fd

# Now make sure the server aborted with an error
start_server_and_kill_it [list "dir" $server_path] {
    test {Server should not start if RDB is corrupted} {
        wait_for_condition 50 100 {
            [string match {*CRC error*} \
                [exec tail -10 < [dict get $srv stdout]]]
        } else {
            fail "Server started even if RDB was corrupted!"
        }
    }
}

start_server {} {
    test {Test FLUSHALL aborts bgsave} {
        # 1000 keys with 1ms sleep per key should take 1 second
        r config set rdb-key-save-delay 1000
        r debug populate 1000
        r bgsave
        assert_equal [s rdb_bgsave_in_progress] 1
        r flushall
        # wait half a second max
        wait_for_condition 5 100 {
            [s rdb_bgsave_in_progress] == 0
        } else {
            fail "bgsave not aborted"
        }
        # verify that bgsave failed by checking that the change counter is still high
        assert_lessthan 999 [s rdb_changes_since_last_save]
        # make sure the server is still writable
        r set x xx
    }

    test {bgsave resets the change counter} {
        r config set rdb-key-save-delay 0
        r bgsave
        wait_for_condition 50 100 {
            [s rdb_bgsave_in_progress] == 0
        } else {
            fail "bgsave not done"
        }
        assert_equal [s rdb_changes_since_last_save] 0
    }
}
test {client freed during loading} {
    start_server [list overrides [list key-load-delay 50 rdbcompression no]] {
        # create a big rdb that will take a long time to load. it is important
        # for the keys to be big, since the server processes events only once per 2MB read.
        # 100mb of rdb (100k keys of 1KB each) will take more than 5 seconds to load.
        r debug populate 100000 key 1000

        restart_server 0 false false

        # make sure it's still loading
        assert_equal [s loading] 1

        # connect and disconnect 5 clients
        set clients {}
        for {set j 0} {$j < 5} {incr j} {
            lappend clients [redis_deferring_client]
        }
        foreach rd $clients {
            $rd debug log bla
        }
        foreach rd $clients {
            $rd read
        }
        foreach rd $clients {
            $rd close
        }

        # make sure the server freed the clients
        wait_for_condition 100 100 {
            [s connected_clients] < 3
        } else {
            fail "clients didn't disconnect"
        }

        # make sure it's still loading
        assert_equal [s loading] 1

        # no need to keep waiting for loading to complete
        exec kill [srv 0 pid]
    }
}
start_server {} {
    test {Test RDB load info} {
        r debug populate 1000
        r save
        restart_server 0 true false
        wait_done_loading r
        assert {[s rdb_last_load_keys_expired] == 0}
        assert {[s rdb_last_load_keys_loaded] == 1000}

        r debug set-active-expire 0
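        # Write 1024 keys with a 10ms TTL, spread across 16 databases. With active
        # expiry disabled they stay in the keyspace and get saved, so on the next
        # load they should all be counted as expired rather than loaded.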
        for {set j 0} {$j < 1024} {incr j} {
            r select [expr $j%16]
            r set $j somevalue px 10
        }
        after 20

        r save
        restart_server 0 true false
        wait_done_loading r
        assert {[s rdb_last_load_keys_expired] == 1024}
        assert {[s rdb_last_load_keys_loaded] == 1000}
    }
}
# Our COW metrics (Private_Dirty) work only on Linux
set system_name [string tolower [exec uname -s]]
set page_size [exec getconf PAGESIZE]
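# COW is accounted in whole pages, so the size expectations below (at least 4096
# extra COW bytes per iteration) assume 4KB pages; skip the test otherwise.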
if {$system_name eq {linux} && $page_size == 4096} {

start_server {overrides {save ""}} {
    test {Test child sending info} {
        # make sure that rdb_last_cow_size and current_cow_size are zero (the test uses a
        # new server), so that the comparisons during the test will be valid
        assert {[s current_cow_size] == 0}
        assert {[s current_save_keys_processed] == 0}
        assert {[s current_save_keys_total] == 0}
        assert {[s rdb_last_cow_size] == 0}

        # with a 200us delay per key, the bgsave empirically takes about 10 seconds.
        # we need it to take more than about 5 seconds, since redis only reports COW once a second.
        r config set rdb-key-save-delay 200
        r config set loglevel debug

        # populate the db with 10k keys of 512B each (since we want to measure the COW size
        # by changing some keys and reading the reported COW size, we use a small key size to
        # prevent the "dismiss mechanism" from freeing memory and reducing the COW size)
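        # a deferring client lets us pipeline the population: queue all the SET
        # commands first, then drain the replies in a second loop.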
        set rd [redis_deferring_client 0]
        set size 500 ;# aim for the 512 bin (sds overhead)
        set cmd_count 10000
        for {set k 0} {$k < $cmd_count} {incr k} {
            $rd set key$k [string repeat A $size]
        }
        for {set k 0} {$k < $cmd_count} {incr k} {
            catch { $rd read }
        }
        $rd close

        # start background rdb save
        r bgsave

        set current_save_keys_total [s current_save_keys_total]
        if {$::verbose} {
            puts "Keys before bgsave start: $current_save_keys_total"
        }

        # on each iteration, write some keys to the server to trigger copy-on-write, and
        # wait to see it reflected in INFO.
        set iteration 1
        set key_idx 0
        while 1 {
            # take samples before writing new data to the server
            set cow_size [s current_cow_size]
            if {$::verbose} {
                puts "COW info before copy-on-write: $cow_size"
            }

            set keys_processed [s current_save_keys_processed]
            if {$::verbose} {
                puts "current_save_keys_processed info: $keys_processed"
            }

            # trigger copy-on-write
            set modified_keys 16
            for {set k 0} {$k < $modified_keys} {incr k} {
                r setrange key$key_idx 0 [string repeat B $size]
                incr key_idx 1
            }

            # changing 16 keys (512B each) will create at least 8192 bytes of COW (2 pages), but
            # we don't want the test to be too strict, so we check for a change of at least 4096 bytes
            set exp_cow [expr $cow_size + 4096]

            # wait to see that the current_cow_size value was updated (as long as the child is in progress)
            wait_for_condition 80 100 {
                [s rdb_bgsave_in_progress] == 0 ||
                [s current_cow_size] >= $exp_cow &&
                [s current_save_keys_processed] > $keys_processed &&
                [s current_fork_perc] > 0
            } else {
                if {$::verbose} {
                    puts "COW info on fail: [s current_cow_size]"
                    puts [exec tail -n 100 < [srv 0 stdout]]
                }
                fail "COW info wasn't reported"
            }

            # assert that $keys_processed is not greater than total keys.
            assert_morethan_equal $current_save_keys_total $keys_processed

            # when not running with --accurate, stop after 2 iterations
            if {!$::accurate && $iteration == 2} {
                break
            }

            # stop iterating if the bgsave completed
            if {[s rdb_bgsave_in_progress] == 0} {
                break
            }

            incr iteration 1
        }

        # make sure we saw a report of current_cow_size
        if {$iteration < 2 && $::verbose} {
            puts [exec tail -n 100 < [srv 0 stdout]]
        }
        assert_morethan_equal $iteration 2

        # if the bgsave completed, check that rdb_last_cow_size (the fork exit report)
        # is at least 90% of the last current_cow_size sample.
        if {[s rdb_bgsave_in_progress] == 0} {
            set final_cow [s rdb_last_cow_size]
            set cow_size [expr $cow_size * 0.9]
            if {$final_cow < $cow_size && $::verbose} {
                puts [exec tail -n 100 < [srv 0 stdout]]
            }
            assert_morethan_equal $final_cow $cow_size
        }
    }
}
} ;# system_name
} ;# tags