
Source Code for Module pike.test.credit

#
# Copyright (c) 2017, Dell EMC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Module Name:
#
#        credit.py
#
# Abstract:
#
#        credit tracking test cases
#
# Authors: Masen Furer <masen.furer@dell.com>
#

import pike.model
import pike.smb2
import pike.test
import array
import random
import sys
import time

# common size constants
size_64k = 2**16
size_128k = 2**17
size_192k = size_64k + size_128k
size_512k = 2**19
size_960k = size_192k * 5
size_1m = 2**20
size_2m = 2**21
size_4m = 2**22
size_8m = 2**23

share_all = pike.smb2.FILE_SHARE_READ | pike.smb2.FILE_SHARE_WRITE | pike.smb2.FILE_SHARE_DELETE
lease_rh = pike.smb2.SMB2_LEASE_READ_CACHING | pike.smb2.SMB2_LEASE_HANDLE_CACHING
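# Background note: with the LARGE_MTU capability negotiated, SMB2 charges
# roughly ceil(payload / 65536) credits for a large read or write (per MS-SMB2),
# which is why 64k is the unit used to derive the per-operation credit charges
# asserted throughout these tests.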

# debugging callback functions which are registered if debug logging is enabled
def post_serialize_credit_assessment(nb):
    smb_res = nb[0]
    print("{0} ({1}) ___ Charge: {2} / Request: {3} / Total: {4}".format(
        smb_res.command,
        smb_res.message_id,
        smb_res.credit_charge,
        smb_res.credit_request,
        nb.conn.credits))

def post_deserialize_credit_assessment(nb):
    smb_res = nb[0]
    print("{0} ({1}) ___ Charge: {2} / Response: {3} / Total: {4}".format(
        smb_res.command,
        smb_res.message_id,
        smb_res.credit_charge,
        smb_res.credit_response,
        nb.conn.credits + smb_res.credit_response - smb_res.credit_charge))

def post_serialize_credit_assert(exp_credit_charge, future):
    def cb(nb):
        with future:
            if nb[0].credit_charge != exp_credit_charge:
                raise AssertionError(
                    "Expected credit_charge {0}. Actual {1}".format(
                        exp_credit_charge,
                        nb[0].credit_charge))
            future.complete(True)
    return cb

@pike.test.RequireCapabilities(pike.smb2.SMB2_GLOBAL_CAP_LARGE_MTU)
class CreditTest(pike.test.PikeTest):
    def __init__(self, *args, **kwargs):
        super(CreditTest, self).__init__(*args, **kwargs)
        if self.loglevel == pike.test.logging.DEBUG:
            self.default_client.register_callback(
                pike.model.EV_REQ_POST_SERIALIZE,
                post_serialize_credit_assessment)
            self.default_client.register_callback(
                pike.model.EV_RES_POST_DESERIALIZE,
                post_deserialize_credit_assessment)

    # set the default credit request to 1 to make things more predictable
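    # with credit_request pinned to 1, every request asks for exactly one
    # credit back, so the connection's credit total only grows when a test
    # explicitly raises the request (e.g. with chan.let(credit_request=16))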
    def setUp(self):
        self.prev_default_credit_request = pike.model.default_credit_request
        pike.model.default_credit_request = 1

    def tearDown(self):
        pike.model.default_credit_request = self.prev_default_credit_request

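    # The expected-credit arithmetic in the generic helpers below assumes the
    # server grants exactly what each request asks for: the initial create
    # (charge 1, request 16) nets +15 credits, and each multi-credit write or
    # read requests at least its own charge, so it is expected to be roughly
    # credit-neutral.  For example, with default_credit_request pinned to 1, a
    # connection that starts with 8 credits should end up with at least
    # 8 + 15 = 23 (capped at the observed Windows limit of 128).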
    def generic_mc_write_mc_read(self, file_size, write_size, read_size):
        """
        perform multiple multi credit write operations requesting multiple
        credits in return
        then perform one large read operation and subsequently close the file
        """
        fname = self.id().rpartition(".")[-1]
        buf = "\0\1\2\3\4\5\6\7"*8192
        buflen = len(buf)
        file_chunks = file_size / buflen
        write_chunks = file_size / write_size
        write_buf = buf * (file_chunks / write_chunks)
        write_credits_per_op = write_size / size_64k
        chan, tree = self.tree_connect()
        starting_credits = chan.connection.negotiate_response.parent.credit_response
        self.info("creating {0} ({1} bytes)".format(fname, file_size))

        # get enough initial credits
        with chan.let(credit_request=16):
            fh = chan.create(tree, fname).result()

        self.info("writing {0} chunks of {1} bytes; {2} credits per op".format(
            write_chunks,
            write_size,
            write_credits_per_op))
        for ix in xrange(write_chunks):
            credit_assert_future = pike.model.Future()
            with chan.connection.callback(
                    pike.model.EV_REQ_POST_SERIALIZE,
                    post_serialize_credit_assert(
                        write_credits_per_op,
                        credit_assert_future)):
                result = chan.write(fh, write_size*ix, write_buf)
                self.assertEqual(result, write_size)
                self.assertTrue(credit_assert_future.result())
        chan.close(fh)

        # calculate a reasonable expected number of credits
        # from negotiate, session_setup (x2), tree_connect, create (+16), close
        exp_credits = starting_credits + ((pike.model.default_credit_request - 1) * 4) + 15
        credit_request_per_op = pike.model.default_credit_request
        # from the series of write requests
        if write_credits_per_op > credit_request_per_op:
            credit_request_per_op = write_credits_per_op
        exp_credits += write_chunks * (credit_request_per_op - write_credits_per_op)
        # windows seems to have a credit wall of 128
        if exp_credits > 128:
            exp_credits = 128
        self.info("Expect the server to have granted at least "
                  "{0} credits".format(exp_credits))
        self.assertGreaterEqual(chan.connection.credits, exp_credits)

        read_chunks = file_size / read_size
        read_buf = buf * (file_chunks / read_chunks)
        read_credits_per_op = read_size / size_64k
        self.info("reading {0} chunks of {1} bytes; {2} credits per op".format(
            read_chunks,
            read_size,
            read_credits_per_op))
        fh = chan.create(
            tree,
            fname,
            access=pike.smb2.GENERIC_READ | pike.smb2.DELETE,
            disposition=pike.smb2.FILE_OPEN,
            options=pike.smb2.FILE_DELETE_ON_CLOSE).result()
        file_buffer = array.array("B")
        for ix in xrange(read_chunks):
            credit_assert_future = pike.model.Future()
            with chan.connection.callback(
                    pike.model.EV_REQ_POST_SERIALIZE,
                    post_serialize_credit_assert(
                        read_credits_per_op,
                        credit_assert_future)):
                result = chan.read(fh, read_size, read_size*ix)
                file_buffer.extend(result)
                self.assertEqual(len(result), read_size)
                self.assertTrue(credit_assert_future.result())
        chan.close(fh)
        self.assertEqual(file_buffer.tostring(), buf*file_chunks)

    def generic_arbitrary_mc_write_mc_read(self, file_size, write_size, read_size):
        """
        perform multiple multi credit write operations requesting multiple
        credits in return
        then perform one large read operation and subsequently close the file

        this version of the function works with arbitrary sizes
        """
        fname = self.id().rpartition(".")[-1]
        buf = "\0\1\2\3\4\5\6\7"*8192
        buflen = len(buf)
        file_chunks, file_remainder = divmod(file_size, buflen)
        file_buf = buf * file_chunks + buf[:file_remainder]

        write_chunks, write_remainder = divmod(file_size, write_size)
        write_credits_per_op, extra_credits = divmod(write_size, size_64k)
        if extra_credits > 0:
            write_credits_per_op += 1

        # if the sizes are not exact multiples, prepare to write an extra chunk
        extra_write = None
        if write_remainder > 0:
            c, extra_credits = divmod(write_remainder, size_64k)
            if extra_credits > 0:
                c += 1
            extra_write = (write_remainder, c)

        chan, tree = self.tree_connect()
        starting_credits = chan.connection.negotiate_response.parent.credit_response
        self.info("creating {0} ({1} bytes)".format(fname, file_size))

        # get enough initial credits
        with chan.let(credit_request=16):
            fh = chan.create(tree, fname).result()

        self.info("writing {0} chunks of {1} bytes; {2} credits per op".format(
            write_chunks,
            write_size,
            write_credits_per_op))
        ix = None
        # TODO: consolidate chunks to a list of tuples (file_offset, local_buffer_offset, length)
        # this would simplify the loop, instead of having the extra chunk
        # OR abstract the writing, asserting to a helper function (yeah better idea, retains the logic)
        for ix in xrange(write_chunks):
            credit_assert_future = pike.model.Future()
            with chan.connection.callback(
                    pike.model.EV_REQ_POST_SERIALIZE,
                    post_serialize_credit_assert(
                        write_credits_per_op,
                        credit_assert_future)):
                result = chan.write(
                    fh,
                    write_size*ix,
                    file_buf[write_size*ix:write_size*(ix+1)])
                self.assertEqual(result, write_size)
                self.assertTrue(credit_assert_future.result())
        if extra_write is not None:
            if ix is None:
                extra_write_offset = 0
            else:
                extra_write_offset = write_size*(ix+1)
            ix = None
            self.info("writing extra chunk of {0} bytes @ {1}; {2} credits".format(
                extra_write[0],
                extra_write_offset,
                extra_write[1]))
            credit_assert_future = pike.model.Future()
            with chan.connection.callback(
                    pike.model.EV_REQ_POST_SERIALIZE,
                    post_serialize_credit_assert(
                        extra_write[1],
                        credit_assert_future)):
                result = chan.write(
                    fh,
                    extra_write_offset,
                    file_buf[-extra_write[0]:])
                self.assertEqual(result, extra_write[0])
                self.assertTrue(credit_assert_future.result())
        chan.close(fh)

        # calculate a reasonable expected number of credits
        # from negotiate, session_setup (x2), tree_connect, create (+16), close
        exp_credits = starting_credits + ((pike.model.default_credit_request - 1) * 4) + 15
        credit_request_per_op = pike.model.default_credit_request
        # from the series of write requests
        if write_credits_per_op > credit_request_per_op:
            credit_request_per_op = write_credits_per_op
        exp_credits += write_chunks * (credit_request_per_op - write_credits_per_op)
        # potential extra write request
        if extra_write is not None:
            credit_request_per_op = pike.model.default_credit_request
            if extra_write[1] > credit_request_per_op:
                credit_request_per_op = extra_write[1]
            exp_credits += (credit_request_per_op - extra_write[1])
        # windows seems to have a credit wall of 128
        if exp_credits > 128:
            exp_credits = 128
        self.info("Expect the server to have granted at least "
                  "{0} credits".format(exp_credits))
        self.assertGreaterEqual(chan.connection.credits, exp_credits)

        read_chunks, read_remainder = divmod(file_size, read_size)
        read_credits_per_op, extra_credits = divmod(read_size, size_64k)
        if extra_credits > 0:
            read_credits_per_op += 1

        # if the sizes are not exact multiples, prepare to read an extra chunk
        extra_read = None
        if read_remainder > 0:
            c, extra_credits = divmod(read_remainder, size_64k)
            if extra_credits > 0:
                c += 1
            extra_read = (read_remainder, c)

        self.info("reading {0} chunks of {1} bytes; {2} credits per op".format(
            read_chunks,
            read_size,
            read_credits_per_op))
        fh = chan.create(
            tree,
            fname,
            access=pike.smb2.GENERIC_READ | pike.smb2.DELETE,
            disposition=pike.smb2.FILE_OPEN,
            options=pike.smb2.FILE_DELETE_ON_CLOSE).result()
        read_buffer = array.array("B")
        for ix in xrange(read_chunks):
            credit_assert_future = pike.model.Future()
            with chan.connection.callback(
                    pike.model.EV_REQ_POST_SERIALIZE,
                    post_serialize_credit_assert(
                        read_credits_per_op,
                        credit_assert_future)):
                result = chan.read(fh, read_size, read_size*ix)
                read_buffer.extend(result)
                self.assertEqual(len(result), read_size)
                self.assertTrue(credit_assert_future.result())
        if extra_read is not None:
            if ix is None:
                extra_read_offset = 0
            else:
                extra_read_offset = read_size*(ix+1)
            self.info("reading extra chunk of {0} bytes @ {1}; {2} credits".format(
                extra_read[0],
                extra_read_offset,
                extra_read[1]))
            credit_assert_future = pike.model.Future()
            with chan.connection.callback(
                    pike.model.EV_REQ_POST_SERIALIZE,
                    post_serialize_credit_assert(
                        extra_read[1],
                        credit_assert_future)):
                result = chan.read(fh, extra_read[0], extra_read_offset)
                read_buffer.extend(result)
                self.assertEqual(len(result), extra_read[0])
                self.assertTrue(credit_assert_future.result())

        chan.close(fh)
        self.assertEqual(read_buffer.tostring(), file_buf)

class PowerOf2CreditTest(CreditTest):
    # (test method definitions are collapsed in this source listing and are
    # not reproduced here)

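# Each credit charged also consumes one message id, so a 16-credit write
# advances the connection's next message id by 16.  EdgeCreditTest uses this
# to push the message id past the 2048 mark quickly while asserting that the
# per-request grant and the connection's credit total stay constant.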
class EdgeCreditTest(CreditTest):
    def test_sequence_number_wrap(self):
        """
        one client performs requests until the sequence number is > 2048
        then exhausts its remaining credits with a single large mtu
        multicredit op
        """
        fname = "test_sequence_number_wrap"
        # buf is 64k == 1 credit
        buf = "\0\1\2\3\4\5\6\7"*8192
        credits_per_req = 16
        sequence_number_target = 2080

        chan1, tree1 = self.tree_connect()
        starting_credits = chan1.connection.negotiate_response.parent.credit_response
        self.assertEqual(chan1.connection.credits, starting_credits)

        with chan1.let(credit_request=credits_per_req):
            fh1 = chan1.create(
                tree1,
                fname).result()
        exp_credits = starting_credits + credits_per_req - 1
        self.assertEqual(chan1.connection.credits, exp_credits)

        # build up the sequence number to the target
        while chan1.connection._next_mid < sequence_number_target:
            smb_req = chan1.write_request(fh1, 0, buf*credits_per_req).parent
            smb_resp = chan1.connection.submit(smb_req.parent)[0].result()
            # if the server is granting our request,
            self.assertEqual(smb_resp.credit_response, credits_per_req)
            # then total number of credits should stay the same
            self.assertEqual(chan1.connection.credits, exp_credits)

        # at the end, next mid should be > target
        self.assertGreater(chan1.connection._next_mid, sequence_number_target)

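# For operations that go async, the server hands out credits on the interim
# STATUS_PENDING response; the final response for the same request is expected
# to carry a credit grant of 0, so counting both would double-grant credits.
# The tests below assert this for blocked lock requests and for writes that
# trigger a lease break.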
class AsyncCreditTest(CreditTest):
    def test_async_lock(self):
        """
        establish 2 sessions
        session 1 opens file1 with exclusive lock
        session 2 opens file1 with 3 exclusive locks - allowing pending
        wait for all 3 lock requests to pend
        session 1 unlocks file1
        wait for all 3 lock requests to complete
        verify that credits were not double granted
        """
        chan1, tree1 = self.tree_connect()
        chan2, tree2 = self.tree_connect()
        chan2_starting_credits = chan2.connection.negotiate_response.parent.credit_response
        fname = "test_async_lock"
        buf = "\0\1\2\3\4\5\6\7"
        lock1 = (0, 8, pike.smb2.SMB2_LOCKFLAG_EXCLUSIVE_LOCK)
        contend_locks = [
            (0, 2, pike.smb2.SMB2_LOCKFLAG_EXCLUSIVE_LOCK),
            (2, 2, pike.smb2.SMB2_LOCKFLAG_EXCLUSIVE_LOCK),
            (4, 4, pike.smb2.SMB2_LOCKFLAG_EXCLUSIVE_LOCK)]
        credit_req = 3

        fh1 = chan1.create(
            tree1,
            fname,
            share=share_all).result()
        fh2 = chan2.create(
            tree2,
            fname,
            share=share_all).result()
        self.assertEqual(chan2.connection.credits, chan2_starting_credits)
        chan1.lock(fh1, [lock1]).result()

        # send 3 locks, 1 credit charge, 3 credit request
        lock_futures = []
        for l in contend_locks:
            with chan2.let(credit_request=credit_req):
                f = chan2.lock(fh2, [l])
                lock_futures.append(f)
        # wait for the interim responses
        for f in lock_futures:
            f.wait_interim()
            if f.interim_response is not None:
                self.assertEqual(f.interim_response.credit_response, credit_req)

        # at this point, we should have sent 3x 1charge, 3request lock commands
        exp_credits = chan2_starting_credits + len(contend_locks) * (credit_req - 1)
        self.assertEqual(chan2.connection.credits, exp_credits)

        # unlock fh1 locks
        lock1_un = tuple(list(lock1[:2]) + [pike.smb2.SMB2_LOCKFLAG_UN_LOCK])
        chan1.lock(fh1, [lock1_un])

        # these completion responses should each carry a 1 credit charge + 0 credit grant
        for f in lock_futures:
            self.assertEqual(f.result().credit_response, 0)
        self.assertEqual(chan2.connection.credits, exp_credits)
        buf = "\0\1\2\3\4\5\6\7"*8192

        # send a request for all of our credits
        chan2.write(fh2, 0, buf*exp_credits)
        self.assertEqual(chan2.connection.credits, exp_credits)

        # send a request for all of our credits + 1 (this should disconnect the client)
        with self.assertRaises(EOFError):
            chan2.write(fh2, 0, buf*(exp_credits + 1))
            self.fail("We should have {0} credits, but an {1} credit request succeeds".format(
                exp_credits, exp_credits + 1))

    def test_async_write(self):
        """
        establish 2 sessions
        session 1 opens file1 with read/handle caching lease
        session 2 opens file1 with no lease
        session 2 sends several large multi-credit writes triggering a lease break on session 1
        session 2 write requests will return STATUS_PENDING (server consumes credits)
        session 2 write requests should complete with STATUS_SUCCESS (server already consumed credit)
        """
        chan1, tree1 = self.tree_connect()
        chan2, tree2 = self.tree_connect()
        chan2_starting_credits = chan2.connection.negotiate_response.parent.credit_response
        fname = "test_async_write"
        lkey = array.array('B', map(random.randint, [0]*16, [255]*16))
        # buf is 64k
        buf = "\0\1\2\3\4\5\6\7"*8192
        write_request_multiples = [1, 2, 3, 4]
        credit_req = 16

        fh1 = chan1.create(
            tree1,
            fname,
            share=share_all,
            oplock_level=pike.smb2.SMB2_OPLOCK_LEVEL_LEASE,
            lease_key=lkey,
            lease_state=lease_rh).result()
        fh2 = chan2.create(
            tree2,
            fname,
            share=share_all).result()
        self.assertEqual(chan2.connection.credits, chan2_starting_credits)
        write_futures = []
        for n_credits in write_request_multiples:
            with chan2.let(credit_request=credit_req):
                f = chan2.connection.submit(chan2.write_request(fh2, 0, buf*n_credits).parent.parent)[0]
            f.wait_interim()
            if f.interim_response is not None:
                self.assertEqual(f.interim_response.credit_response, credit_req)
            write_futures.append(f)

        fh1.lease.on_break(lambda s: s)
        for w in write_futures:
            smb_resp = w.result()
            if smb_resp.flags & pike.smb2.SMB2_FLAGS_ASYNC_COMMAND:
                self.assertEqual(smb_resp.credit_response, 0)
            else:
                self.assertEqual(smb_resp.credit_response, credit_req)
        chan2.close(fh2)
        chan1.close(fh1)

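# TestCaseGenerator is a small helper that prints a standalone test module to
# stdout; each generated test calls generic_arbitrary_mc_write_mc_read with
# randomized sizes.  A typical invocation would be something like
# "python credit.py 64 > generated_credit_tests.py" (the output filename is
# hypothetical) to emit the 64k-multiple variant, or run the module without
# arguments for the arbitrary-size variant.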
class TestCaseGenerator(object):
    header = """#!/usr/bin/env python
import pike.test
import pike.test.credit


class Generated_{name}_{tag}(pike.test.credit.CreditTest):
"""
    footer = """if __name__ == "__main__":
    pike.test.unittest.main()
"""
    template = """
    def test_{name}_{tag}_{ix}(self):
        self.generic_arbitrary_mc_write_mc_read({file_size}, {write_size}, {read_size})"""
    @classmethod
    def generate_multiple_64k_test_cases(cls, tag, n_cases, size_range_multiple, write_range_multiple, read_range_multiple):
        name = "Mult64k"
        print(cls.header.format(**locals()))
        for ix in xrange(n_cases):
            file_size = 2**16 * random.randint(*size_range_multiple)
            write_size = 2**16 * random.randint(*write_range_multiple)
            read_size = 2**16 * random.randint(*read_range_multiple)
            print(cls.template.format(**locals()))
        print(cls.footer.format(**locals()))

    @classmethod
    def generate_arbitrary_test_cases(cls, tag, n_cases, size_range, write_range, read_range):
        name = "Arb"
        print(cls.header.format(**locals()))
        for ix in xrange(n_cases):
            file_size = random.randint(*size_range)
            write_size = random.randint(*write_range)
            read_size = random.randint(*read_range)
            print(cls.template.format(**locals()))
        print(cls.footer.format(**locals()))

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    if len(sys.argv) > 1 and sys.argv[1].startswith("64"):
        TestCaseGenerator.generate_multiple_64k_test_cases("gen1", 8, (1, 128), (1, 16), (1, 16))
    else:
        TestCaseGenerator.generate_arbitrary_test_cases("iter1", 32, (45*1024, 2**23), (2**15, 2**20), (2**15, 2**20))