1""" 2/***************************************************************************** 3 4 Licensed to Accellera Systems Initiative Inc. (Accellera) under one or 5 more contributor license agreements. See the NOTICE file distributed 6 with this work for additional information regarding copyright ownership. 7 Accellera licenses this file to you under the Apache License, Version 2.0 8 (the "License"); you may not use this file except in compliance with the 9 License. You may obtain a copy of the License at 10 11 http://www.apache.org/licenses/LICENSE-2.0 12 13 Unless required by applicable law or agreed to in writing, software 14 distributed under the License is distributed on an "AS IS" BASIS, 15 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 16 implied. See the License for the specific language governing 17 permissions and limitations under the License. 18 19 *****************************************************************************/ 20 21Python Script to test Endianness Conversion Functions for OSCI TLM-2 22 23There is a simple testbench programme in C++ which runs a single 24transaction through a single conversion function, to a simple target 25memory and back. This script will run the testbench many times and test for 26- incomplete execution, seg-faults, etc 27- distributability of conversion function: each transaction should have 28the same effect on initiator/target memory as a set of smaller transactions 29that sum to it 30- equivalence: all conversion functions should have the same functional 31effect as each other 32 33The approach is to provide the initial state of the initiator and 34target memory (as strings) and to capture their final states, so that 35only the effect of the transaction on the data buffers is measured. 36 37Script works out for itself which conversion functions are legal for a 38given transaction and applies all of them. Note that where data is 39wider than bus, only one conversion function is legal and testing is 40somewhat limited. 41 42Testing space (select a transaction at random from the space): 43- with and without byte-enables (generated at random for each data word 44and can be all-zero) 45- data widths of (1,2,4,8,16) 46- bus widths of (1,2,4,8,16), lower priority for those smaller than data 47width 48- transaction lengths of (1..32) x data width, higher probability for 49lower values 50- base address (0..1023) at bus_width steps 51- offset address (0..bus width) with a higher priority for 0 52- address in initiator buffer uniform random 53- read or write 54- byte-enable length may be smaller than transasction length 55- may be a streaming burst 56 57Transaction breakdown 58- individual words (always) 59- one random breakdown with each segment containing between 1 and N-1 60words, where N is the length in words 61- one breakdown with two segments interleaved, using (additional) byte 62enables to suppress the part where the other segment is active 63 64Data buffer definition: starts at 0, randomly filled 65with lower case letters and numbers. Size 2 kB. Addresses are limited to 661 kB. 67""" 68 69 70import random 71import string 72 73 74class transaction: 75 """ contains read_not_write, address, length, byte_enable, 76 bus_width, data_width, data_pointer, stream_width """ 77 def __init__(self, **a): self.__dict__ = a 78 def __str__(self): 79 if self.read_not_write: a = "R: " 80 else: a = "W: " 81 a += "addr = %d, len = %d, bus = %d, word = %d, data = %d" % \ 82 (self.address, self.length, self.bus_width, self.data_width, \ 83 self.data_pointer) 84 if self.byte_enable: a += ", be = " + self.byte_enable 85 else: a += ", be = x" 86 a += ", sw = %d" % (self.stream_width) 87 return a 88 89 90def txn_generator(nr): 91 pr_read = 0.5 92 pr_byte_enable = 0.5 93 pr_enabled = 0.5 94 bus_widths = [1, 2, 4, 8, 16] 95 data_widths = [1, 2, 4, 8, 16] + [1, 2, 4, 8] + [1, 2, 4] + [1, 2] 96 lengths = list(range(1,33)) + list(range(1,17)) + list(range(1,9)) + list(range(1,5)) + list(range(1,3)) 97 pr_short_be = 0.2 98 pr_stream = 0.1 99 nr_generated = 0 100 while nr_generated < nr: 101 # create a random transaction 102 bus_width = random.choice(bus_widths) 103 while True: 104 data_width = random.choice(data_widths) 105 if data_width <= bus_width: break 106 if random.random() < 0.25: break 107 length = random.choice(lengths) 108 addr_base = random.choice(list(range(0,1024,bus_width))) 109 addr_offset = random.choice(list(range(bus_width))+[0]*(bus_width/2)) 110 txn = transaction( 111 bus_width = bus_width, 112 data_width = data_width, 113 read_not_write = random.random() < pr_read, 114 length = length * data_width, 115 address = addr_base + addr_offset, 116 byte_enable = False, 117 stream_width = length * data_width, 118 data_pointer = random.randint(0,1023) 119 ) 120 if random.random() < pr_byte_enable: 121 belen = length 122 if random.random() < pr_short_be: 123 belen = min(random.choice(lengths), length) 124 bep = ["0" * data_width, "1" * data_width] 125 txn.byte_enable = "".join([random.choice(bep) for x in range(belen)]) 126 if random.random() < pr_stream and length > 1: 127 strlen = length 128 while True: 129 strlen -= 1 130 if strlen == 1 or \ 131 (random.random() < 0.5 and (length/strlen)*strlen == length): 132 break 133 txn.stream_width = strlen * data_width 134 nr_generated += 1 135 yield txn 136 137# test code for transaction generator 138if False: 139 for t in txn_generator(20): 140 print t 141 raise Exception 142# end test code 143 144 145class memory_state_cl: 146 buffer_size = 2048 147 repeats = 10 * buffer_size / 36 148 population = (string.lowercase + string.digits) * repeats 149 def __init__(self): 150 self.initiator = "".join( 151 random.sample(memory_state_cl.population, memory_state_cl.buffer_size)) 152 self.target = "".join( 153 random.sample(memory_state_cl.population, memory_state_cl.buffer_size)) 154 def copy(self): 155 r = memory_state_cl() 156 r.initiator = self.initiator 157 r.target = self.target 158 return r 159 def __eq__(self, golden): 160 return self.initiator==golden.initiator and self.target==golden.target 161 def __ne__(self, golden): 162 return self.initiator!=golden.initiator or self.target!=golden.target 163 def __str__(self): 164 return "initiator = " + self.initiator + "\n" + "target = " + self.target 165 166 167# all fragmentation generators 168def __FRAG__null(txn): 169 yield txn 170 171def __FRAG__word(txn): 172 curr_address = txn.address 173 reset_address = curr_address + txn.stream_width 174 if txn.byte_enable: 175 full_byte_enable = txn.byte_enable * (1+txn.length/len(txn.byte_enable)) 176 be_pos = 0 177 d_pos = txn.data_pointer 178 end = txn.length + d_pos 179 while d_pos < end: 180 new_txn = transaction( 181 bus_width = txn.bus_width, 182 data_width = txn.data_width, 183 read_not_write = txn.read_not_write, 184 length = txn.data_width, 185 address = curr_address, 186 byte_enable = False, 187 stream_width = txn.data_width, 188 data_pointer = d_pos 189 ) 190 curr_address += txn.data_width 191 if curr_address == reset_address: curr_address = txn.address 192 d_pos += txn.data_width 193 if txn.byte_enable: 194 new_txn.byte_enable = full_byte_enable[be_pos:be_pos+txn.data_width] 195 be_pos += txn.data_width 196 yield new_txn 197 198def __FRAG__stream(txn): 199 if txn.byte_enable: 200 full_byte_enable = txn.byte_enable * (1+txn.length/len(txn.byte_enable)) 201 be_pos = 0 202 bytes_done = 0 203 while bytes_done < txn.length: 204 new_txn = transaction( 205 bus_width = txn.bus_width, 206 data_width = txn.data_width, 207 read_not_write = txn.read_not_write, 208 length = txn.stream_width, 209 address = txn.address, 210 byte_enable = False, 211 stream_width = txn.stream_width, 212 data_pointer = bytes_done + txn.data_pointer 213 ) 214 if txn.byte_enable: 215 new_txn.byte_enable = full_byte_enable[be_pos:be_pos+txn.stream_width] 216 be_pos += txn.stream_width 217 yield new_txn 218 bytes_done += txn.stream_width 219 220def __FRAG__random(stream_txn): 221 for txn in __FRAG__stream(stream_txn): 222 # txn has full byte enables and no stream feature guaranteed 223 pr_nofrag = 0.5 224 end_address = txn.address + txn.length 225 curr_address = txn.address 226 be_pos = 0 227 d_pos = txn.data_pointer 228 while curr_address < end_address: 229 new_txn = transaction( 230 bus_width = txn.bus_width, 231 data_width = txn.data_width, 232 read_not_write = txn.read_not_write, 233 length = txn.data_width, 234 address = curr_address, 235 byte_enable = txn.byte_enable, 236 stream_width = txn.data_width, 237 data_pointer = d_pos 238 ) 239 curr_address += txn.data_width 240 d_pos += txn.data_width 241 if txn.byte_enable: 242 new_txn.byte_enable = txn.byte_enable[be_pos:be_pos+txn.data_width] 243 be_pos += txn.data_width 244 while random.random() < pr_nofrag and curr_address < end_address: 245 new_txn.length += txn.data_width 246 new_txn.stream_width += txn.data_width 247 curr_address += txn.data_width 248 d_pos += txn.data_width 249 if txn.byte_enable: 250 new_txn.byte_enable += txn.byte_enable[be_pos:be_pos+txn.data_width] 251 be_pos += txn.data_width 252 yield new_txn 253 254def __FRAG__randinterleave(stream_txn): 255 for txn in __FRAG__stream(stream_txn): 256 # txn has full byte enables and no stream feature guaranteed 257 pr_frag = 0.5 258 txns = [ transaction( 259 bus_width = txn.bus_width, 260 data_width = txn.data_width, 261 read_not_write = txn.read_not_write, 262 length = txn.length, 263 address = txn.address, 264 byte_enable = "", 265 stream_width = txn.length, 266 data_pointer = txn.data_pointer 267 ), transaction( 268 bus_width = txn.bus_width, 269 data_width = txn.data_width, 270 read_not_write = txn.read_not_write, 271 length = txn.length, 272 address = txn.address, 273 byte_enable = "", 274 stream_width = txn.length, 275 data_pointer = txn.data_pointer 276 ) ] 277 curr = 0 278 be_pos = 0 279 on = "1" * txn.data_width 280 off = "0" * txn.data_width 281 while be_pos < txn.length: 282 if txn.byte_enable: bew = txn.byte_enable[be_pos:be_pos+txn.data_width] 283 else: bew = on 284 txns[curr].byte_enable += bew 285 txns[1-curr].byte_enable += off 286 be_pos += txn.data_width 287 if random.random() < pr_frag: curr = 1-curr 288 yield txns[0] 289 yield txns[1] 290 291fragmenters = [globals()[n] for n in globals().keys() if n[:8]=="__FRAG__"] 292 293# test code for fragmenters 294if False: 295 for t in txn_generator(1): 296 print t 297 print 298 for u in fragmenters[4](t): 299 print u 300 raise Exception 301# end test code 302 303 304# conversion functions are determined by an index (shared with C++) and 305# a function that tests if they can be applied to a transaction 306def __CHCK__generic(txn): 307 __CHCK__generic.nr = 0 308 return True 309 310def __CHCK__word(txn): 311 __CHCK__word.nr = 1 312 if txn.data_width > txn.bus_width: return False 313 if txn.stream_width < txn.length: return False 314 if txn.byte_enable and len(txn.byte_enable) < txn.length: return False 315 return True 316 317def __CHCK__aligned(txn): 318 __CHCK__aligned.nr = 2 319 if txn.data_width > txn.bus_width: return False 320 if txn.stream_width < txn.length: return False 321 if txn.byte_enable and len(txn.byte_enable) < txn.length: return False 322 base_addr = txn.address / txn.bus_width 323 if base_addr * txn.bus_width != txn.address: return False 324 nr_bus_words = txn.length / txn.bus_width 325 if nr_bus_words * txn.bus_width != txn.length: return False 326 return True 327 328def __CHCK__single(txn): 329 __CHCK__single.nr = 3 330 if txn.length != txn.data_width: return False 331 base_addr = txn.address / txn.bus_width 332 end_base_addr = (txn.address + txn.length - 1) / txn.bus_width 333 if base_addr != end_base_addr: return False 334 return True 335 336def __CHCK__local_single(txn): 337 __CHCK__local_single.nr = 4 338 if txn.length != txn.data_width: return False 339 return True 340 341all_converters = [globals()[n] for n in globals().keys() if n[:8]=="__CHCK__"] 342for x in all_converters: x.usage = 0 343 344 345class TesterFailure(Exception): pass 346class SystemCFailure(Exception): pass 347class ConverterDifference(Exception): pass 348class FragmenterDifference(Exception): pass 349 350from subprocess import Popen, PIPE 351 352# test a single fragment in multiple ways 353def test_a_fragment(f, ms): 354 # f is the (fragment of a) transaction 355 # ms is the memory state to use at start of test 356 357 # run the same fragment through all applicable conversion functions 358 # and check they all do the same thing 359 # use the same sub-process for all of them 360 361 # build complete stdin 362 convs = [c for c in all_converters if c(f)] 363 if len(convs) == 0: raise TesterFailure(f.str()) 364 txtin = "\n".join( 365 [("%s\n%s\nconverter = %d\n" % (f, ms, c.nr)) for c in convs]) 366 367 # run and get stdout 368 txtout = "no output" 369 try: 370 sp = Popen("../build-unix/test_endian_conv.exe", stdin=PIPE, stdout=PIPE) 371 txtout = sp.communicate(txtin)[0] 372 tmp = txtout.splitlines() 373 initiators = [l.split()[-1] for l in tmp if l[:14] == " initiator = "] 374 targets = [l.split()[-1] for l in tmp if l[:11] == " target = "] 375 except: 376 raise SystemCFailure("\n" + txtin + txtout) 377 if sp.returncode != 0: raise SystemCFailure("\n" + txtin + txtout) 378 if len(initiators) != len(convs): raise SystemCFailure("\n" + txtin + txtout) 379 if len(targets) != len(convs): raise SystemCFailure("\n" + txtin + txtout) 380 for c in convs: c.usage += 1 381 382 ms_out = memory_state_cl() 383 ms_out.initiator = initiators[0] 384 ms_out.target = targets[0] 385 for i in range(1,len(convs)): 386 if initiators[i]!=ms_out.initiator or targets[i]!=ms_out.target: 387 raise ConverterDifference(""" 388%s 389start memory: 390%s 391converter = %d 392golden memory: 393%s 394actual memory: 395%s""" % (f, ms, i, golden_ms, ms_out)) 396 397 return ms_out 398 399 400# main loop 401 402from sys import argv 403 404print "Testing Endianness Conversion Functions" 405print "March 2008" 406print "OSCI TLM-2" 407 408try: nr_txns_to_test = int(argv[1]) 409except: 410 print "No command line input for number of tests, using default" 411 nr_txns_to_test = 1000 412 413print "Number to test:", nr_txns_to_test 414 415# generate and test a number of transactions 416for txn in txn_generator(nr_txns_to_test): 417 418 # each transaction has a random initial memory state 419 initial_memory = memory_state_cl() 420 421 # iterate over all defined fragmentation functions 422 first_time = True 423 for fragmenter in fragmenters: 424 425 # all versions of the transaction start in the same place 426 memory_state = initial_memory.copy() 427 428 # now iterate over the fragments of the transaction, accumulating 429 # the memory state 430 for partial_txn in fragmenter(txn): 431 memory_state = test_a_fragment(partial_txn, memory_state) 432 433 if first_time: 434 golden_memory_state = memory_state.copy() 435 first_time = False 436 else: 437 if memory_state != golden_memory_state: raise FragmenterDifference(""" 438fragmenter: %s 439transaction: 440%s 441start memory: 442%s 443golden memory: 444%s 445actual memory: 446%s""" % (fragmenter, txn, initial_memory, golden_memory_state, memory_state)) 447 448 print ".", 449print 450 451 452print "Conversion functions usage frequency:" 453for c in all_converters: 454 print c.nr, c.__name__, c.usage 455 456 457