19 """
20 Contains the base test class for test execution and validation.
21
22 For more information see the L{pysys.basetest.BaseTest} API documentation.
23
24 """
25 import sys, os, os.path, re, string, time, thread, logging, copy, math, stat, inspect
26
27 from pysys import log
28 from pysys.constants import *
29 from pysys.exceptions import *
30 from pysys.utils.filecopy import filecopy
31 from pysys.utils.filegrep import filegrep
32 from pysys.utils.filegrep import lastgrep
33 from pysys.utils.filediff import filediff
34 from pysys.utils.filegrep import orderedgrep
35 from pysys.utils.linecount import linecount
36 from pysys.process.user import ProcessUser
37 from pysys.process.helper import ProcessWrapper
38 from pysys.process.monitor import ProcessMonitor
39 from pysys.manual.ui import ManualTester


TEST_TEMPLATE = '''%s
%s

class %s(%s):
    def execute(self):
        pass

    def validate(self):
        pass
'''


54
56 """The base class for all PySys testcases.
57
58 BaseTest is the parent class of all PySys system testcases. The class provides utility functions for
59 cross-platform process management and manipulation, test timing, and test validation. Any PySys testcase
60 should inherit from the base test and provide an implementation of the abstract L{execute} method
61 defined in this class. Child classes can also overide the L{setup}, L{cleanup} and L{validate}
62 methods of the class to provide custom setup and cleanup actions for a particual test, and to perform
63 all validation steps in a single method should this prove logically more simple.
64
65 Execution of a PySys testcase is performed through an instance of the L{pysys.baserunner.BaseRunner}
66 class, or a subclass thereof. The base runner instantiates an instance of the testcase, and then calls
67 the C{setup}, C{execute}, C{validate} and C{cleanup} methods of the instance. All processes started during
68 the test execution are reference counted within the base test, and terminated within the C{cleanup} method.
69
70 Validation of the testcase is through the C{assert*} methods. Execution of many methods appends an outcome
71 to the outcome data structure maintained by the ProcessUser base class, thus building up a record of the
72 individual validation outcomes. Several potential outcomes are supported by the PySys framework
73 (C{SKIPPED}, C{BLOCKED}, C{DUMPEDCORE}, C{TIMEDOUT}, C{FAILED}, C{NOTVERIFIED}, and C{PASSED}) and the
74 overall outcome of the testcase is determined using aprecedence order of the individual outcomes.
75
76 All C{assert*} methods except for C{assertThat} support variable argument lists for common non-default parameters.
77 Currently this includes the C{assertMessage} parameter, to override the default statement logged by the framework
78 to stdout and the run log, and the C{abortOnError} parameter, to override the defaultAbortOnError project
79 setting.
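
    For example, a call that overrides both of these parameters might look like the following
    (an illustrative sketch only; the expression and message are hypothetical)::

        self.assertTrue(serverStatus == 'OK', assertMessage='Check the server reports an OK status', abortOnError=True)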

    @ivar mode: The user defined mode the test is running within. Subclasses can use this in conditional checks
        to modify the test execution based upon the mode.
    @type mode: string
    @ivar input: Full path to the input directory of the testcase. This is used both by the class and its
        subclasses to locate the default directory containing all input data to the testcase, as defined
        in the testcase descriptor.
    @type input: string
    @ivar output: Full path to the output sub-directory of the testcase. This is used both by the class and its
        subclasses to locate the default directory for output produced by the testcase. Note that this
        is the actual directory where all output is written, as modified from that defined in the testcase
        descriptor to accommodate the sub-directory used within this location to sandbox concurrent
        execution of the test, and/or to denote the run number.
    @type output: string
    @ivar reference: Full path to the reference directory of the testcase. This is used both by the class and its
        subclasses to locate the default directory containing all reference data to the testcase, as defined
        in the testcase descriptor.
    @type reference: string
    @ivar log: Reference to the logger instance of this class
    @type log: logging.Logger
    @ivar project: Reference to the project details as set on the module load of the launching executable
    @type project: L{Project}

    """

    def __init__(self, descriptor, outsubdir, runner):
        """Create an instance of the BaseTest class.

        @param descriptor: The descriptor for the test giving all test details
        @param outsubdir: The output subdirectory the test output will be written to
        @param runner: Reference to the runner responsible for executing the testcase

        """
        ProcessUser.__init__(self)
        self.descriptor = descriptor
        self.input = descriptor.input
        self.output = os.path.join(descriptor.output, outsubdir)
        self.reference = descriptor.reference
        self.runner = runner
        self.mode = runner.mode
        self.setKeywordArgs(runner.xargs)
        self.monitorList = []
        self.manualTester = None
        self.resources = []


    def setKeywordArgs(self, xargs):
        """Set the xargs as data attributes of the test class.

        Values in the xargs dictionary are set as data attributes using the builtin C{setattr} method.
        Thus an xargs dictionary of the form C{{'foo': 'bar'}} will result in a data attribute of the
        form C{self.foo} with value C{bar}. This is used so that subclasses can define default values of
        data attributes, which can be overridden on instantiation e.g. using the -X options to the
        runTest.py launch executable.

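        For example (an illustrative sketch; the C{iterations} attribute is hypothetical), a default
        defined on the test class can be overridden from the command line with C{-Xiterations=5}::

            class PySysTest(BaseTest):
                iterations = '2'  # default value; replaced by setKeywordArgs() when -Xiterations is supplied

                def execute(self):
                    self.log.info('Running %s iterations', self.iterations)
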
        @param xargs: A dictionary of the user defined extra arguments

        """
        for key in xargs.keys():
            setattr(self, key, xargs[key])


145 """Setup method which may optionally be overridden to perform custom setup operations prior to test execution.
146
147 """
148 pass
149
150
152 """Execute method which must be overridden to perform the test execution steps.
153
154 @raises NotImplementedError: Raised exeception should the method not be overridden
155 """
156 raise NotImplementedError, "The execute method of the BaseTest class must be implemented in a subclass"
157
158
160 """Validate method which may optionally be overridden to group all validation steps.
161
162 """
163 pass
164
165
167 """Cleanup method which performs cleanup actions after execution and validation of the test.
168
169 The cleanup method performs actions to stop all processes started in the background and not
170 explicitly killed during the test execution. It also stops all process monitors running in
171 seperate threads, and any instances of the manual tester user interface.
172
173 Should a custom cleanup for a subclass be required, use
174 L{addCleanupFunction} instead of overriding this method.
175
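        For example, a subclass could register its own cleanup action during C{execute}
        (an illustrative sketch; C{myServerHandle} and its C{stop} method are hypothetical)::

            self.addCleanupFunction(lambda: self.myServerHandle.stop())
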
        """
        try:
            if self.manualTester and self.manualTester.running():
                self.stopManualTester()

            for monitor in self.monitorList:
                if monitor.running(): monitor.stop()

            while len(self.resources) > 0:
                self.resources.pop()
        finally:
            ProcessUser.cleanup(self)


    def addResource(self, resource):
        """Add a resource which is owned by the test and is therefore
        cleaned up (deleted) when the test is cleaned up.

        Deprecated - please use L{addCleanupFunction} instead of this function.
        """
        self.resources.append(resource)


    def startProcessMonitor(self, process, interval, file, **kwargs):
        """Start a separate thread to log process statistics to logfile, and return a handle to the process monitor.

        This method uses the L{pysys.process.monitor} module to perform logging of the process statistics,
        starting the monitor as a separate background thread. Should the request to log the statistics fail,
        a C{BLOCKED} outcome will be added to the test outcome list. All process monitors not explicitly
        stopped using the returned handle are automatically stopped on completion of the test via the L{cleanup}
        method of the BaseTest.

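        For example (an illustrative sketch; the process handle and log file name are hypothetical)::

            monitor = self.startProcessMonitor(myProcess, interval=1, file=os.path.join(self.output, 'monitor-stats.log'))
            self.stopProcessMonitor(monitor)
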
        @param process: The process handle returned from the L{startProcess} method
        @param interval: The interval in seconds between collecting and logging the process statistics
        @param file: The full path to the filename used for logging the process statistics
        @param kwargs: Keyword arguments to allow platform specific configurations

        @return: A handle to the process monitor (L{pysys.process.monitor.ProcessMonitor})
        @rtype: handle

        """
        monitor = ProcessMonitor(process.pid, interval, file, **kwargs)
        try:
            self.log.info("Starting process monitor on process with id = %d", process.pid)
            monitor.start()
        except ProcessError, e:
            self.addOutcome(BLOCKED, 'Unable to start process monitor for %s: %s'%(process, e))
        else:
            self.monitorList.append(monitor)
            return monitor


    def stopProcessMonitor(self, monitor):
        """Stop a process monitor.

        @param monitor: The process monitor handle returned from the L{startProcessMonitor} method

        """
        if monitor.running(): monitor.stop()


    def startManualTester(self, file, filedir=None, state=FOREGROUND, timeout=TIMEOUTS['ManualTester']):
        """Start the manual tester.

        The manual tester user interface (UI) is used to describe a series of manual steps to be performed
        to execute and validate a test. Only a single instance of the UI can be running at any given time, and
        can be run either in the C{FOREGROUND} (method will not return until the UI is closed or the timeout
        occurs) or in the C{BACKGROUND} (method will return straight away so automated actions may be performed
        concurrently). Should the UI be terminated due to expiry of the timeout, a C{TIMEDOUT} outcome will be
        added to the outcome list. The UI can be stopped via the L{stopManualTester} method. An instance of the
        UI not explicitly stopped within a test will automatically be stopped via the L{cleanup} method of the
        BaseTest.

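        For example (an illustrative sketch; the input file name and timeout value are hypothetical)::

            self.startManualTester('manual.xml', timeout=1800)
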
        @param file: The name of the manual test xml input file (see L{pysys.xml.manual} for details on the DTD)
        @param filedir: The directory containing the manual test xml input file (defaults to the testcase input directory)
        @param state: Start the manual tester either in the C{FOREGROUND} or C{BACKGROUND} (defaults to C{FOREGROUND})
        @param timeout: The timeout period after which to terminate a manual tester running in the C{FOREGROUND}

        """
        if filedir is None: filedir = self.input

        if not self.manualTester or self.manualTester.running() == 0:
            self.manualTester = ManualTester(self, os.path.join(filedir, file))
            thread.start_new_thread(self.manualTester.start, ())

            if state == FOREGROUND:
                startTime = time.time()
                while self.manualTester.running() == 1:
                    currentTime = time.time()
                    if currentTime > startTime + timeout:
                        self.addOutcome(TIMEDOUT, 'Manual tester timed out')
                        self.manualTester.stop()
                        return
                    time.sleep(1)
            else:
                time.sleep(1)
        else:
            self.addOutcome(BLOCKED, 'Manual tester failed')


    def stopManualTester(self):
        """Stop the manual tester if running.

        """
        if self.manualTester and self.manualTester.running():
            self.manualTester.stop()
            time.sleep(1)
        else:
            self.addOutcome(BLOCKED, 'Manual tester could not be stopped')


    def waitManualTester(self, timeout=TIMEOUTS['ManualTester']):
        """Wait for the manual tester to be stopped via user interaction.

        """
        if self.manualTester and self.manualTester.running():
            startTime = time.time()
            while self.manualTester.running() == 1:
                currentTime = time.time()
                if currentTime > startTime + timeout:
                    self.addOutcome(TIMEDOUT, 'Timed out waiting for manual tester')
                    self.manualTester.stop()
                    return
                time.sleep(1)


    def wait(self, interval):
        """Wait for a specified period of time.

        @param interval: The time interval in seconds to wait

        """
        log.info('Waiting for %0.1f seconds'%interval)
        time.sleep(interval)


    def assertThat(self, conditionstring, *args):
        """Perform a validation based on a python eval string.

        The eval string should be specified as a format string, with zero or more %s-style
        arguments. This provides an easy way to check conditions that also produces clear
        outcome messages.

        e.g. self.assertThat('%d >= 5 or "%s"=="foobar"', myvalue, myothervalue)

        @param conditionstring: A string that will have any following args
            substituted into it and then be evaluated as a boolean python
            expression. If your args are strings that could contain double-quotes,
            put single quotes around the %s in the conditionstring, and vice-versa.
        @param args: Zero or more arguments to be substituted into the format
            string

        """
        try:
            expr = conditionstring
            if args:
                expr = expr % args

            result = bool(eval(expr))
        except Exception, e:
            self.addOutcome(BLOCKED, 'Failed to evaluate "%s" with args %r: %s'%(conditionstring, args, e))
            return

        if result:
            self.addOutcome(PASSED, 'Assertion on %s'%expr)
        else:
            self.addOutcome(FAILED, 'Assertion on %s'%expr)


    def assertTrue(self, expr, **xargs):
        """Perform a validation assert on the supplied expression evaluating to true.

        If the supplied expression evaluates to true a C{PASSED} outcome is added to the
        outcome list. Should the expression evaluate to false, a C{FAILED} outcome is added.

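        For example (an illustrative sketch; the variable and message are hypothetical)::

            self.assertTrue(messageCount == 5, assertMessage='Check that exactly 5 messages were received')
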
        @param expr: The expression, as a boolean, to check for the True | False value
        @param xargs: Variable argument list (see class description for supported parameters)

        """
        msg = self.__assertMsg(xargs, 'Assertion on boolean expression equal to true')
        if expr == True:
            self.addOutcome(PASSED, msg, abortOnError=self.__abortOnError(xargs))
        else:
            self.addOutcome(FAILED, msg, abortOnError=self.__abortOnError(xargs))


    def assertFalse(self, expr, **xargs):
        """Perform a validation assert on the supplied expression evaluating to false.

        If the supplied expression evaluates to false a C{PASSED} outcome is added to the
        outcome list. Should the expression evaluate to true, a C{FAILED} outcome is added.

        @param expr: The expression to check for the true | false value
        @param xargs: Variable argument list (see class description for supported parameters)

        """
        msg = self.__assertMsg(xargs, 'Assertion on boolean expression equal to false')
        if expr == False:
            self.addOutcome(PASSED, msg, abortOnError=self.__abortOnError(xargs))
        else:
            self.addOutcome(FAILED, msg, abortOnError=self.__abortOnError(xargs))


    def assertDiff(self, file1, file2, filedir1=None, filedir2=None, ignores=[], sort=False, replace=[], includes=[], **xargs):
        """Perform a validation assert on the comparison of two input text files.

        This method performs a file comparison on two input files. The files are pre-processed prior to the
        comparison to either ignore particular lines, sort their constituent lines, replace matches to regular
        expressions in a line with an alternate value, or to only include particular lines. Should the files
        after pre-processing be equivalent a C{PASSED} outcome is added to the test outcome list, otherwise
        a C{FAILED} outcome is added.

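        For example (an illustrative sketch; the file names, expressions and replacements are hypothetical)::

            self.assertDiff('run.log', 'ref_run.log',
                ignores=['Timestamp: '],
                replace=[(r'port \d+', 'port <port>')])
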
        @param file1: The basename of the first file used in the file comparison
        @param file2: The basename of the second file used in the file comparison (often a reference file)
        @param filedir1: The dirname of the first file (defaults to the testcase output subdirectory)
        @param filedir2: The dirname of the second file (defaults to the testcase reference directory)
        @param ignores: A list of regular expressions used to denote lines in the files which should be ignored
        @param sort: Boolean flag to indicate if the lines in the files should be sorted prior to the comparison
        @param replace: List of tuples of the form ('regexpr', 'replacement'). For each regular expression in the
            list, any occurrences in the files are replaced with the replacement value prior to the comparison being
            carried out. This is often useful to replace timestamps in logfiles etc.
        @param includes: A list of regular expressions used to denote lines in the files which should be used in the
            comparison. Only lines which match an expression in the list are used for the comparison
        @param xargs: Variable argument list (see class description for supported parameters)

        """
        if filedir1 is None: filedir1 = self.output
        if filedir2 is None: filedir2 = self.reference
        f1 = os.path.join(filedir1, file1)
        f2 = os.path.join(filedir2, file2)

        log.debug("Performing file comparison:")
        log.debug("  file1:       %s" % file1)
        log.debug("  filedir1:    %s" % filedir1)
        log.debug("  file2:       %s" % file2)
        log.debug("  filedir2:    %s" % filedir2)

        msg = self.__assertMsg(xargs, 'File comparison between %s and %s'%(file1, file2))
        unifiedDiffOutput = os.path.join(self.output, os.path.basename(f1)+'.diff')
        try:
            result = filediff(f1, f2, ignores, sort, replace, includes, unifiedDiffOutput=unifiedDiffOutput)
        except:
            log.warn("caught %s: %s", sys.exc_info()[0], sys.exc_info()[1], exc_info=1)
            self.addOutcome(BLOCKED, '%s failed due to %s: %s'%(msg, sys.exc_info()[0], sys.exc_info()[1]), abortOnError=self.__abortOnError(xargs))
        else:
            self.addOutcome(PASSED if result else FAILED, msg, abortOnError=self.__abortOnError(xargs))
            if not result:
                self.logFileContents(unifiedDiffOutput)


    def assertGrep(self, file, filedir=None, expr='', contains=True, ignores=None, literal=False, **xargs):
        """Perform a validation assert on a regular expression occurring in a text file.

        When the C{contains} input argument is set to true, this method will add a C{PASSED} outcome
        to the test outcome list if the supplied regular expression is seen in the file; otherwise a
        C{FAILED} outcome is added. Should C{contains} be set to false, a C{PASSED} outcome will only
        be added should the regular expression not be seen in the file.

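        For example (an illustrative sketch; the file name and expression are hypothetical)::

            self.assertGrep('myprocess.log', expr=' ERROR .*', contains=False)
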
        @param file: The basename of the file used in the grep
        @param filedir: The dirname of the file (defaults to the testcase output subdirectory)
        @param expr: The regular expression to check for in the file (or a string literal if literal=True).
            If the assertion fails, the expression (and any matched text) is reported in the test outcome
        @param contains: Boolean flag to denote if the expression should or should not be seen in the file
        @param ignores: Optional list of regular expressions that will be
            ignored when reading the file.
        @param literal: By default expr is treated as a regex, but set this to True to pass in
            a string literal instead
        @param xargs: Variable argument list (see class description for supported parameters)

        """
        if filedir is None: filedir = self.output
        f = os.path.join(filedir, file)

        if literal:
            def escapeRegex(expr):
                # escape regex special characters so the literal string matches itself
                regex = expr
                expr = ''
                for c in regex:
                    if c in '\\{}[]+?^$':
                        expr += '\\'+c
                    elif c in '().*/':
                        expr += '['+c+']'
                    else:
                        expr += c
                return expr
            expr = escapeRegex(expr)

        log.debug("Performing grep on file:")
        log.debug("  file:       %s" % file)
        log.debug("  filedir:    %s" % filedir)
        log.debug("  expr:       %s" % expr)
        log.debug("  contains:   %s" % LOOKUP[contains])
        try:
            result = filegrep(f, expr, ignores=ignores, returnMatch=True)
        except:
            log.warn("caught %s: %s", sys.exc_info()[0], sys.exc_info()[1], exc_info=1)
            msg = self.__assertMsg(xargs, 'Grep on %s %s "%s"'%(file, 'contains' if contains else 'does not contain', expr))
            self.addOutcome(BLOCKED, '%s failed due to %s: %s'%(msg, sys.exc_info()[0], sys.exc_info()[1]), abortOnError=self.__abortOnError(xargs))
        else:
            # use a short message when the assert passes, and a more verbose one on failure,
            # including any text that matched but should not have been present
            outcome = PASSED if (result!=None) == contains else FAILED
            if outcome == PASSED:
                msg = self.__assertMsg(xargs, 'Grep on input file %s' % file)
            else:
                msg = self.__assertMsg(xargs, 'Grep on %s %s "%s"'%(file, 'contains' if contains else 'does not contain',
                    result.group(0) if result else expr))
            self.addOutcome(outcome, msg, abortOnError=self.__abortOnError(xargs))


    def assertLastGrep(self, file, filedir=None, expr='', contains=True, ignores=[], includes=[], **xargs):
        """Perform a validation assert on a regular expression occurring in the last line of a text file.

        When the C{contains} input argument is set to true, this method will add a C{PASSED} outcome
        to the test outcome list if the supplied regular expression is seen in the last line of the file;
        otherwise a C{FAILED} outcome is added. Should C{contains} be set to false, a C{PASSED} outcome will only
        be added should the regular expression not be seen in the last line of the file.

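        For example (an illustrative sketch; the file name and expression are hypothetical)::

            self.assertLastGrep('myprocess.log', expr='Test complete')
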
        @param file: The basename of the file used in the grep
        @param filedir: The dirname of the file (defaults to the testcase output subdirectory)
        @param expr: The regular expression to check for in the last line of the file
        @param contains: Boolean flag to denote if the expression should or should not be seen in the file
        @param ignores: A list of regular expressions used to denote lines in the file which should be ignored
        @param includes: A list of regular expressions used to denote lines in the file which should be used in the assertion
        @param xargs: Variable argument list (see class description for supported parameters)

        """
        if filedir is None: filedir = self.output
        f = os.path.join(filedir, file)

        log.debug("Performing grep on file:")
        log.debug("  file:       %s" % file)
        log.debug("  filedir:    %s" % filedir)
        log.debug("  expr:       %s" % expr)
        log.debug("  contains:   %s" % LOOKUP[contains])

        msg = self.__assertMsg(xargs, 'Grep on last line of %s %s "%s"'%(file, 'contains' if contains else 'does not contain', expr))
        try:
            result = lastgrep(f, expr, ignores, includes) == contains
        except:
            log.warn("caught %s: %s", sys.exc_info()[0], sys.exc_info()[1], exc_info=1)
            self.addOutcome(BLOCKED, '%s failed due to %s: %s'%(msg, sys.exc_info()[0], sys.exc_info()[1]), abortOnError=self.__abortOnError(xargs))
        else:
            if result: msg = self.__assertMsg(xargs, 'Grep on input file %s' % file)
            self.addOutcome(PASSED if result else FAILED, msg, abortOnError=self.__abortOnError(xargs))


    def assertOrderedGrep(self, file, filedir=None, exprList=[], contains=True, **xargs):
        """Perform a validation assert on a list of regular expressions occurring in specified order in a text file.

        When the C{contains} input argument is set to true, this method will append a C{PASSED} outcome
        to the test outcome list if the supplied regular expressions in the C{exprList} are seen in the file
        in the order they appear in the list; otherwise a C{FAILED} outcome is added. Should C{contains} be set
        to false, a C{PASSED} outcome will only be added should the regular expressions not be seen in the file in
        the order they appear in the list.

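        For example (an illustrative sketch; the file name and expressions are hypothetical)::

            self.assertOrderedGrep('myprocess.log', exprList=['Started', 'Processing request .*', 'Terminated'])
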
        @param file: The basename of the file used in the ordered grep
        @param filedir: The dirname of the file (defaults to the testcase output subdirectory)
        @param exprList: A list of regular expressions which should occur in the file in the order they appear in the list
        @param contains: Boolean flag to denote if the expressions should or should not be seen in the file in the order specified
        @param xargs: Variable argument list (see class description for supported parameters)

        """
        if filedir is None: filedir = self.output
        f = os.path.join(filedir, file)

        log.debug("Performing ordered grep on file:")
        log.debug("  file:       %s" % file)
        log.debug("  filedir:    %s" % filedir)
        for expr in exprList: log.debug("  exprList:   %s" % expr)
        log.debug("  contains:   %s" % LOOKUP[contains])

        msg = self.__assertMsg(xargs, 'Ordered grep on input file %s' % file)
        try:
            expr = orderedgrep(f, exprList)
        except:
            log.warn("caught %s: %s", sys.exc_info()[0], sys.exc_info()[1], exc_info=1)
            self.addOutcome(BLOCKED, '%s failed due to %s: %s'%(msg, sys.exc_info()[0], sys.exc_info()[1]), abortOnError=self.__abortOnError(xargs))
        else:
            if expr is None and contains:
                result = PASSED
            elif expr is None and not contains:
                result = FAILED
            elif expr is not None and not contains:
                result = PASSED
            else:
                result = FAILED

            self.addOutcome(result, msg, abortOnError=self.__abortOnError(xargs))
            if result == FAILED: log.warn("Ordered grep failed on expression \"%s\"", expr)


    def assertLineCount(self, file, filedir=None, expr='', condition=">=1", ignores=None, **xargs):
        """Perform a validation assert on the number of lines in a text file matching a specific regular expression.

        This method will add a C{PASSED} outcome to the outcome list if the number of lines in the
        input file matching the specified regular expression evaluates to true when combined with
        the supplied condition.

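        For example (an illustrative sketch; the file name and expression are hypothetical)::

            self.assertLineCount('myprocess.log', expr='INFO .*request accepted', condition='>=2')
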
        @param file: The basename of the file used in the line count
        @param filedir: The dirname of the file (defaults to the testcase output subdirectory)
        @param expr: The regular expression used to match a line of the input file
        @param condition: The condition to be met for the number of lines matching the regular expression
        @param ignores: A list of regular expressions that will cause lines to be excluded from the count
        @param xargs: Variable argument list (see class description for supported parameters)

        """
        if filedir is None: filedir = self.output
        f = os.path.join(filedir, file)

        try:
            numberLines = linecount(f, expr, ignores=ignores)
            log.debug("Number of matching lines is %d"%numberLines)
        except:
            log.warn("caught %s: %s", sys.exc_info()[0], sys.exc_info()[1], exc_info=1)
            msg = self.__assertMsg(xargs, 'Line count on input file %s' % file)
            self.addOutcome(BLOCKED, '%s failed due to %s: %s'%(msg, sys.exc_info()[0], sys.exc_info()[1]), abortOnError=self.__abortOnError(xargs))
        else:
            if (eval("%d %s" % (numberLines, condition))):
                msg = self.__assertMsg(xargs, 'Line count on input file %s' % file)
                self.addOutcome(PASSED, msg, abortOnError=self.__abortOnError(xargs))
            else:
                msg = self.__assertMsg(xargs, 'Line count on %s for "%s"%s (actual=%d)'%(file, expr, condition, numberLines))
                self.addOutcome(FAILED, msg, abortOnError=self.__abortOnError(xargs))


    def __assertMsg(self, xargs, default):
        """Return the assert message to use, allowing an C{assertMessage} keyword argument to override the default value.

        @param xargs: Variable argument list to an assert method
        @param default: Default assert message to return if C{assertMessage} is not supplied

        """
        if xargs.has_key('assertMessage'): return xargs['assertMessage']
        return default


    def __abortOnError(self, xargs):
        """Return the abortOnError setting to use, allowing an C{abortOnError} keyword argument to override the project default.

        @param xargs: Variable argument list to an assert method

        """
        if xargs.has_key('abortOnError'): return xargs['abortOnError']
        return PROJECT.defaultAbortOnError.lower()=='true' if hasattr(PROJECT, 'defaultAbortOnError') else DEFAULT_ABORT_ON_ERROR
626