class Beaker::TestSuiteResult
Holds the output of a test suite and formats it as plain text or JUnit XML.
Attributes
Public Class Methods
Source
# File lib/beaker/test_suite_result.rb, line 15
def initialize(options, name)
  @options = options
  @logger = options[:logger]
  @name = name
  @test_cases = []
end
Create a {TestSuiteResult} instance.

@param [Hash{Symbol=>String}] options Options for this object
@option options [Logger] :logger The Logger object to report information to
@param [String] name The name of the {TestSuite} that the results are for
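A minimal construction sketch (assuming the beaker gem is loaded; the options hash below is illustrative and far smaller than the one Beaker itself passes in):

  require 'beaker'

  # Illustrative options: TestSuiteResult only pulls :logger out of this hash
  # at construction time; a real run passes Beaker's full option set.
  options = { :logger => Beaker::Logger.new }

  suite_result = Beaker::TestSuiteResult.new(options, 'acceptance')
  suite_result.test_count #=> 0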
Public Instance Methods
Source
# File lib/beaker/test_suite_result.rb, line 24
def add_test_case(test_case)
  @test_cases << test_case
end
Add a {TestCase} to this {TestSuiteResult} instance; added cases are used in calculating the {TestSuiteResult} data.

@param [TestCase] test_case An individual, completed {TestCase} to be included in this {TestSuiteResult}.
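The tallying helpers below read only each case's test_status, runtime, and path (the XML writer also reads exception, exports, sublog, and last_result), so a lightweight stand-in is enough to sketch the bookkeeping; a real run adds completed Beaker::TestCase objects. FakeCase and the paths are hypothetical, continuing the sketch above:

  # Hypothetical stand-in exposing the attributes TestSuiteResult reads.
  FakeCase = Struct.new(:path, :test_status, :runtime, :exception,
                        :exports, :sublog, :last_result)

  suite_result.add_test_case(FakeCase.new('tests/a_test.rb', :pass, 1.5, nil, []))
  suite_result.add_test_case(FakeCase.new('tests/b_test.rb', :fail, 2.5, nil, []))

  suite_result.test_count   #=> 2
  suite_result.passed_tests #=> 1
  suite_result.failed_tests #=> 1
  suite_result.elapsed_time #=> 4.0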
Source
# File lib/beaker/test_suite_result.rb, line 74
def elapsed_time
  @test_cases.inject(0.0) { |r, t| r + t.runtime.to_f }
end
The sum of all {TestCase} runtimes in this {TestSuiteResult}
Source
# File lib/beaker/test_suite_result.rb, line 39
def errored_tests
  @test_cases.count { |c| c.test_status == :error }
end
How many errored {TestCase} instances are in this {TestSuiteResult}
Source
# File lib/beaker/test_suite_result.rb, line 69
def failed?
  !success?
end
Did one or more {TestCase} instances in this {TestSuiteResult} fail?
Source
# File lib/beaker/test_suite_result.rb, line 44
def failed_tests
  @test_cases.count { |c| c.test_status == :fail }
end
How many failed {TestCase} instances are in this {TestSuiteResult}
Source
# File lib/beaker/test_suite_result.rb, line 34
def passed_tests
  @test_cases.count { |c| c.test_status == :pass }
end
How many passed {TestCase} instances are in this {TestSuiteResult}
Source
# File lib/beaker/test_suite_result.rb, line 54
def pending_tests
  @test_cases.count { |c| c.test_status == :pending }
end
How many pending {TestCase} instances are in this {TestSuiteResult}
Source
# File lib/beaker/test_suite_result.rb, line 152
def persist_test_results(filepath)
  return if filepath.empty?

  results = @test_cases.select { |c| %i[fail error].include? c.test_status }.map(&:path)
  File.open(filepath, 'w') { |file| file.puts JSON.dump(results) }
end
Saves failure and error cases as a JSON file for only-failures processing
@param [String] filepath Where to put the results
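Continuing the sketch above, persisting the results writes a JSON array containing the paths of the failing and erroring cases, which a later run can consume for an only-failures rerun (the file name is illustrative):

  require 'json'

  suite_result.persist_test_results('last_failures.json')
  JSON.parse(File.read('last_failures.json')) #=> ["tests/b_test.rb"]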
Source
# File lib/beaker/test_suite_result.rb, line 132
def print_test_result(test_case)
  if test_case.exception
    test_file_trace = ""
    test_case.exception.backtrace.each do |line|
      if line.include?(test_case.path)
        test_file_trace = "\r\n    Test line: #{line}"
        break
      end
    end if test_case.exception.backtrace && test_case.path
    test_reported = "reported: #{test_case.exception.inspect}#{test_file_trace}"
  else
    # No exception: report the bare status (:pass, :skip, etc.)
    test_reported = test_case.test_status
  end
  "  Test Case #{test_case.path} #{test_reported}"
end
A convenience method for printing the results of a {TestCase}.

@param [TestCase] test_case The {TestCase} to examine and print results for
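Roughly, the returned line echoes the bare status when there is no exception, and otherwise the inspected exception plus, if the backtrace mentions the test file, the offending line (continuing the hypothetical stand-ins above):

  puts suite_result.print_test_result(FakeCase.new('tests/a_test.rb', :pass, 1.5))
  #   Test Case tests/a_test.rb pass

  failing = FakeCase.new('tests/b_test.rb', :fail, 2.5, RuntimeError.new('boom'))
  puts suite_result.print_test_result(failing)
  #   Test Case tests/b_test.rb reported: #<RuntimeError: boom>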
Source
# File lib/beaker/test_suite_result.rb, line 49
def skipped_tests
  @test_cases.count { |c| c.test_status == :skip }
end
How many skipped {TestCase} instances are in this {TestSuiteResult}
Source
# File lib/beaker/test_suite_result.rb, line 64
def success?
  sum_failed == 0
end
Did all the {TestCase} instances in this {TestSuiteResult} pass?
Source
# File lib/beaker/test_suite_result.rb, line 59
def sum_failed
  failed_tests + errored_tests
end
How many {TestCase} instances failed or errored in this {TestSuiteResult}
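With the stand-ins above (one :fail, no :error):

  suite_result.sum_failed #=> 1
  suite_result.success?   #=> false
  suite_result.failed?    #=> true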
Source
# File lib/beaker/test_suite_result.rb, line 80
def summarize(summary_logger)
  summary_logger.notify <<-HEREDOC
  Test Suite: #{@name} @ #{start_time}

  - Host Configuration Summary -
  HEREDOC

  average_test_time = elapsed_time / test_count

  summary_logger.notify format(%[

        - Test Case Summary for suite '#{@name}' -
   Total Suite Time: %.2f seconds
  Average Test Time: %.2f seconds
          Attempted: #{test_count}
             Passed: #{passed_tests}
             Failed: #{failed_tests}
            Errored: #{errored_tests}
            Skipped: #{skipped_tests}
            Pending: #{pending_tests}
              Total: #{@total_tests}

  - Specific Test Case Status -
  ], elapsed_time, average_test_time)

  grouped_summary = @test_cases.group_by { |test_case| test_case.test_status }

  summary_logger.notify "Failed Tests Cases:"
  (grouped_summary[:fail] || []).each do |test_case|
    summary_logger.notify print_test_result(test_case)
  end

  summary_logger.notify "Errored Tests Cases:"
  (grouped_summary[:error] || []).each do |test_case|
    summary_logger.notify print_test_result(test_case)
  end

  summary_logger.notify "Skipped Tests Cases:"
  (grouped_summary[:skip] || []).each do |test_case|
    summary_logger.notify print_test_result(test_case)
  end

  summary_logger.notify "Pending Tests Cases:"
  (grouped_summary[:pending] || []).each do |test_case|
    summary_logger.notify print_test_result(test_case)
  end

  summary_logger.notify("\n\n")
end
Plain text summary of the test suite.

@param [Logger] summary_logger The logger we will print the summary to
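summarize only needs an object that responds to notify, which Beaker's own Logger does. The suite's start time and total-test figure are normally recorded by TestSuite before summarizing; the sketch below pokes them in directly just to keep the call self-contained (an assumption about the underlying instance variables, not part of the public API):

  # For the sketch only: TestSuite normally records these before summarizing.
  suite_result.instance_variable_set(:@start_time, Time.now)
  suite_result.instance_variable_set(:@total_tests, suite_result.test_count)

  suite_result.summarize(Beaker::Logger.new)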
Source
# File lib/beaker/test_suite_result.rb, line 29
def test_count
  @test_cases.length
end
How many {TestCase} instances are in this {TestSuiteResult}
Source
# File lib/beaker/test_suite_result.rb, line 172
def write_junit_xml(xml_file, file_to_link = nil, time_sort = false)
  stylesheet = File.join(@options[:project_root], @options[:xml_stylesheet])

  begin
    LoggerJunit.write_xml(xml_file, stylesheet) do |_doc, suites|
      meta_info = suites.add_element(REXML::Element.new('meta_test_info'))
      if file_to_link.nil?
        meta_info.add_attribute('page_active', 'no-links')
        meta_info.add_attribute('link_url', '')
      else
        time_sort ? meta_info.add_attribute('page_active', 'performance') : meta_info.add_attribute('page_active', 'execution')
        meta_info.add_attribute('link_url', file_to_link)
      end

      suite = suites.add_element(REXML::Element.new('testsuite'))
      suite.add_attributes(
        [
          ['name', @name],
          ['tests', test_count],
          ['errors', errored_tests],
          ['failures', failed_tests],
          ['skipped', skipped_tests],
          ['pending', pending_tests],
          ['total', @total_tests],
          ['time', format("%f", (stop_time - start_time))],
        ],
      )
      properties = suite.add_element(REXML::Element.new('properties'))
      @options.each_pair do |name, value|
        property = properties.add_element(REXML::Element.new('property'))
        property.add_attributes([['name', name], ['value', value.to_s || '']])
      end

      test_cases_to_report = @test_cases
      test_cases_to_report = @test_cases.sort { |x, y| y.runtime <=> x.runtime } if time_sort
      test_cases_to_report.each do |test|
        item = suite.add_element(REXML::Element.new('testcase'))
        item.add_attributes(
          [
            ['classname', File.dirname(test.path)],
            ['name', File.basename(test.path)],
            ['time', "%f" % test.runtime],
          ],
        )

        test.exports.each do |export|
          export.keys.each do |key|
            item.add_attribute(key.to_s.tr(" ", "_"), export[key])
          end
        end

        # Report failures
        if test.test_status == :fail || test.test_status == :error
          status = item.add_element(REXML::Element.new('failure'))
          status.add_attribute('type', test.test_status.to_s)
          if test.exception
            status.add_attribute('message', test.exception.to_s.delete("\e"))
            data = LoggerJunit.format_cdata(test.exception.backtrace.join('\n'))
            REXML::CData.new(data, true, status)
          end
        end

        if test.test_status == :skip
          status = item.add_element(REXML::Element.new('skipped'))
          status.add_attribute('type', test.test_status.to_s)
        end

        if test.test_status == :pending
          status = item.add_element(REXML::Element.new('pending'))
          status.add_attribute('type', test.test_status.to_s)
        end

        if test.sublog
          stdout = item.add_element(REXML::Element.new('system-out'))
          data = LoggerJunit.format_cdata(test.sublog)
          REXML::CData.new(data, true, stdout)
        end

        if test.last_result and test.last_result.stderr and not test.last_result.stderr.empty?
          stderr = item.add_element('system-err')
          data = LoggerJunit.format_cdata(test.last_result.stderr)
          REXML::CData.new(data, true, stderr)
        end
      end
    end
  rescue StandardError => e
    @logger.error "failure in XML output: \n#{e}" + e.backtrace.join("\n")
  end
end
Writes JUnit XML of this {TestSuiteResult}.

@param [String] xml_file Path to the XML file (from Beaker's running directory)
@param [String] file_to_link Path to the paired file that should be linked from this one (this is relative to the XML file itself, so it would just be the different file name if they're in the same directory)
@param [Boolean] time_sort Whether the test results should be output in order of time spent in the test, or in the order of test execution (default)
@return nil
@api private
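A hedged usage sketch, continuing from above: the XML writer additionally expects :project_root and :xml_stylesheet in the options and a recorded stop time, all of which Beaker normally supplies. The paths, placeholder stylesheet, and instance-variable poke below are assumptions made only to keep the sketch self-contained; any failure during writing is rescued and reported through the logger.

  # Illustrative values; Beaker's option parser normally supplies these.
  options[:project_root]   = Dir.pwd
  options[:xml_stylesheet] = 'junit.xsl'
  # Placeholder stylesheet so there is something to copy next to the XML file.
  File.write('junit.xsl', '') unless File.exist?('junit.xsl')

  # For the sketch only: TestSuite normally records the stop time.
  suite_result.instance_variable_set(:@stop_time, Time.now)

  suite_result.write_junit_xml('beaker_junit.xml')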