diff options
-rw-r--r-- | lib/Test.pm | 379 | ||||
-rw-r--r-- | lib/Test/t/fail.t | 11 | ||||
-rw-r--r-- | lib/Test/t/mix.t | 10 | ||||
-rw-r--r-- | lib/Test/t/todo.t | 21 |
4 files changed, 333 insertions, 88 deletions
diff --git a/lib/Test.pm b/lib/Test.pm index d497217ff1..47ed888d79 100644 --- a/lib/Test.pm +++ b/lib/Test.pm @@ -1,12 +1,13 @@ -package Test; require 5.004; +package Test; +# Time-stamp: "2002-08-26 03:09:51 MDT" use strict; use Carp; use vars (qw($VERSION @ISA @EXPORT @EXPORT_OK $ntest $TestLevel), #public-ish - qw($TESTOUT $TESTERR + qw($TESTOUT $TESTERR %Program_Lines $ONFAIL %todo %history $planned @FAILDETAIL) #private-ish ); @@ -20,7 +21,7 @@ sub _reset_globals { $planned = 0; } -$VERSION = '1.20'; +$VERSION = '1.21'; require Exporter; @ISA=('Exporter'); @@ -51,6 +52,9 @@ Test - provides a simple framework for writing test scripts # load your module... use MyModule; + # Helpful notes. All note-lines must start with a "#". + print "# I'm testing MyModule version $MyModule::VERSION\n"; + ok(0); # failure ok(1); # success @@ -64,54 +68,78 @@ Test - provides a simple framework for writing test scripts ok(sub { 1+1 }, 2); # success: '2' eq '2' ok(sub { 1+1 }, 3); # failure: '2' ne '3' - ok(0, int(rand(2)); # (just kidding :-) my @list = (0,0); - ok @list, 3, "\@list=".join(',',@list); #extra diagnostics + ok @list, 3, "\@list=".join(',',@list); #extra notes ok 'segmentation fault', '/(?i)success/'; #regex match - skip($feature_is_missing, ...); #do platform specific test + skip( + $^O eq 'MSWin' ? "Not for MSWin" : 0, # whether to skip + $foo, $bar # arguments just like for ok(...) + ); =head1 DESCRIPTION -B<STOP!> If you are writing a new test, we I<highly suggest> you use -the new Test::Simple and Test::More modules instead. +This module simplifies the task of writing test files for Perl modules, +such that their output is in the format that +L<Test::Harness|Test::Harness> expects to see. -L<Test::Harness|Test::Harness> expects to see particular output when it -executes tests. This module aims to make writing proper test scripts just -a little bit easier (and less error prone :-). 
+=head1 QUICK START GUIDE +To write a test for your new (and probably not even done) module, create +a new file called F<t/test.t> (in a new F<t> directory). If you have +multiple test files, to test the "foo", "bar", and "baz" feature sets, +then feel free to call your files F<t/foo.t>, F<t/bar.t>, and +F<t/baz.t> =head2 Functions -All the following are exported by Test by default. +This module defines three public functions, C<plan(...)>, C<ok(...)>, +and C<skip(...)>. By default, all three are exported by +the C<use Test;> statement. =over 4 -=item B<plan> +=item C<plan(...)> BEGIN { plan %theplan; } This should be the first thing you call in your test script. It declares your testing plan, how many there will be, if any of them -should be allowed to fail, etc... +should be allowed to fail, and so on. Typical usage is just: use Test; BEGIN { plan tests => 23 } -Things you can put in the plan: +These are the things that you can put in the parameters to plan: + +=over + +=item C<tests =E<gt> I<number>> + +The number of tests in your script. +This means all ok() and skip() calls. + +=item C<todo =E<gt> [I<1,5,14>]> + +A reference to a list of tests which are allowed to fail. +See L</TODO TESTS>. + +=item C<onfail =E<gt> sub { ... }> - tests The number of tests in your script. - This means all ok() and skip() calls. - todo A reference to a list of tests which are allowed - to fail. See L</TODO TESTS>. - onfail A subroutine reference to be run at the end of - the test script should any of the tests fail. - See L</ONFAIL>. +=item C<onfail =E<gt> \&some_sub> -You must call plan() once and only once. +A subroutine reference to be run at the end of the test script, if +any of the tests fail. See L</ONFAIL>. + +=back + +You must call C<plan(...)> once and only once. 
You should call it
+in a C<BEGIN {...}> block, like so:
+
+    BEGIN { plan tests => 23 }
 
 =cut
 
@@ -124,6 +152,8 @@ sub plan {
 
     _reset_globals();
 
+    _read_program( (caller)[1] );
+
     my $max=0;
     for (my $x=0; $x < @_; $x+=2) {
         my ($k,$v) = @_[$x,$x+1];
@@ -143,11 +173,39 @@ sub plan {
         print $TESTOUT "1..$max\n";
     }
     ++$planned;
+    print $TESTOUT "# Running under perl version $] for $^O",
+        (chr(65) eq 'A') ? "\n" : " in a non-ASCII world\n";
+
+    print $TESTOUT "# Win32::BuildNumber ", &Win32::BuildNumber(), "\n"
+        if defined(&Win32::BuildNumber) and defined &Win32::BuildNumber();
+
+    print $TESTOUT "# MacPerl version $MacPerl::Version\n"
+        if defined $MacPerl::Version;
+
+    printf $TESTOUT
+        "# Current time local: %s\n# Current time GMT: %s\n",
+        scalar(localtime($^T)), scalar( gmtime($^T));
+
+    print $TESTOUT "# Using Test.pm version $VERSION\n";
 
-    # Never used.
+    # Retval never used:
     return undef;
 }
 
+sub _read_program {
+    my($file) = shift;
+    return unless defined $file and length $file
+        and -e $file and -f _ and -r _;
+    open(SOURCEFILE, "<$file") || return;
+    $Program_Lines{$file} = [<SOURCEFILE>];
+    close(SOURCEFILE);
+
+    foreach my $x (@{$Program_Lines{$file}})
+        { $x =~ tr/[\cm\cj\n\r]//d }
+
+    unshift @{$Program_Lines{$file}}, '';
+    return 1;
+}
 
 =begin _private
 
@@ -155,8 +213,8 @@ sub plan {
 
 =head2
  my $value = _to_value($input);
 
-Converts an ok parameter to its value. Typically this just means
-running it if its a code reference. You should run all inputed
+Converts an C<ok> parameter to its value. Typically this just means
+running it, if it's a code reference. You should run all inputted
 values through this.
 
 =cut
 
@@ -168,17 +226,22 @@ sub _to_value {
 
 =end _private
 
-=item B<ok>
+=item C<ok(...)>
 
   ok(1 + 1 == 2);
   ok($have, $expect);
   ok($have, $expect, $diagnostics);
 
-This is the reason for Test's existance. Its the basic function that
-handles printing "ok" or "not ok" along with the current test number.
+This function is the reason for C<Test>'s existence.
It's +the basic function that +handles printing "C<ok>" or "C<not ok>", along with the +current test number. (That's what C<Test::Harness> wants to see.) + +In its most basic usage, C<ok(...)> simply takes a single scalar +expression. If its value is true, the test passes; if false, +the test fails. Examples: -In its most basic usage, it simply takes an expression. If its true, -the test passes, if false, the test fails. Simp. + # Examples of ok(scalar) ok( 1 + 1 == 2 ); # ok if 1 + 1 == 2 ok( $foo =~ /bar/ ); # ok if $foo contains 'bar' @@ -193,43 +256,78 @@ work: ok( !grep !defined $_, @stuff ); # ok if everything in @stuff is # defined. -A special case is if the expression is a subroutine reference. In +A special case is if the expression is a subroutine reference (in either +C<sub {...}> syntax or C<\&foo> syntax). In that case, it is executed and its value (true or false) determines if -the test passes or fails. +the test passes or fails. For example, -In its two argument form it compares the two values to see if they -equal (with C<eq>). + ok( sub { # See whether sleep works at least passably + my $start_time = time; + sleep 5; + time() - $start_time >= 4 + }); - ok( "this", "that" ); # not ok, 'this' ne 'that' +In its two-argument form, C<ok(I<arg1>,I<arg2>)> compares the two scalar +values to see if they equal. (The equality is checked with C<eq>). -If either is a subroutine reference, that is run and used as a -comparison. + # Example of ok(scalar, scalar) + + ok( "this", "that" ); # not ok, 'this' ne 'that' -Should $expect either be a regex reference (ie. qr//) or a string that -looks like a regex (ie. '/foo/') ok() will perform a pattern match -against it rather than using eq. +If either (or both!) is a subroutine reference, it is run and used +as the value for comparing. 
For example:
+
+  ok 4, sub {
+        open(OUT, ">x.dat") || die $!;
+        print OUT "\x{e000}";
+        close OUT;
+        my $bytecount = -s 'x.dat';
+        unlink 'x.dat' or warn "Can't unlink : $!";
+        return $bytecount;
+      },
+  ;
+
+The above test passes two values to C<ok(arg1, arg2)> -- the first is
+the number 4, and the second is a coderef. Before C<ok> compares them,
+it calls the coderef, and uses its return value as the real value of
+this parameter. Assuming that C<$bytecount> returns 4, C<ok> ends up
+testing C<4 eq 4>. Since that's true, this test passes.
+
+If C<arg2> is either a regex object (i.e., C<qr/.../>) or a string
+that I<looks like> a regex (e.g., C<'/foo/'>), then
+C<ok(I<arg1>,I<arg2>)> will perform a pattern
+match against it, instead of using C<eq>.
 
   ok( 'JaffO', '/Jaff/' );     # ok, 'JaffO' =~ /Jaff/
   ok( 'JaffO', qr/Jaff/ );     # ok, 'JaffO' =~ qr/Jaff/;
   ok( 'JaffO', '/(?i)jaff/' ); # ok, 'JaffO' =~ /jaff/i;
 
-Finally, an optional set of $diagnostics will be printed should the
-test fail. This should usually be some useful information about the
-test pertaining to why it failed or perhaps a description of the test.
-Or both.
+Finally, you can append an optional third argument, in
+C<ok(I<arg1>,I<arg2>, I<note>)>, where I<note> is a string value that
+will be printed if the test fails. This should be some useful
+information about the test, pertaining to why it failed, and/or
+a description of the test.
 
 For example:
 
   ok( grep($_ eq 'something unique', @stuff), 1,
       "Something that should be unique isn't!\n".
      '@stuff = '.join ', ', @stuff );
 
-Unfortunately, a diagnostic cannot be used with the single argument
-style of ok().
+Unfortunately, a note cannot be used with the single argument
+style of C<ok()>. That is, if you try C<ok(I<arg1>, I<note>)>, then
+C<Test> will interpret this as C<ok(I<arg1>, I<arg2>)>, and probably
+end up testing C<I<arg1> eq I<arg2>> -- and that's not what you want!
 
-All these special cases can cause some problems. See L</BUGS and CAVEATS>.
+All of the above special cases can occasionally cause some +problems. See L</BUGS and CAVEATS>. =cut +# A past maintainer of this module said: +# <<ok(...)'s special handling of subroutine references is an unfortunate +# "feature" that can't be removed due to compatibility.>> +# + sub ok ($;$$) { croak "ok: plan before you test!" if !$planned; @@ -240,6 +338,7 @@ sub ok ($;$$) { my $repetition = ++$history{"$file:$line"}; my $context = ("$file at line $line". ($repetition > 1 ? " fail \#$repetition" : '')); + my $ok=0; my $result = _to_value(shift); my ($expected,$diag,$isregex,$regex); @@ -308,6 +407,19 @@ sub ok ($;$$) { print $TESTERR "# $prefix Expected: $expected ($diag)\n"; } } + + if(defined $Program_Lines{$file}[$line]) { + print $TESTERR + "# $file line $line is: $Program_Lines{$file}[$line]\n" + if + $Program_Lines{$file}[$line] =~ m/[^\s\#\(\)\{\}\[\]\;]/ + # Otherwise it's a pretty uninteresting line! + ; + + undef $Program_Lines{$file}[$line]; + # So we won't repeat it. + } + push @FAILDETAIL, $detail; } } @@ -315,6 +427,83 @@ sub ok ($;$$) { $ok; } +=item C<skip(I<skip_if_true>, I<args...>)> + +This is used for tests that under some conditions can be skipped. It's +basically equivalent to: + + if( $skip_if_true ) { + ok(1); + } else { + ok( args... ); + } + +...except that the C<ok(1)> emits not just "C<ok I<testnum>>" but +actually "C<ok I<testnum> # I<skip_if_true_value>>". + +The arguments after the I<skip_if_true> are what is fed to C<ok(...)> if +this test isn't skipped. + +Example usage: + + my $if_MSWin = + $^O eq 'MSWin' ? 'Skip if under MSWin' : ''; + + # A test to be run EXCEPT under MSWin: + skip($if_MSWin, thing($foo), thing($bar) ); + +Or, going the other way: + + my $unless_MSWin = + $^O eq 'MSWin' ? 
'' : 'Skip unless under MSWin';
+
+   # A test to be run ONLY under MSWin:
+ skip($unless_MSWin, thing($foo), thing($bar) );
+
+The only tricky thing to remember is that the first parameter is true if
+you want to I<skip> the test, not I<run> it; and it also doubles as a
+note about why it's being skipped. So in the first codeblock above, read
+the code as "skip if MSWin -- (otherwise) test whether C<thing($foo)> is
+C<thing($bar)>" or for the second case, "skip unless MSWin...".
+
+Also, when your I<skip_if_true> string is true, it really should (for
+backwards compatibility with older Test.pm versions) start with the
+string "Skip", as shown in the above examples.
+
+Note that in the above cases, C<thing($foo)> and C<thing($bar)>
+I<are> evaluated -- but as long as the C<skip_if_true> is true,
+then C<skip(...)> just tosses out their value (i.e., not
+bothering to treat them like values to C<ok(...)>). But if
+you need to I<not> eval the arguments when skipping the
+test, use
+this format:
+
+  skip( $unless_MSWin,
+        sub {
+          # This code returns true if the test passes.
+          # (But it doesn't even get called if the test is skipped.)
+          thing($foo) eq thing($bar)
+        }
+      );
+
+or even this, which is basically equivalent:
+
+  skip( $unless_MSWin,
+        sub { thing($foo) }, sub { thing($bar) }
+      );
+
+That is, both are like this:
+
+  if( $unless_MSWin ) {
+    ok(1);  # but it actually appends "# $unless_MSWin"
+            #  so that Test::Harness can tell it's a skip
+  } else {
+    # Not skipping, so actually call and evaluate...
+    ok( sub { thing($foo) }, sub { thing($bar) } );
+  }
+
+=cut
+
 sub skip ($;$$$) {
     local($\, $,);   # guard against -l and other things that screw with
                      # print
 
@@ -341,7 +530,7 @@ sub skip ($;$$$) {
     #the documented interface as this has been deprecated.
#WARN - local($TestLevel) = $TestLevel+1; #ignore this stack frame + local($TestLevel) = $TestLevel+1; #to ignore this stack frame return &ok(@_); } } @@ -363,27 +552,30 @@ __END__ =item * NORMAL TESTS -These tests are expected to succeed. If they don't something's -screwed up! +These tests are expected to succeed. Usually, most or all of your tests +are in this category. If a normal test doesn't succeed, then that +means that something is I<wrong>. =item * SKIPPED TESTS -Skip is for tests that might or might not be possible to run depending -on the availability of platform specific features. The first argument +The C<skip(...)> function is for tests that might or might not be +possible to run, depending +on the availability of platform-specific features. The first argument should evaluate to true (think "yes, please skip") if the required -feature is not available. After the first argument, skip works -exactly the same way as do normal tests. +feature is I<not> available. After the first argument, C<skip(...)> works +exactly the same way as C<ok(...)> does. =item * TODO TESTS TODO tests are designed for maintaining an B<executable TODO list>. -These tests are expected NOT to succeed. If a TODO test does succeed, -the feature in question should not be on the TODO list, now should it? +These tests are I<expected to fail.> If a TODO test does succeed, +then the feature in question shouldn't be on the TODO list, now +should it? Packages should NOT be released with succeeding TODO tests. As soon -as a TODO test starts working, it should be promoted to a normal test +as a TODO test starts working, it should be promoted to a normal test, and the newly working feature should be documented in the release -notes or change log. +notes or in the change log. =back @@ -391,16 +583,16 @@ notes or change log. BEGIN { plan test => 4, onfail => sub { warn "CALL 911!" 
} } -While test failures should be enough, extra diagnostics can be +Although test failures should be enough, extra diagnostics can be triggered at the end of a test run. C<onfail> is passed an array ref of hash refs that describe each test failure. Each hash will contain at least the following fields: C<package>, C<repetition>, and C<result>. (The file, line, and test number are not included because their correspondence to a particular test is tenuous.) If the test -had an expected value or a diagnostic string, these will also be +had an expected value or a diagnostic (or "note") string, these will also be included. -The B<optional> C<onfail> hook might be used simply to print out the +The I<optional> C<onfail> hook might be used simply to print out the version of your package and/or how to report problems. It might also be used to generate extremely sophisticated diagnostics for a particularly bizarre test failure. However it's not a panacea. Core @@ -412,10 +604,24 @@ than the code it is testing, yes?) =head1 BUGS and CAVEATS -ok()'s special handling of subroutine references is an unfortunate -"feature" that can't be removed due to compatibility. +=over + +=item * + +C<ok(...)>'s special handing of strings which look like they might be +regexes can also cause unexpected behavior. An innocent: + + ok( $fileglob, '/path/to/some/*stuff/' ); + +will fail, since Test.pm considers the second argument to be a regex! +The best bet is to use the one-argument form: + + ok( $fileglob eq '/path/to/some/*stuff/' ); -ok()'s use of string eq can sometimes cause odd problems when comparing +=item * + +C<ok(...)>'s use of string C<eq> can sometimes cause odd problems +when comparing numbers, especially if you're casting a string to a number: $foo = "1.0"; @@ -425,27 +631,36 @@ Your best bet is to use the single argument form: ok( $foo == 1 ); # ok "1.0" == 1 -ok()'s special handing of strings which look like they might be -regexes can also cause unexpected behavior. 
An innocent: - - ok( $fileglob, '/path/to/some/*stuff/' ); +=item * -will fail since Test.pm considers the second argument to a regex. -Again, best bet is to use the single argument form: - - ok( $fileglob eq '/path/to/some/*stuff/' ); +As you may have inferred from the above documentation and examples, +C<ok>'s prototype is C<($;$$)> (and, incidentally, C<skip>'s is +C<($;$$$)>). This means, for example, that you can do C<ok @foo, @bar> +to compare the I<size> of the two arrays. But don't be fooled into +thinking that C<ok @foo, @bar> means a comparison of the contents of two +arrays -- you're comparing I<just> the number of elements of each. It's +so easy to make that mistake in reading C<ok @foo, @bar> that you might +want to be very explicit about it, and instead write C<ok scalar(@foo), +scalar(@bar)>. +=back =head1 NOTE -This module is no longer actively being developed, only bug fixes and -small tweaks (I'll still accept patches). If you desire additional -functionality, consider L<Test::More> or L<Test::Unit>. +A past developer of this module once said that it was no longer being +actively developed. However, rumors of its demise were greatly +exaggerated. Feedback and suggestions are quite welcome. + +Be aware that the main value of this module is its simplicity. Note +that there are already more ambitious modules out there, such as +L<Test::More> and L<Test::Unit>. =head1 SEE ALSO -L<Test::Simple>, L<Test::More>, L<Test::Harness>, L<Devel::Cover> +L<Test::Harness> + +L<Test::Simple>, L<Test::More>, L<Devel::Cover> L<Test::Builder> for building your own testing library. @@ -457,12 +672,18 @@ L<Test::Inline> and L<SelfTest> let you embed tests in code. =head1 AUTHOR Copyright (c) 1998-2000 Joshua Nathaniel Pritikin. All rights reserved. -Copyright (c) 2001-2002 Michael G Schwern. -Current maintainer, Michael G Schwern <schwern@pobox.com> +Copyright (c) 2001-2002 Michael G. Schwern. + +Copyright (c) 2002 Sean M. Burke. + +Current maintainer: Sean M. Burke. 
E<lt>sburke@cpan.orgE<gt> This package is free software and is provided "as is" without express or implied warranty. It may be used, redistributed and/or modified under the same terms as Perl itself. =cut + +# "Your mistake was a hidden intention." +# -- /Oblique Strategies/, Brian Eno and Peter Schmidt diff --git a/lib/Test/t/fail.t b/lib/Test/t/fail.t index ce37464d04..9051a1f481 100644 --- a/lib/Test/t/fail.t +++ b/lib/Test/t/fail.t @@ -51,8 +51,17 @@ shift @got; $Expect =~ s/\n+$//; my @expect = split /\n\n/, $Expect; + +sub commentless { + my $in = $_[0]; + $in =~ s/^#[^\n]*\n//mg; + $in =~ s/\n#[^\n]*$//mg; + return $in; +} + + for (my $x=0; $x < @got; $x++) { - ok $got[$x], $expect[$x]."\n"; + ok commentless($got[$x]), commentless($expect[$x]."\n"); } diff --git a/lib/Test/t/mix.t b/lib/Test/t/mix.t index a746ba66b5..5298338318 100644 --- a/lib/Test/t/mix.t +++ b/lib/Test/t/mix.t @@ -46,5 +46,13 @@ ok 4 # skip EXPECT +sub commentless { + my $in = $_[0]; + $in =~ s/^#[^\n]*\n//mg; + $in =~ s/\n#[^\n]*$//mg; + return $in; +} + + print "1..1\n"; -ok( $out, $expect ); +ok( commentless($out), commentless($expect) ); diff --git a/lib/Test/t/todo.t b/lib/Test/t/todo.t index 2f179e4547..74f9aefb8b 100644 --- a/lib/Test/t/todo.t +++ b/lib/Test/t/todo.t @@ -9,11 +9,11 @@ use Test qw(:DEFAULT $TESTOUT $TESTERR $ntest); open F, ">todo"; $TESTOUT = *F{IO}; $TESTERR = *F{IO}; - my $tests = 5; plan tests => $tests, todo => [2..$tests]; -# line 11 + +# tests to go to the output file ok(1); ok(1); ok(0,1); @@ -33,16 +33,23 @@ unlink "todo"; my $expect = <<"EXPECT"; 1..5 todo 2 3 4 5; ok 1 -ok 2 # ($0 at line 12 TODO?!) +ok 2 # ($0 at line 18 TODO?!) not ok 3 -# Test 3 got: '0' ($0 at line 13 *TODO*) +# Test 3 got: '0' ($0 at line 19 *TODO*) # Expected: '1' not ok 4 -# Test 4 got: '0' ($0 at line 14 *TODO*) +# Test 4 got: '0' ($0 at line 20 *TODO*) # Expected: '1' (need more tuits) -ok 5 # ($0 at line 15 TODO?!) +ok 5 # ($0 at line 21 TODO?!) 
EXPECT +sub commentless { + my $in = $_[0]; + $in =~ s/^#[^\n]*\n//mg; + $in =~ s/\n#[^\n]*$//mg; + return $in; +} + print "1..1\n"; -ok( $out, $expect ); +ok( commentless($out), commentless($expect) ); |