Diffstat (limited to 'Lib/test/test_robotparser.py')
-rw-r--r--  Lib/test/test_robotparser.py | 90
1 file changed, 69 insertions(+), 21 deletions(-)
diff --git a/Lib/test/test_robotparser.py b/Lib/test/test_robotparser.py
index d01266f330..90b30722da 100644
--- a/Lib/test/test_robotparser.py
+++ b/Lib/test/test_robotparser.py
@@ -1,6 +1,7 @@
import io
import unittest
import urllib.robotparser
+from collections import namedtuple
from urllib.error import URLError, HTTPError
from urllib.request import urlopen
from test import support
@@ -12,7 +13,8 @@ except ImportError:
class RobotTestCase(unittest.TestCase):
-    def __init__(self, index=None, parser=None, url=None, good=None, agent=None):
+    def __init__(self, index=None, parser=None, url=None, good=None,
+                 agent=None, request_rate=None, crawl_delay=None):
# workaround to make unittest discovery work (see #17066)
if not isinstance(index, int):
return
@@ -25,6 +27,8 @@ class RobotTestCase(unittest.TestCase):
self.url = url
self.good = good
self.agent = agent
+ self.request_rate = request_rate
+ self.crawl_delay = crawl_delay
def runTest(self):
if isinstance(self.url, tuple):
@@ -34,6 +38,18 @@ class RobotTestCase(unittest.TestCase):
agent = self.agent
if self.good:
self.assertTrue(self.parser.can_fetch(agent, url))
+            self.assertEqual(self.parser.crawl_delay(agent), self.crawl_delay)
+            # Compare the request-rate fields only when both the expected
+            # and the parsed values are present; the parser's return type
+            # is not guaranteed, so avoid whole-object equality.
+            if self.request_rate and self.parser.request_rate(agent):
+                self.assertEqual(
+                    self.parser.request_rate(agent).requests,
+                    self.request_rate.requests
+                )
+                self.assertEqual(
+                    self.parser.request_rate(agent).seconds,
+                    self.request_rate.seconds
+                )
else:
self.assertFalse(self.parser.can_fetch(agent, url))
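For reference, a minimal sketch of the two accessors these assertions exercise, with directives mirroring test 4 below; agent names are illustrative, and the outputs noted in the comments reflect the parser as patched here:

    import urllib.robotparser

    lines = [
        "User-agent: figtree",
        "Crawl-delay: 3",
        "Request-rate: 9/30",
        "Disallow: /tmp",
    ]
    parser = urllib.robotparser.RobotFileParser()
    parser.parse(lines)

    print(parser.crawl_delay("figtree"))   # 3
    rate = parser.request_rate("figtree")
    print(rate.requests, rate.seconds)     # 9 30
    print(parser.crawl_delay("otherbot"))  # None: no entry matches this agent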
@@ -43,15 +59,17 @@ class RobotTestCase(unittest.TestCase):
tests = unittest.TestSuite()
def RobotTest(index, robots_txt, good_urls, bad_urls,
-              agent="test_robotparser"):
+              request_rate, crawl_delay, agent="test_robotparser"):
lines = io.StringIO(robots_txt).readlines()
parser = urllib.robotparser.RobotFileParser()
parser.parse(lines)
for url in good_urls:
-        tests.addTest(RobotTestCase(index, parser, url, 1, agent))
+        tests.addTest(RobotTestCase(index, parser, url, 1, agent,
+                                    request_rate, crawl_delay))
for url in bad_urls:
-        tests.addTest(RobotTestCase(index, parser, url, 0, agent))
+        tests.addTest(RobotTestCase(index, parser, url, 0, agent,
+                                    request_rate, crawl_delay))
# Examples from http://www.robotstxt.org/wc/norobots.html (fetched 2002)
@@ -65,14 +83,18 @@ Disallow: /foo.html
good = ['/','/test.html']
bad = ['/cyberworld/map/index.html','/tmp/xxx','/foo.html']
+request_rate = None
+crawl_delay = None
-RobotTest(1, doc, good, bad)
+RobotTest(1, doc, good, bad, request_rate, crawl_delay)
# 2.
doc = """
# robots.txt for http://www.example.com/
User-agent: *
+Crawl-delay: 1
+Request-rate: 3/15
Disallow: /cyberworld/map/ # This is an infinite virtual URL space
# Cybermapper knows where to go.
@@ -83,8 +105,10 @@ Disallow:
good = ['/','/test.html',('cybermapper','/cyberworld/map/index.html')]
bad = ['/cyberworld/map/index.html']
+request_rate = None  # Neither directive applies to the user agents tested
+crawl_delay = None   # here (e.g. cybermapper), so both values must be None.
-RobotTest(2, doc, good, bad)
+RobotTest(2, doc, good, bad, request_rate, crawl_delay)
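The None expectations above follow from entry matching: cybermapper matches its own entry, which carries neither directive, and this version of the parser does not report values declared under the catch-all "*" entry for other agents. A minimal sketch using the same document:

    import urllib.robotparser

    lines = [
        "User-agent: *",
        "Crawl-delay: 1",
        "Request-rate: 3/15",
        "Disallow: /cyberworld/map/",
        "",
        "User-agent: cybermapper",
        "Disallow:",
    ]
    parser = urllib.robotparser.RobotFileParser()
    parser.parse(lines)

    print(parser.crawl_delay("cybermapper"))       # None: its entry has no delay
    print(parser.request_rate("cybermapper"))      # None: and no rate
    print(parser.crawl_delay("test_robotparser"))  # None: "*" values not reported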
# 3.
doc = """
@@ -95,14 +119,18 @@ Disallow: /
good = []
bad = ['/cyberworld/map/index.html','/','/tmp/']
+request_rate = None
+crawl_delay = None
-RobotTest(3, doc, good, bad)
+RobotTest(3, doc, good, bad, request_rate, crawl_delay)
# Examples from http://www.robotstxt.org/wc/norobots-rfc.html (fetched 2002)
# 4.
doc = """
User-agent: figtree
+Crawl-delay: 3
+Request-rate: 9/30
Disallow: /tmp
Disallow: /a%3cd.html
Disallow: /a%2fb.html
@@ -115,8 +143,17 @@ bad = ['/tmp','/tmp.html','/tmp/a.html',
'/~joe/index.html'
]
-RobotTest(4, doc, good, bad, 'figtree')
-RobotTest(5, doc, good, bad, 'FigTree Robot libwww-perl/5.04')
+request_rate = namedtuple('req_rate', 'requests seconds')(9, 30)
+crawl_delay = 3
+request_rate_bad = None  # The expected values for test 5 are never checked;
+crawl_delay_bad = None   # they only satisfy the RobotTest signature.
+
+
+RobotTest(4, doc, good, bad, request_rate, crawl_delay, 'figtree')
+RobotTest(5, doc, good, bad, request_rate_bad, crawl_delay_bad,
+          'FigTree Robot libwww-perl/5.04')
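A note on the expected value for test 4: namedtuples inherit tuple equality, so the instance also compares equal to a plain (9, 30) tuple; runTest nevertheless checks the requests and seconds fields individually so as not to depend on the concrete type the parser returns. For example:

    from collections import namedtuple

    req_rate = namedtuple('req_rate', 'requests seconds')
    assert req_rate(9, 30) == (9, 30)     # tuples compare by value
    assert req_rate(9, 30).requests == 9  # fields are also accessible by name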
# 6.
doc = """
@@ -125,14 +162,18 @@ Disallow: /tmp/
Disallow: /a%3Cd.html
Disallow: /a/b.html
Disallow: /%7ejoe/index.html
+Crawl-delay: 3
+Request-rate: 9/banana
"""
good = ['/tmp',] # XFAIL: '/a%2fb.html'
bad = ['/tmp/','/tmp/a.html',
'/a%3cd.html','/a%3Cd.html',"/a/b.html",
'/%7Ejoe/index.html']
+crawl_delay = None   # "Crawl-delay: 3" sits under "*", whose values this
+request_rate = None  # version does not report; "9/banana" is malformed anyway.
-RobotTest(6, doc, good, bad)
+RobotTest(6, doc, good, bad, request_rate, crawl_delay)
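Test 6 above and tests 7 and 8 below all feed malformed directive values; a value that does not parse ("9/banana", "pears", "whale/banana") should be discarded, and the accessor should return None rather than raise. A sketch with an illustrative agent name:

    import urllib.robotparser

    lines = [
        "User-agent: spam",        # illustrative agent name
        "Crawl-delay: pears",      # not an integer
        "Request-rate: 9/banana",  # not <requests>/<seconds>
        "Disallow: /tmp/",
    ]
    parser = urllib.robotparser.RobotFileParser()
    parser.parse(lines)

    print(parser.crawl_delay("spam"))   # None
    print(parser.request_rate("spam"))  # None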
# From bug report #523041
@@ -140,12 +181,16 @@ RobotTest(6, doc, good, bad)
doc = """
User-Agent: *
Disallow: /.
+Crawl-delay: pears
"""
good = ['/foo.html']
-bad = [] # Bug report says "/" should be denied, but that is not in the RFC
+bad = [] # bug report says "/" should be denied, but that is not in the RFC
+
+crawl_delay = None   # "Crawl-delay: pears" is invalid syntax, so the parser
+request_rate = None  # returns None; no Request-rate directive is given.
-RobotTest(7, doc, good, bad)
+RobotTest(7, doc, good, bad, request_rate, crawl_delay)
# From Google: http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40364
@@ -154,12 +199,15 @@ doc = """
User-agent: Googlebot
Allow: /folder1/myfile.html
Disallow: /folder1/
+Request-rate: whale/banana
"""
good = ['/folder1/myfile.html']
bad = ['/folder1/anotherfile.html']
+crawl_delay = None   # no Crawl-delay directive is given
+request_rate = None  # "whale/banana" is invalid syntax, so the parser returns None
-RobotTest(8, doc, good, bad, agent="Googlebot")
+RobotTest(8, doc, good, bad, request_rate, crawl_delay, agent="Googlebot")
# 9. This file is incorrect because "Googlebot" is a substring of
# "Googlebot-Mobile", so test 10 works just like test 9.
@@ -174,12 +222,12 @@ Allow: /
good = []
bad = ['/something.jpg']
-RobotTest(9, doc, good, bad, agent="Googlebot")
+RobotTest(9, doc, good, bad, None, None, agent="Googlebot")
good = []
bad = ['/something.jpg']
-RobotTest(10, doc, good, bad, agent="Googlebot-Mobile")
+RobotTest(10, doc, good, bad, None, None, agent="Googlebot-Mobile")
# 11. Get the order correct.
doc = """
@@ -193,12 +241,12 @@ Disallow: /
good = []
bad = ['/something.jpg']
-RobotTest(11, doc, good, bad, agent="Googlebot")
+RobotTest(11, doc, good, bad, None, None, agent="Googlebot")
good = ['/something.jpg']
bad = []
-RobotTest(12, doc, good, bad, agent="Googlebot-Mobile")
+RobotTest(12, doc, good, bad, None, None, agent="Googlebot-Mobile")
# 13. Google also got the order wrong in #8. You need to specify the
@@ -212,7 +260,7 @@ Disallow: /folder1/
good = ['/folder1/myfile.html']
bad = ['/folder1/anotherfile.html']
-RobotTest(13, doc, good, bad, agent="googlebot")
+RobotTest(13, doc, good, bad, None, None, agent="googlebot")
# 14. For issue #6325 (query string support)
@@ -224,7 +272,7 @@ Disallow: /some/path?name=value
good = ['/some/path']
bad = ['/some/path?name=value']
-RobotTest(14, doc, good, bad)
+RobotTest(14, doc, good, bad, None, None)
# 15. For issue #4108 (obey first * entry)
doc = """
@@ -238,7 +286,7 @@ Disallow: /another/path
good = ['/another/path']
bad = ['/some/path']
-RobotTest(15, doc, good, bad)
+RobotTest(15, doc, good, bad, None, None)
# 16. Empty query (issue #17403). Normalizing the url first.
doc = """
@@ -250,7 +298,7 @@ Disallow: /another/path?
good = ['/some/path?']
bad = ['/another/path?']
-RobotTest(16, doc, good, bad)
+RobotTest(16, doc, good, bad, None, None)
class RobotHandler(BaseHTTPRequestHandler):