#! /usr/bin/env python

"""Web tree checker.

This utility is handy to check a subweb of the world-wide web for
errors. A subweb is specified by giving one or more ``root URLs''; a
page belongs to the subweb if one of the root URLs is an initial
prefix of it.

File URL extension:

In order to ease the checking of subwebs via the local file system,
the interpretation of ``file:'' URLs is extended to mimic the behavior
of your average HTTP daemon: if a directory pathname is given, the
file index.html in that directory is returned if it exists, otherwise
a directory listing is returned. Now, you can point webchecker to the
document tree in the local file system of your HTTP daemon, and have
most of it checked. In fact the default works this way if your local
web tree is located at /usr/local/etc/httpd/htdocs (the default for
the NCSA HTTP daemon and probably others).
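
For example, checking such a local tree (this is also the default root;
substitute your own document directory) is simply:

    webchecker.py file:/usr/local/etc/httpd/htdocs/

The trailing directory URL is served as its index.html if that file
exists, or as a generated directory listing otherwise.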

Report printed:

When done, it reports pages with bad links within the subweb. When
interrupted, it reports on the pages that it has checked so far.

In verbose mode, additional messages are printed during the
information gathering phase. By default, it prints a summary of its
work status every 50 URLs (adjustable with the -r option), and it
reports errors as they are encountered. Use the -q option to disable
this output.

Checkpoint feature:

Whether interrupted or not, it dumps its state (a Python pickle) to a
checkpoint file, and the -R option allows it to restart from that
checkpoint (assuming that the pages in the subweb that were already
processed haven't changed). Even when it has run to completion, -R
can still be useful -- it will print the reports again, and -Rq prints
only the errors. In this case, the checkpoint file is not written
again. The checkpoint file can be set with the -d option.

The checkpoint file is written as a Python pickle. Remember that
Python's pickle module is currently quite slow. Give it the time it
needs to load and save the checkpoint file. When interrupted while
writing the checkpoint file, the old checkpoint file is not
overwritten, but all work done in the current run is lost.
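
For example, to resume an interrupted run from the default checkpoint
file, or from an explicitly named one (hypothetical name):

    webchecker.py -R
    webchecker.py -R -d mydump.pickle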

Miscellaneous:

- You may find the (Tk-based) GUI version easier to use. See wcgui.py.

- Webchecker honors the "robots.txt" convention. Thanks to Skip
Montanaro for his robotparser.py module (included in this directory)!
The agent name is hardwired to "webchecker". URLs that are disallowed
by the robots.txt file are reported as external URLs.

- Because the SGML parser is a bit slow, very large SGML files are
skipped. The size limit can be set with the -m option.

- When the server or protocol does not tell us a file's type, we guess
it based on the URL's suffix. The mimetypes.py module (also in this
directory) has a built-in table mapping most currently known suffixes,
and in addition attempts to read the mime.types configuration files in
the default locations of Netscape and the NCSA HTTP daemon.

- We follow links indicated by <A>, <AREA>, <FRAME> and <IMG> tags.
We also honor the <BASE> tag.

- Checking external links is now done by default; use -x to *disable*
this feature. External links are now checked during normal
processing. (XXX The status of a checked link could be categorized
better. Later...)


Usage: webchecker.py [option] ... [rooturl] ...

Options:

-R        -- restart from checkpoint file
-d file   -- checkpoint filename (default %(DUMPFILE)s)
-m bytes  -- skip HTML pages larger than this size (default %(MAXPAGE)d)
-n        -- reports only, no checking (use with -R)
-q        -- quiet operation (also suppresses external links report)
-r number -- number of links processed per round (default %(ROUNDSIZE)d)
-v        -- verbose operation; repeating -v will increase verbosity
-x        -- don't check external links (these are often slow to check)

Arguments:

rooturl   -- URL to start checking
             (default %(DEFROOT)s)
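
For instance, the following (hypothetical root URL) skips the checking
of external links and ignores pages larger than 100000 bytes:

    webchecker.py -x -m 100000 http://www.example.com/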

"""


__version__ = "$Revision$"


import sys
import os
from types import *
import string
import StringIO
import getopt
import pickle

import urllib
import urlparse
import sgmllib

import mimetypes
import robotparser


# Extract real version number if necessary
if __version__[0] == '$':
    _v = string.split(__version__)
    if len(_v) == 3:
        __version__ = _v[1]
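
# (A checked-out copy would typically carry an expanded keyword such as
# "$Revision: 1.1 $" -- hypothetical number -- which splits into three
# fields; the middle field is the bare version string kept above.)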


# Tunable parameters
DEFROOT = "file:/usr/local/etc/httpd/htdocs/"   # Default root URL
CHECKEXT = 1                            # Check external references (1 deep)
VERBOSE = 1                             # Verbosity level (0-3)
MAXPAGE = 150000                        # Ignore files bigger than this
ROUNDSIZE = 50                          # Number of links processed per round
DUMPFILE = "@webchecker.pickle"         # Pickled checkpoint
AGENTNAME = "webchecker"                # Agent name for robots.txt parser


# Global variables


def main():
    checkext = CHECKEXT
    verbose = VERBOSE
    maxpage = MAXPAGE
    roundsize = ROUNDSIZE
    dumpfile = DUMPFILE
    restart = 0
    norun = 0

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'Rd:m:nqr:vx')
    except getopt.error, msg:
        sys.stdout = sys.stderr
        print msg
        print __doc__%globals()
        sys.exit(2)
    for o, a in opts:
        if o == '-R':
            restart = 1
        if o == '-d':
            dumpfile = a
        if o == '-m':
            maxpage = string.atoi(a)
        if o == '-n':
            norun = 1
        if o == '-q':
            verbose = 0
        if o == '-r':
            roundsize = string.atoi(a)
        if o == '-v':
            verbose = verbose + 1
        if o == '-x':
            checkext = not checkext

    if verbose > 0:
        print AGENTNAME, "version", __version__

    if restart:
        c = load_pickle(dumpfile=dumpfile, verbose=verbose)
    else:
        c = Checker()

    c.setflags(checkext=checkext, verbose=verbose,
               maxpage=maxpage, roundsize=roundsize)

    if not restart and not args:
        args.append(DEFROOT)

    for arg in args:
        c.addroot(arg)

    if not norun:
        try:
            c.run()
        except KeyboardInterrupt:
            if verbose > 0:
                print "[run interrupted]"

    try:
        c.report()
    except KeyboardInterrupt:
        if verbose > 0:
            print "[report interrupted]"

    if c.save_pickle(dumpfile):
        if dumpfile == DUMPFILE:
            print "Use ``%s -R'' to restart." % sys.argv[0]
        else:
            print "Use ``%s -R -d %s'' to restart." % (sys.argv[0], dumpfile)


def load_pickle(dumpfile=DUMPFILE, verbose=VERBOSE):
    if verbose > 0:
        print "Loading checkpoint from %s ..." % dumpfile
    f = open(dumpfile, "rb")
    c = pickle.load(f)
    f.close()
    if verbose > 0:
        print "Done."
        print "Root:", string.join(c.roots, "\n      ")
    return c


class Checker:

    checkext = CHECKEXT
    verbose = VERBOSE
    maxpage = MAXPAGE
    roundsize = ROUNDSIZE

    validflags = tuple(dir())
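
    # dir() evaluated here, at class-definition time, captures just the flag
    # names assigned above, so setflags() below accepts only those keywords.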

    def __init__(self):
        self.reset()

    def setflags(self, **kw):
        for key in kw.keys():
            if key not in self.validflags:
                raise NameError, "invalid keyword argument: %s" % str(key)
        for key, value in kw.items():
            setattr(self, key, value)
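
    # Typical call, as in main() above (illustrative values):
    #     c.setflags(checkext=0, verbose=2, maxpage=150000, roundsize=50)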

    def reset(self):
        self.roots = []
        self.todo = {}
        self.done = {}
        self.bad = {}
        self.round = 0
        # The following are not pickled:
        self.robots = {}
        self.errors = {}
        self.urlopener = MyURLopener()
        self.changed = 0

    def __getstate__(self):
        return (self.roots, self.todo, self.done, self.bad, self.round)

    def __setstate__(self, state):
        self.reset()
        (self.roots, self.todo, self.done, self.bad, self.round) = state
        for root in self.roots:
            self.addrobot(root)
        for url in self.bad.keys():
            self.markerror(url)
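
    # Only roots/todo/done/bad/round go into the checkpoint; the robots
    # cache, the error table and the URL opener are rebuilt when the pickle
    # is loaded, which is why __setstate__ starts from reset().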

    def addroot(self, root):
        if root not in self.roots:
            troot = root
            scheme, netloc, path, params, query, fragment = \
                    urlparse.urlparse(root)
            i = string.rfind(path, "/") + 1
            if 0 < i < len(path):
                path = path[:i]
                troot = urlparse.urlunparse((scheme, netloc, path,
                                             params, query, fragment))
            self.roots.append(troot)
            self.addrobot(root)
            self.newlink(root, ("<root>", root))
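
    # The truncation above means that for a hypothetical root like
    # "http://host/dir/page.html" the directory "http://host/dir/" is what
    # gets recorded in self.roots, so sibling pages count as internal.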

    def addrobot(self, root):
        root = urlparse.urljoin(root, "/")
        if self.robots.has_key(root): return
        url = urlparse.urljoin(root, "/robots.txt")
        self.robots[root] = rp = robotparser.RobotFileParser()
        if self.verbose > 2:
            print "Parsing", url
        rp.debug = self.verbose > 3
        rp.set_url(url)
        try:
            rp.read()
        except IOError, msg:
            if self.verbose > 1:
                print "I/O error parsing", url, ":", msg

    def run(self):
        while self.todo:
            self.round = self.round + 1
            if self.verbose > 0:
                print
                print "Round %d (%s)" % (self.round, self.status())
                print
            urls = self.todo.keys()[:self.roundsize]
            for url in urls:
                self.dopage(url)
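
    # Each round works on a snapshot of at most roundsize pending URLs;
    # links discovered while processing them are added to self.todo and
    # picked up in a later round.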

    def status(self):
        return "%d total, %d to do, %d done, %d bad" % (
            len(self.todo)+len(self.done),
            len(self.todo), len(self.done),
            len(self.bad))

    def report(self):
        print
        if not self.todo: print "Final",
        else: print "Interim",
        print "Report (%s)" % self.status()
        self.report_errors()

    def report_errors(self):
        if not self.bad:
            print
            print "No errors"
            return
        print
        print "Error Report:"
        sources = self.errors.keys()
        sources.sort()
        for source in sources:
            triples = self.errors[source]
            print
            if len(triples) > 1:
                print len(triples), "Errors in", source
            else:
                print "Error in", source
            for url, rawlink, msg in triples:
                print "  HREF", url,
                if rawlink != url: print "(%s)" % rawlink,
                print
                print "    msg", msg

    def dopage(self, url):
        if self.verbose > 1:
            if self.verbose > 2:
                self.show("Check ", url, "  from", self.todo[url])
            else:
                print "Check ", url
        page = self.getpage(url)
        if page:
            for info in page.getlinkinfos():
                link, rawlink = info
                origin = url, rawlink
                self.newlink(link, origin)
        self.markdone(url)

    def newlink(self, url, origin):
        if self.done.has_key(url):
            self.newdonelink(url, origin)
        else:
            self.newtodolink(url, origin)

    def newdonelink(self, url, origin):
        self.done[url].append(origin)
        if self.verbose > 3:
            print "  Done link", url

    def newtodolink(self, url, origin):
        if self.todo.has_key(url):
            self.todo[url].append(origin)
            if self.verbose > 3:
                print "  Seen todo link", url
        else:
            self.todo[url] = [origin]
            if self.verbose > 3:
                print "  New todo link", url
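
    # Both self.todo and self.done map a URL to the list of (page, rawlink)
    # origins that referenced it, so a bad URL can be reported against every
    # page that links to it.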

    def markdone(self, url):
        self.done[url] = self.todo[url]
        del self.todo[url]
        self.changed = 1

    def inroots(self, url):
        for root in self.roots:
            if url[:len(root)] == root:
                root = urlparse.urljoin(root, "/")
                return self.robots[root].can_fetch(AGENTNAME, url)
        return 0
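
    # A URL that matches a root prefix is still treated as external when the
    # corresponding robots.txt disallows it; that is how disallowed URLs end
    # up reported as external links (see the module docstring).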

    def getpage(self, url):
        if url[:7] == 'mailto:' or url[:5] == 'news:':
            if self.verbose > 1: print " Not checking mailto/news URL"
            return None
        isint = self.inroots(url)
        if not isint:
            if not self.checkext:
                if self.verbose > 1: print " Not checking ext link"
                return None
            f = self.openpage(url)
            if f:
                self.safeclose(f)
            return None
        text, nurl = self.readhtml(url)
        if nurl != url:
            if self.verbose > 1:
                print " Redirected to", nurl
            url = nurl
        if text:
            return Page(text, url, verbose=self.verbose, maxpage=self.maxpage)

    def readhtml(self, url):
        text = None
        f, url = self.openhtml(url)
        if f:
            text = f.read()
            f.close()
        return text, url

    def openhtml(self, url):
        f = self.openpage(url)
        if f:
            url = f.geturl()
            info = f.info()
            if not self.checkforhtml(info, url):
                self.safeclose(f)
                f = None
        return f, url

    def openpage(self, url):
        try:
            return self.urlopener.open(url)
        except IOError, msg:
            msg = self.sanitize(msg)
            if self.verbose > 0:
                print "Error ", msg
            if self.verbose > 0:
                self.show(" HREF ", url, "  from", self.todo[url])
            self.setbad(url, msg)
            return None

    def checkforhtml(self, info, url):
        if info.has_key('content-type'):
            ctype = string.lower(info['content-type'])
        else:
            if url[-1:] == "/":
                return 1
            ctype, encoding = mimetypes.guess_type(url)
        if ctype == 'text/html':
            return 1
        else:
            if self.verbose > 1:
                print " Not HTML, mime type", ctype
            return 0
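
    # For instance, a hypothetical URL ending in ".gif" that arrives without
    # a Content-Type header is guessed as "image/gif" by mimetypes and
    # skipped, while a URL ending in "/" is assumed to be an HTML index.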

    def setgood(self, url):
        if self.bad.has_key(url):
            del self.bad[url]
            self.changed = 1
            if self.verbose > 0:
                print "(Clear previously seen error)"

    def setbad(self, url, msg):
        if self.bad.has_key(url) and self.bad[url] == msg:
            if self.verbose > 0:
                print "(Seen this error before)"
            return
        self.bad[url] = msg
        self.changed = 1
        self.markerror(url)

    def markerror(self, url):
        try:
            origins = self.todo[url]
        except KeyError:
            origins = self.done[url]
        for source, rawlink in origins:
            triple = url, rawlink, self.bad[url]
            self.seterror(source, triple)

    def seterror(self, url, triple):
        try:
            self.errors[url].append(triple)
        except KeyError:
            self.errors[url] = [triple]

    # The following used to be toplevel functions; they have been
    # changed into methods so they can be overridden in subclasses.

    def show(self, p1, link, p2, origins):
        print p1, link
        i = 0
        for source, rawlink in origins:
            i = i+1
            if i == 2:
                p2 = ' '*len(p2)
            print p2, source,
            if rawlink != link: print "(%s)" % rawlink,
            print

    def sanitize(self, msg):
        if isinstance(IOError, ClassType) and isinstance(msg, IOError):
            # Do the other branch recursively
            msg.args = self.sanitize(msg.args)
        elif isinstance(msg, TupleType):
            if len(msg) >= 4 and msg[0] == 'http error' and \
               isinstance(msg[3], InstanceType):
                # Remove the Message instance -- it may contain
                # a file object which prevents pickling.
                msg = msg[:3] + msg[4:]
        return msg

    def safeclose(self, f):
        try:
            url = f.geturl()
        except AttributeError:
            pass
        else:
            if url[:4] == 'ftp:' or url[:7] == 'file://':
                # Apparently ftp connections don't like to be closed
                # prematurely...
                text = f.read()
        f.close()

    def save_pickle(self, dumpfile=DUMPFILE):
        if not self.changed:
            if self.verbose > 0:
                print
                print "No need to save checkpoint"
        elif not dumpfile:
            if self.verbose > 0:
                print "No dumpfile, won't save checkpoint"
        else:
            if self.verbose > 0:
                print
                print "Saving checkpoint to %s ..." % dumpfile
            newfile = dumpfile + ".new"
            f = open(newfile, "wb")
            pickle.dump(self, f)
            f.close()
            try:
                os.unlink(dumpfile)
            except os.error:
                pass
            os.rename(newfile, dumpfile)
            if self.verbose > 0:
                print "Done."
            return 1
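
    # The dump is written to "<dumpfile>.new" first and only renamed over the
    # old file afterwards, which is why an interrupted save leaves the
    # previous checkpoint intact (as promised in the module docstring).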


class Page:

    def __init__(self, text, url, verbose=VERBOSE, maxpage=MAXPAGE):
        self.text = text
        self.url = url
        self.verbose = verbose
        self.maxpage = maxpage

    def getlinkinfos(self):
        size = len(self.text)
        if size > self.maxpage:
            if self.verbose > 0:
                print "Skip huge file", self.url
                print "  (%.0f Kbytes)" % (size*0.001)
            return []
        if self.verbose > 2:
            print "  Parsing", self.url, "(%d bytes)" % size
        parser = MyHTMLParser(verbose=self.verbose)
        parser.feed(self.text)
        parser.close()
        rawlinks = parser.getlinks()
        base = urlparse.urljoin(self.url, parser.getbase() or "")
        infos = []
        for rawlink in rawlinks:
            t = urlparse.urlparse(rawlink)
            t = t[:-1] + ('',)
            rawlink = urlparse.urlunparse(t)
            link = urlparse.urljoin(base, rawlink)
            infos.append((link, rawlink))
        return infos
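
    # The t[:-1] + ('',) step drops any #fragment before rejoining, so a raw
    # link such as "sub/page.html#sec" (hypothetical) against a base of
    # "http://host/dir/" contributes ("http://host/dir/sub/page.html",
    # "sub/page.html") to the returned list.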


class MyStringIO(StringIO.StringIO):

    def __init__(self, url, info):
        self.__url = url
        self.__info = info
        StringIO.StringIO.__init__(self)

    def info(self):
        return self.__info

    def geturl(self):
        return self.__url


class MyURLopener(urllib.FancyURLopener):

    http_error_default = urllib.URLopener.http_error_default
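
    # Fall back to the plain URLopener behavior for unhandled HTTP errors so
    # that they raise IOError (and get reported as bad links) instead of
    # being smoothed over by FancyURLopener.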

    def __init__(*args):
        self = args[0]
        apply(urllib.FancyURLopener.__init__, args)
        self.addheaders = [
            ('User-agent', 'Python-webchecker/%s' % __version__),
            ]

    def http_error_401(self, url, fp, errcode, errmsg, headers):
        return None

    def open_file(self, url):
        path = urllib.url2pathname(urllib.unquote(url))
        if path[-1] != os.sep:
            url = url + '/'
        if os.path.isdir(path):
            indexpath = os.path.join(path, "index.html")
            if os.path.exists(indexpath):
                return self.open_file(url + "index.html")
            try:
                names = os.listdir(path)
            except os.error, msg:
                raise IOError, msg, sys.exc_traceback
            names.sort()
            s = MyStringIO("file:"+url, {'content-type': 'text/html'})
            s.write('<BASE HREF="file:%s">\n' %
                    urllib.quote(os.path.join(path, "")))
            for name in names:
                q = urllib.quote(name)
                s.write('<A HREF="%s">%s</A>\n' % (q, q))
            s.seek(0)
            return s
        return urllib.FancyURLopener.open_file(self, path)
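
    # For a directory without index.html the synthesized "page" is a minimal
    # HTML document: a <BASE> tag pointing at the directory followed by one
    # <A HREF> per entry, which is exactly the kind of markup MyHTMLParser
    # below knows how to follow.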


class MyHTMLParser(sgmllib.SGMLParser):

    def __init__(self, verbose=VERBOSE):
        self.base = None
        self.links = {}
        self.myverbose = verbose
        sgmllib.SGMLParser.__init__(self)

    def start_a(self, attributes):
        self.link_attr(attributes, 'href')

    def end_a(self): pass

    def do_area(self, attributes):
        self.link_attr(attributes, 'href')

    def do_img(self, attributes):
        self.link_attr(attributes, 'src', 'lowsrc')

    def do_frame(self, attributes):
        self.link_attr(attributes, 'src')

    def link_attr(self, attributes, *args):
        for name, value in attributes:
            if name in args:
                if value: value = string.strip(value)
                if value: self.links[value] = None
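
    # sgmllib hands attributes over as (name, value) pairs; a hypothetical
    # <IMG SRC="pic.gif" LOWSRC="small.gif"> arrives here as
    # [('src', 'pic.gif'), ('lowsrc', 'small.gif')], and both values become
    # keys of self.links, which doubles as a duplicate filter.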

    def do_base(self, attributes):
        for name, value in attributes:
            if name == 'href':
                if value: value = string.strip(value)
                if value:
                    if self.myverbose > 1:
                        print "  Base", value
                    self.base = value

    def getlinks(self):
        return self.links.keys()

    def getbase(self):
        return self.base


if __name__ == '__main__':
    main()