Move cleanBin and hexdump into netutils.

This commit is contained in:
Aldo Cortesi 2012-09-24 11:21:12 +12:00
parent 21f74efa10
commit 0d59fd7e01
4 changed files with 8 additions and 54 deletions

View File

@@ -3,6 +3,7 @@ import urwid
from PIL import Image
from PIL.ExifTags import TAGS
import lxml.html, lxml.etree
import netlib.utils
import common
from .. import utils, encoding, flow
from ..contrib import jsbeautifier, html2text
@@ -22,7 +23,7 @@ def _view_text(content, total, limit):
Generates a body for a chunk of text.
"""
txt = []
for i in utils.cleanBin(content).splitlines():
for i in netlib.utils.cleanBin(content).splitlines():
txt.append(
urwid.Text(("text", i), wrap="any")
)
@@ -76,7 +77,7 @@ class ViewHex:
content_types = []
def __call__(self, hdrs, content, limit):
txt = []
for offset, hexa, s in utils.hexdump(content[:limit]):
for offset, hexa, s in netlib.utils.hexdump(content[:limit]):
txt.append(urwid.Text([
("offset", offset),
" ",
@@ -216,7 +217,7 @@ class ViewMultipart:
match = rx.search(parts[1])
if match:
keys.append(match.group(1) + ":")
vals.append(utils.cleanBin(
vals.append(netlib.utils.cleanBin(
"\n".join(parts[3+parts[2:].index(""):])
))
r = [
@@ -306,7 +307,7 @@ class ViewImage:
)
clean = []
for i in parts:
clean.append([utils.cleanBin(i[0]), utils.cleanBin(i[1])])
clean.append([netlib.utils.cleanBin(i[0]), netlib.utils.cleanBin(i[1])])
fmt = common.format_keyvals(
clean,
key = "header",

View File

@@ -14,6 +14,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, os
import netlib.utils
import flow, filt, utils
class DumpError(Exception): pass
@@ -175,7 +176,7 @@ class DumpMaster(flow.FlowMaster):
result = result + "\n\n" + self.indent(4, f.response.headers)
if self.o.verbosity > 2:
if utils.isBin(f.response.content):
d = utils.hexdump(f.response.content)
d = netlib.utils.hexdump(f.response.content)
d = "\n".join("%s\t%s %s"%i for i in d)
cont = self.indent(4, d)
elif f.response.content:
@@ -199,7 +200,7 @@ class DumpMaster(flow.FlowMaster):
print >> self.outfile, str_request(f.request)
print >> self.outfile, self.indent(4, f.request.headers)
if utils.isBin(f.request.content):
print >> self.outfile, self.indent(4, utils.hexdump(f.request.content))
print >> self.outfile, self.indent(4, netlib.utils.hexdump(f.request.content))
elif f.request.content:
print >> self.outfile, self.indent(4, f.request.content)
print >> self.outfile

View File

@@ -55,24 +55,6 @@ def isXML(s):
return False
def cleanBin(s, fixspacing=False):
    """
    Return s with non-printable bytes replaced so it is safe to display.

    Printable ASCII (codes 32-126) passes through unchanged. Tabs,
    newlines and carriage returns are kept as-is unless fixspacing is
    True, in which case they too become the "." placeholder, like every
    other control or high byte.
    """
    def _display(ch):
        # Printable ASCII range, exclusive of DEL (127).
        if 31 < ord(ch) < 127:
            return ch
        # Whitespace survives only when the caller did not ask for
        # fixed spacing.
        if ch in "\n\r\t" and not fixspacing:
            return ch
        return "."

    return "".join(_display(ch) for ch in s)
def pretty_json(s):
    # NOTE(review): only the head of this function is visible in this
    # diff hunk; the rest of its body is outside this view.
    try:
        p = json.loads(s)
@@ -96,25 +78,6 @@ def urlencode(s):
return urllib.urlencode(s, False)
def hexdump(s):
    """
    Return a list of tuples, one per 16-byte row of s:

        (offset, hex, str)

    offset is the row's byte offset as a 10-digit hex string, hex is
    the space-separated hex bytes padded to a constant width, and str
    is the row's bytes made printable via cleanBin.
    """
    # Fixes over the previous version: the docstring said "set of
    # typles" (it returns a list of tuples), and the inner generator
    # reused the outer loop variable name, shadowing it.
    parts = []
    for offset in range(0, len(s), 16):
        part = s[offset:offset + 16]
        x = " ".join("%.2x" % ord(ch) for ch in part)
        # Pad short (final) rows so the hex column is always
        # 16 * 2 digits + 15 separators = 47 characters wide.
        x = x.ljust(47)
        parts.append(
            ("%.10x" % offset, x, cleanBin(part, True))
        )
    return parts
def del_all(dict, keys):
for key in keys:
if key in dict:

View File

@@ -22,10 +22,6 @@ def test_isXml():
assert utils.isXML(" \n<foo")
def test_hexdump():
    # A buffer spanning several 16-byte rows, including NUL bytes,
    # must yield a non-empty dump.
    data = "one\0" * 10
    assert utils.hexdump(data)
def test_del_all():
d = dict(a=1, b=2, c=3)
utils.del_all(d, ["a", "x", "b"])
@@ -125,13 +121,6 @@ def test_parse_content_type():
assert v == ('text', 'html', {'charset': 'UTF-8'})
def test_cleanBin():
    # (args, expected): printable text is untouched, NULs become dots,
    # and newlines survive only when fixspacing is left off.
    cases = [
        (("one",), "one"),
        (("\00ne",), ".ne"),
        (("\nne",), "\nne"),
        (("\nne", True), ".ne"),
    ]
    for args, expected in cases:
        assert utils.cleanBin(*args) == expected
def test_safe_subn():
assert utils.safe_subn("foo", u"bar", "\xc2foo")