2019-09-10 14:58:26 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
2019-10-29 15:22:24 +00:00
|
|
|
from six import binary_type, string_types
|
2019-10-14 12:43:35 +00:00
|
|
|
|
2019-10-03 16:45:22 +00:00
|
|
|
import base64
|
2019-09-10 14:58:26 +00:00
|
|
|
import errno
|
|
|
|
import json
|
|
|
|
import os
|
2019-10-14 12:47:11 +00:00
|
|
|
import re
|
2019-09-10 14:58:26 +00:00
|
|
|
import requests
|
2019-09-23 12:13:52 +00:00
|
|
|
import xmltodict
|
2019-09-20 14:21:04 +00:00
|
|
|
import toml
|
2019-09-17 09:50:06 +00:00
|
|
|
import yaml
|
2019-09-10 14:58:26 +00:00
|
|
|
|
2019-10-14 12:43:35 +00:00
|
|
|
try:
|
|
|
|
# python 3
|
2019-10-14 14:32:01 +00:00
|
|
|
from urllib.parse import unquote
|
|
|
|
from urllib.parse import unquote_plus
|
2019-10-14 12:47:11 +00:00
|
|
|
from urllib.parse import urlencode
|
|
|
|
from urllib.parse import parse_qs
|
2019-10-14 12:43:35 +00:00
|
|
|
except ImportError:
|
|
|
|
# python 2
|
2019-10-14 14:32:01 +00:00
|
|
|
from urllib import unquote
|
|
|
|
from urllib import unquote_plus
|
2019-10-14 12:47:11 +00:00
|
|
|
from urllib import urlencode
|
|
|
|
from urlparse import parse_qs
|
2019-10-14 12:43:35 +00:00
|
|
|
|
|
|
|
|
2019-10-03 16:45:22 +00:00
|
|
|
def decode_base64(s, **kwargs):
    """
    Decode a base64 string, optionally deserializing the payload.

    Url-encoded characters and missing '=' padding are repaired
    before decoding.

    Keyword arguments:
    - format: optional payload format ('json', 'toml', 'yaml', 'xml');
      when given, the decoded text is deserialized with the matching
      decoder (unknown formats are ignored and the text returned as-is).
    - encoding: encoding used to turn the raw bytes into text; defaults
      to 'utf-8' when a format is given, otherwise None (bytes returned).
    Remaining kwargs are forwarded to the format decoder.
    """
    # undo any url-encoding that may wrap the base64 payload
    text = unquote(s)
    # restore '=' padding so the length is a multiple of 4
    remainder = len(text) % 4
    if remainder:
        text += '=' * (4 - remainder)
    data = base64.b64decode(text)
    fmt = kwargs.pop('format', None)
    encoding = kwargs.pop('encoding', 'utf-8' if fmt else None)
    if encoding:
        data = data.decode(encoding)
    if fmt:
        decode_func = {
            'json': decode_json,
            'toml': decode_toml,
            'yaml': decode_yaml,
            'xml': decode_xml,
        }.get(fmt.lower(), '')
        if decode_func:
            data = decode_func(data, **kwargs)
    return data
|
2019-10-03 16:45:22 +00:00
|
|
|
|
2019-09-10 14:58:26 +00:00
|
|
|
|
|
|
|
def decode_json(s, **kwargs):
    """Deserialize a JSON string; kwargs are passed to json.loads."""
    return json.loads(s, **kwargs)
|
|
|
|
|
|
|
|
|
2019-10-14 12:47:11 +00:00
|
|
|
def decode_query_string(s, **kwargs):
    """
    Parse a query string into a dict.

    Keyword arguments:
    - flat: when True (default) each value is a single string (the
      first occurrence); when False values are lists as returned by
      parse_qs.

    Raises ValueError if s is not a valid query string.
    """
    flat = kwargs.pop('flat', True)
    # the pattern is anchored at BOTH ends: the previous version used a
    # prefix-only match, silently accepting (and dropping) trailing
    # garbage; the nested quantifiers also risked catastrophic
    # backtracking on malformed input — this linear form does not
    qs_re = r'^[\w\-\%\+]+\=[\w\-\%\+]*(\&[\w\-\%\+]+\=[\w\-\%\+]*)*\&?$'
    qs_pattern = re.compile(qs_re)
    if qs_pattern.match(s):
        data = parse_qs(s)
        if flat:
            data = { key:value[0] for key, value in data.items() }
        return data
    else:
        raise ValueError('Invalid query string: {}'.format(s))
|
|
|
|
|
|
|
|
|
2019-09-23 12:13:52 +00:00
|
|
|
def decode_xml(s, **kwargs):
    """Deserialize an XML string via xmltodict; kwargs go to xmltodict.parse."""
    # plain dicts instead of xmltodict's default OrderedDict
    kwargs.setdefault('dict_constructor', dict)
    return xmltodict.parse(s, **kwargs)
|
|
|
|
|
|
|
|
|
2019-09-20 14:21:04 +00:00
|
|
|
def decode_toml(s, **kwargs):
    """Deserialize a TOML string; kwargs are passed to toml.loads."""
    return toml.loads(s, **kwargs)
|
|
|
|
|
|
|
|
|
2019-09-17 09:50:06 +00:00
|
|
|
def decode_yaml(s, **kwargs):
    """Deserialize a YAML string; kwargs are passed to yaml.load."""
    # NOTE(review): yaml.Loader can construct arbitrary Python objects —
    # unsafe on untrusted input; callers should pass Loader=yaml.SafeLoader
    # when the source is not trusted.
    kwargs.setdefault('Loader', yaml.Loader)
    return yaml.load(s, **kwargs)
|
2019-09-10 14:58:26 +00:00
|
|
|
|
|
|
|
|
2019-10-03 16:45:22 +00:00
|
|
|
def encode_base64(d, **kwargs):
    """
    Base64-encode a value, optionally serializing it first.

    Keyword arguments:
    - format: optional serialization ('json', 'toml', 'yaml', 'xml')
      applied to non-string values before encoding; string values are
      passed through untouched.
    - encoding: encoding used to turn the (serialized) string into
      bytes, and the base64 result back into text; defaults to 'utf-8'
      when a format is given, otherwise None (bytes returned).
    Remaining kwargs are forwarded to the format encoder.
    """
    fmt = kwargs.pop('format', None)
    encoding = kwargs.pop('encoding', 'utf-8' if fmt else None)
    data = d
    # serialize non-string payloads when a format was requested
    if fmt and not isinstance(data, string_types):
        encode_func = {
            'json': encode_json,
            'toml': encode_toml,
            'yaml': encode_yaml,
            'xml': encode_xml,
        }.get(fmt.lower(), '')
        if encode_func:
            data = encode_func(data, **kwargs)
    # convert text to bytes before base64-encoding
    if encoding and isinstance(data, string_types):
        data = data.encode(encoding)
    data = base64.b64encode(data)
    # and back to text when an encoding is known
    if encoding and isinstance(data, binary_type):
        data = data.decode(encoding)
    return data
|
|
|
|
|
|
|
|
|
2019-09-10 14:58:26 +00:00
|
|
|
def encode_json(d, **kwargs):
    """Serialize a value to a JSON string; kwargs are passed to json.dumps."""
    return json.dumps(d, **kwargs)
|
|
|
|
|
|
|
|
|
2019-10-14 12:47:11 +00:00
|
|
|
def encode_query_string(d, **kwargs):
    """Encode a dict as a url query string; kwargs are passed to urlencode."""
    return urlencode(d, **kwargs)
|
|
|
|
|
|
|
|
|
2019-09-20 14:21:04 +00:00
|
|
|
def encode_toml(d, **kwargs):
    """Serialize a dict to a TOML string; kwargs are passed to toml.dumps."""
    return toml.dumps(d, **kwargs)
|
|
|
|
|
|
|
|
|
2019-09-23 12:13:52 +00:00
|
|
|
def encode_xml(d, **kwargs):
    """Serialize a dict to an XML string; kwargs are passed to xmltodict.unparse."""
    return xmltodict.unparse(d, **kwargs)
|
|
|
|
|
|
|
|
|
2019-09-17 09:50:06 +00:00
|
|
|
def encode_yaml(d, **kwargs):
    """Serialize a value to a YAML string; kwargs are passed to yaml.dump."""
    return yaml.dump(d, **kwargs)
|
2019-09-10 14:58:26 +00:00
|
|
|
|
|
|
|
|
2019-10-03 16:42:44 +00:00
|
|
|
def read_content(s):
    """
    Resolve s to its content.

    s may be an http(s) url (fetched), a path to an existing file
    (read from disk), or raw data (returned unchanged).
    """
    if s.startswith(('http://', 'https://')):
        return read_url(s)
    if os.path.isfile(s):
        return read_file(s)
    # not a url, not a file on disk: treat as raw data
    return s
|
|
|
|
|
|
|
|
|
2019-09-10 14:58:26 +00:00
|
|
|
def read_file(filepath):
    """
    Return the text content of the file at filepath.

    Uses a context manager so the handle is closed even if read()
    raises (the previous open/read/close sequence leaked the handle
    on error).
    """
    with open(filepath, 'r') as handler:
        return handler.read()
|
|
|
|
|
|
|
|
|
|
|
|
def read_url(url, *args, **kwargs):
    """
    Fetch url with requests.get and return the response body text.

    Extra args/kwargs are forwarded to requests.get.
    Raises ValueError when the response status is not 200 OK.
    """
    response = requests.get(url, *args, **kwargs)
    if response.status_code != requests.codes.ok:
        raise ValueError(
            'Invalid url response status code: {}.'.format(
                response.status_code))
    return response.text
|
2019-09-10 14:58:26 +00:00
|
|
|
|
|
|
|
|
|
|
|
def write_file(filepath, content):
    """
    Write content to filepath, creating missing parent directories.

    Returns True on success.
    """
    # https://stackoverflow.com/questions/12517451/automatically-creating-directories-with-file-output
    filedir = os.path.dirname(filepath)
    # guard against a bare filename: dirname is '' and os.makedirs('')
    # raises ENOENT, which the previous version re-raised
    if filedir and not os.path.exists(filedir):
        try:
            os.makedirs(filedir)
        except OSError as e:
            # guard against race condition (dir created concurrently)
            if e.errno != errno.EEXIST:
                raise e
    # context manager: handle is closed even if write() raises
    with open(filepath, 'w+') as handler:
        handler.write(content)
    return True
|