mirror of https://github.com/rq/rq.git
Improve the lint situation (#1688)
* Move common flake8 options into config file

  Currently --max-line-length is specified in two places. Use the existing value in the config file as the single source of truth, and move --count and --statistics into the config file as well.

* Fix some lints
This commit is contained in:
parent 01635dc809
commit 9db728921d
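Since flake8 automatically reads the [flake8] section of setup.cfg, flags that were repeated on every invocation can live in one place. A minimal sketch of the effect, using the values from the setup.cfg hunk below:

    # before: options repeated on each command line
    flake8 . --count --max-line-length=120 --statistics

    # after: setup.cfg supplies max-line-length, count and statistics
    flake8 .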
@@ -30,6 +30,6 @@ jobs:
     - name: Lint with flake8
       run: |
         # stop the build if there are Python syntax errors or undefined names
-        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+        flake8 . --select=E9,F63,F7,F82 --show-source
         # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-        flake8 . --count --exit-zero --max-complexity=5 --max-line-length=120 --statistics
+        flake8 . --exit-zero --max-complexity=5
@@ -24,7 +24,6 @@ from rq.defaults import (DEFAULT_CONNECTION_CLASS, DEFAULT_JOB_CLASS,
 from rq.exceptions import InvalidJobOperationError
 from rq.registry import FailedJobRegistry, clean_registries
 from rq.utils import import_attribute, get_call_string, make_colorizer
 from rq.serializers import DefaultSerializer
 from rq.suspension import (suspend as connection_suspend,
                            resume as connection_resume, is_suspended)
 from rq.worker_registration import clean_worker_registry
@@ -11,7 +11,6 @@ from ast import literal_eval
 from shutil import get_terminal_size

 import click
 import redis
 from redis import Redis
 from redis.sentinel import Sentinel
 from rq.defaults import (DEFAULT_CONNECTION_CLASS, DEFAULT_JOB_CLASS,
@@ -3,8 +3,7 @@ import sys

 def is_python_version(*versions):
     for version in versions:
-        if (sys.version_info[0] == version[0] and
-                sys.version_info >= version):
+        if (sys.version_info[0] == version[0] and sys.version_info >= version):
             return True
     return False
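For context, is_python_version() reports whether the running interpreter matches any of the given version floors within the same major line. A small self-contained illustration of the function shown above (the sample values are hypothetical):

    import sys

    def is_python_version(*versions):
        for version in versions:
            if (sys.version_info[0] == version[0] and sys.version_info >= version):
                return True
        return False

    # e.g. on CPython 3.9:
    print(is_python_version((3, 6)))  # True: same major line and 3.9 >= 3.6
    print(is_python_version((2, 7)))  # False: major version differs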
@@ -1,8 +1,3 @@
-from functools import partial
-
-from redis import Redis
-
-
 def fix_return_type(func):
     # deliberately no functools.wraps() call here, since the function being
     # wrapped is a partial, which has no module
@@ -13,7 +13,7 @@ class job:  # noqa
     def __init__(self, queue, connection=None, timeout=None,
                  result_ttl=DEFAULT_RESULT_TTL, ttl=None,
                  queue_class=None, depends_on=None, at_front=None, meta=None,
-                 description=None, failure_ttl=None, retry=None, on_failure=None,
+                 description=None, failure_ttl=None, retry=None, on_failure=None,
                  on_success=None):
         """A decorator that adds a ``delay`` method to the decorated function,
         which in turn creates a RQ job when called. Accepts a required
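The docstring here describes RQ's @job decorator, which makes a plain function enqueueable. A minimal usage sketch against the public API (the Redis connection and queue name are illustrative):

    from redis import Redis
    from rq.decorators import job

    @job('low', connection=Redis(), timeout=180)
    def add(x, y):
        return x + y

    job_instance = add.delay(3, 4)  # enqueues add(3, 4) on the 'low' queue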
@@ -711,7 +711,7 @@ class Job:
         without worrying about the internals required to implement job
         cancellation.

-        You can enqueue the jobs dependents optionally,
+        You can enqueue the jobs dependents optionally,
         Same pipelining behavior as Queue.enqueue_dependents on whether or not a pipeline is passed in.
         """
         if self.is_canceled:
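Per the docstring, cancellation can optionally enqueue the job's dependents, with the same pipelining behavior as Queue.enqueue_dependents. A hedged sketch of the call (assuming the cancel(pipeline=None, enqueue_dependents=False) signature of this era of RQ; the queue and task names are illustrative):

    job = queue.enqueue(build_report)        # hypothetical task
    job.cancel(enqueue_dependents=True)      # cancel this job, but let its dependents run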
@@ -16,11 +16,12 @@ from collections.abc import Iterable

 from redis.exceptions import ResponseError

-from .compat import as_text, is_python_version, string_types
+from .compat import as_text, string_types
 from .exceptions import TimeoutFormatError

 logger = logging.getLogger(__name__)


 class _Colorizer:
     def __init__(self):
         esc = "\x1b["
@@ -277,7 +278,7 @@ def get_version(connection):
     This function also correctly handles 4 digit redis server versions.
     """
     try:
-        return tuple(int(i) for i in connection.info("server")["redis_version"].split('.')[:3])
+        return tuple(int(i) for i in connection.info("server")["redis_version"].split('.')[:3])
     except ResponseError:  # fakeredis doesn't implement Redis' INFO command
         return (5, 0, 9)
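The expression keeps only the first three numeric components of the reported version, which is how four-digit Redis server versions remain comparable as 3-tuples. The same parsing, checked standalone:

    # mirrors the expression in get_version()
    assert tuple(int(i) for i in "6.2.6".split('.')[:3]) == (6, 2, 6)
    # a four-digit server version is truncated to its first three parts
    assert tuple(int(i) for i in "4.0.14.1".split('.')[:3]) == (4, 0, 14)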
@@ -36,8 +36,7 @@ from .queue import Queue
 from .registry import FailedJobRegistry, StartedJobRegistry, clean_registries
 from .scheduler import RQScheduler
 from .suspension import is_suspended
-from .timeouts import (JobTimeoutException, HorseMonitorTimeoutException,
-                       UnixSignalDeathPenalty, TimerDeathPenalty)
+from .timeouts import JobTimeoutException, HorseMonitorTimeoutException, UnixSignalDeathPenalty
 from .utils import (backend_class, ensure_list, get_version,
                     make_colorizer, utcformat, utcnow, utcparse)
 from .version import VERSION
@@ -8,3 +8,5 @@ universal = 1
 [flake8]
 max-line-length=120
 ignore=E731
+count=True
+statistics=True
@@ -222,9 +222,11 @@ def kill_worker(pid, double_kill, interval=0.5):


 class Serializer:
-    def loads(self): pass
+    def loads(self):
+        pass

-    def dumps(self): pass
+    def dumps(self):
+        pass


 def start_worker(queue_name, conn_kwargs, worker_name, burst):
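The Serializer stub above only has to satisfy RQ's serializer protocol: an object exposing dumps and loads callables. A working sketch of such an object (illustrative, not part of this diff; rq.serializers.JSONSerializer plays the same role in the tests below):

    import json

    class JSONishSerializer:
        # dumps turns a Python object into something the Redis client can store;
        # loads must invert whatever dumps produced
        @staticmethod
        def dumps(obj):
            return json.dumps(obj)

        @staticmethod
        def loads(data):
            return json.loads(data)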
@@ -238,6 +240,7 @@ def start_worker(queue_name, conn_kwargs, worker_name, burst):
     w = Worker([queue_name], name=worker_name, connection=Redis(**conn_kwargs))
     w.work(burst=burst)

+
 def start_worker_process(queue_name, connection=None, worker_name=None, burst=False):
     """
     Use multiprocessing to start a new worker in a separate process.
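Per its docstring, start_worker_process forks a worker with multiprocessing and hands back the process object (the next hunk shows it returning p), so tests can run jobs concurrently and synchronize afterwards. A usage sketch based on the signature shown (join() is plain multiprocessing, not part of this diff):

    p = start_worker_process('default', burst=True)
    p.join()  # wait for the burst worker to drain the queue and exit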
@@ -248,6 +251,7 @@ def start_worker_process(queue_name, connection=None, worker_name=None, burst=False):
     p.start()
     return p

+
 def burst_two_workers(queue, timeout=2, tries=5, pause=0.1):
     """
     Get two workers working simultaneously in burst mode, on a given queue.
@@ -406,7 +406,7 @@ class TestRQCli(RQTestCase):
         runner = CliRunner()
         job = q.enqueue(say_hello)
         runner.invoke(main, ['worker', '-u', self.redis_url,
-                             '--serializer rq.serializer.JSONSerializer'])
+                             '--serializer rq.serializer.JSONSerializer'])
         self.assertIn(job.id, q.job_ids)

     def test_cli_enqueue(self):
@@ -439,7 +439,7 @@ class TestRQCli(RQTestCase):
         self.assertTrue(queue.is_empty())

         runner = CliRunner()
-        result = runner.invoke(main, ['enqueue', '-u', self.redis_url, '-S', 'rq.serializers.JSONSerializer', 'tests.fixtures.say_hello'])
+        result = runner.invoke(main, ['enqueue', '-u', self.redis_url, '-S', 'rq.serializers.JSONSerializer', 'tests.fixtures.say_hello'])
         self.assert_normal_execution(result)

         prefix = 'Enqueued tests.fixtures.say_hello() with job-id \''
@@ -106,7 +106,7 @@ class TestDecorator(RQTestCase):

         bar_job = bar.delay()

-        self.assertEqual(foo_job._dependency_ids,[])
+        self.assertEqual(foo_job._dependency_ids, [])
         self.assertIsNone(foo_job._dependency_id)

         self.assertEqual(foo_job.dependency, None)
@@ -143,8 +143,8 @@ class TestDecorator(RQTestCase):
         self.assertIsNone(foo_job._dependency_id)
         self.assertIsNone(bar_job._dependency_id)

-        self.assertEqual(foo_job._dependency_ids,[])
-        self.assertEqual(bar_job._dependency_ids,[])
+        self.assertEqual(foo_job._dependency_ids, [])
+        self.assertEqual(bar_job._dependency_ids, [])
         self.assertEqual(baz_job._dependency_id, bar_job.id)
         self.assertEqual(baz_job.dependency, bar_job)
         self.assertEqual(baz_job.dependency.id, bar_job.id)
@@ -152,7 +152,7 @@ class TestDecorator(RQTestCase):
     def test_decorator_accepts_on_failure_function_as_argument(self):
         """Ensure that passing in on_failure function to the decorator sets the
         correct on_failure function on the job.
-        """
+        """
         # Only functions and builtins are supported as callback
         @job('default', on_failure=Job.fetch)
         def foo():
@@ -167,7 +167,6 @@ class TestDecorator(RQTestCase):
         result_job = Job.fetch(id=result.id, connection=self.testconn)
         self.assertEqual(result_job.failure_callback, print)

-
     def test_decorator_accepts_on_success_function_as_argument(self):
         """Ensure that passing in on_failure function to the decorator sets the
         correct on_success function on the job.
@@ -178,7 +177,7 @@ class TestDecorator(RQTestCase):
             return 'Foo'
         with self.assertRaises(ValueError):
             result = foo.delay()


         @job('default', on_success=print)
         def hello():
             return 'Hello'
@@ -1,6 +1,6 @@
 import json
 from datetime import datetime, timedelta, timezone
-from rq.serializers import DefaultSerializer, JSONSerializer
+from rq.serializers import JSONSerializer
 from unittest.mock import patch

 from rq import Retry, Queue
@@ -217,7 +217,7 @@ class TestRegistry(RQTestCase):
         self.assertEqual(self.registry.count, 2)
         self.assertEqual(len(self.registry), 2)

-        # Make sure
+        # Make sure

     def test_clean_registries(self):
         """clean_registries() cleans Started and Finished job registries."""
@@ -4,7 +4,6 @@ from multiprocessing import Process

 from unittest import mock
 from rq import Queue
 from rq.compat import PY2
 from rq.exceptions import NoSuchJobError
 from rq.job import Job, Retry
 from rq.registry import FinishedJobRegistry, ScheduledJobRegistry
@@ -96,7 +95,7 @@ class TestScheduledJobRegistry(RQTestCase):
         with mock_tz, mock_day, mock_atz:
             registry.schedule(job, datetime(2019, 1, 1))
             self.assertEqual(self.testconn.zscore(registry.key, job.id),
-                             1546300800 + 18000)  # 2019-01-01 UTC in Unix timestamp
+                             1546300800 + 18000)  # 2019-01-01 UTC in Unix timestamp

             # second, time.daylight != 0 (in DST)
             # mock the sitatuoin for American/New_York not in DST (UTC - 4)
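The constants in that assertion check out: 1546300800 is 2019-01-01 00:00 UTC as a Unix timestamp, and 18000 seconds is the 5-hour offset of America/New_York outside DST, so the naive datetime scheduled in New York local time lands five hours later in UTC. A quick verification:

    from datetime import datetime, timezone

    assert int(datetime(2019, 1, 1, tzinfo=timezone.utc).timestamp()) == 1546300800
    assert 5 * 3600 == 18000  # UTC-5: local midnight is 05:00 UTC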
@@ -317,7 +316,7 @@ class TestWorker(RQTestCase):

         p.start()
         queue.enqueue_at(
-            datetime(2019, 1, 1, tzinfo=timezone.utc),
+            datetime(2019, 1, 1, tzinfo=timezone.utc),
             say_hello, meta={'foo': 'bar'}
         )
         worker.work(burst=False, with_scheduler=True)
@@ -326,6 +325,7 @@ class TestWorker(RQTestCase):
         registry = FinishedJobRegistry(queue=queue)
         self.assertEqual(len(registry), 1)

+
 class TestQueue(RQTestCase):

     def test_enqueue_at(self):
@@ -1,6 +1,5 @@
 import re
 import datetime
 from unittest import mock

 from redis import Redis
@@ -8,7 +8,7 @@ import sys
 import time
 import zlib

-from datetime import datetime, timedelta, timezone
+from datetime import datetime, timedelta
 from multiprocessing import Process
 from time import sleep
@@ -29,7 +29,7 @@ from tests.fixtures import (

 from rq import Queue, SimpleWorker, Worker, get_current_connection
 from rq.compat import as_text, PY2
-from rq.job import Job, JobStatus, Dependency, Retry
+from rq.job import Job, JobStatus, Retry
 from rq.registry import StartedJobRegistry, FailedJobRegistry, FinishedJobRegistry
 from rq.suspension import resume, suspend
 from rq.utils import utcnow