Bump version to 1.5.0

This commit is contained in:
Selwin Ong 2020-07-26 17:49:09 +07:00
parent a62b762200
commit f3c86c02c6
7 changed files with 64 additions and 20 deletions

View File

@@ -1,3 +1,11 @@
### RQ 1.5.0 (2020-07-26)
* Failed jobs can now be retried. Thanks @selwin!
* Fixed scheduler on Python > 3.8.0. Thanks @selwin!
* RQ is now aware of which version of Redis server it's running on. Thanks @aparcar!
* RQ now uses `hset()` on redis-py >= 3.5.0. Thanks @aparcar!
* Fixed incorrect worker timeout calculation in SimpleWorker.execute_job(). Thanks @davidmurray!
* Made horse handling logic more robust. Thanks @wevsty!
### RQ 1.4.3 (2020-06-28)
* Added `job.get_position()` and `queue.get_job_position()`. Thanks @aparcar!
* Longer TTLs for worker keys to prevent them from expiring inside the worker lifecycle. Thanks @selwin!
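As a rough illustration of the redis-py-related entries above (this is not RQ's internal code), the server-version awareness and the `hset()` switch boil down to checks like the following, with `rq:job:example` as a stand-in key:

```python
import redis
from redis import Redis

connection = Redis()

# Server side: redis-py exposes the Redis server version via INFO
server_version = connection.info('server')['redis_version']

# Client side: redis-py 3.5.0 added a `mapping` argument to hset()
# and deprecated hmset(), hence the version-dependent call below
fields = {'status': 'queued', 'origin': 'default'}
if tuple(int(part) for part in redis.__version__.split('.')[:2]) >= (3, 5):
    connection.hset('rq:job:example', mapping=fields)
else:
    connection.hmset('rq:job:example', fields)
```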

View File

@@ -45,14 +45,36 @@ Then, create an RQ queue:
from redis import Redis
from rq import Queue
q = Queue(connection=Redis())
queue = Queue(connection=Redis())
```
And enqueue the function call:
```python
from my_module import count_words_at_url
job = q.enqueue(count_words_at_url, 'http://nvie.com')
job = queue.enqueue(count_words_at_url, 'http://nvie.com')
```
Scheduling jobs is just as easy:
```python
# Schedule job to run at 9:15, October 8th
job = queue.enqueue_at(datetime(2019, 10, 8, 9, 15), say_hello)
# Schedule job to run in 10 seconds
job = queue.enqueue_in(timedelta(seconds=10), say_hello)
```
Retrying failed jobs is also supported:
```python
from rq import Retry
# Retry up to 3 times; failed jobs will be requeued immediately
queue.enqueue(say_hello, retry=Retry(max=3))
# Retry up to 3 times, with configurable intervals between retries
queue.enqueue(say_hello, retry=Retry(max=3, interval=[10, 30, 60]))
```
For a more complete example, refer to the [docs][d]. But this is the essence.
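Taken together, a self-contained version of the scheduling and retry snippets above might look like this sketch (`say_hello` is a placeholder task; in a real project it must live in a module the worker can import):

```python
from datetime import datetime, timedelta

from redis import Redis
from rq import Queue, Retry

def say_hello(name='World'):
    # Placeholder task; define it in an importable module for real use
    return 'Hello, %s!' % name

queue = Queue(connection=Redis())

# Run at a fixed point in time, or after a delay
queue.enqueue_at(datetime(2019, 10, 8, 9, 15), say_hello)
queue.enqueue_in(timedelta(seconds=10), say_hello)

# Retry up to 3 times, with increasing delays between attempts
queue.enqueue(say_hello, retry=Retry(max=3, interval=[10, 30, 60]))
```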
@@ -64,7 +86,7 @@ To start executing enqueued function calls in the background, start a worker
from your project's directory:
```console
$ rq worker
$ rq worker --with-scheduler
*** Listening for work on default
Got count_words_at_url('http://nvie.com') from default
Job result = 818
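The `--with-scheduler` flag makes the worker also run the scheduler that moves `enqueue_at`/`enqueue_in` jobs onto their queue once they are due. A rough programmatic equivalent, assuming `Worker.work()` accepts the `with_scheduler` keyword as in recent RQ releases, is:

```python
from redis import Redis
from rq import Queue, Worker

connection = Redis()
queue = Queue(connection=connection)

# Roughly what `rq worker --with-scheduler` does for the default queue
worker = Worker([queue], connection=connection)
worker.work(with_scheduler=True)
```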

View File

@@ -16,14 +16,14 @@ navigation:
url: /docs/results/
- text: Jobs
url: /docs/jobs/
- text: Exceptions
- text: Exceptions & Retries
url: /docs/exceptions/
- text: Scheduling Jobs
url: /docs/scheduling/
- text: Monitoring
url: /docs/monitoring/
- text: Job Registries
url: /docs/job_registries/
- text: Monitoring
url: /docs/monitoring/
- text: Connections
url: /docs/connections/
- text: Testing

View File

@@ -20,7 +20,7 @@ body
header
{
background: url(../img/ribbon.png) no-repeat 50% 0;
max-width: 430px;
max-width: 630px;
width: 100%;
text-align: center;
@@ -86,7 +86,7 @@ header a:hover
.container
{
margin: 0 auto;
max-width: 430px;
max-width: 630px;
width: 100%;
}

View File

@@ -36,14 +36,30 @@ And enqueue the function call:
{% highlight python %}
from my_module import count_words_at_url
result = q.enqueue(
count_words_at_url, 'http://nvie.com')
result = q.enqueue(count_words_at_url, 'http://nvie.com')
{% endhighlight %}
For a more complete example, refer to the [docs][d]. But this is the essence.
Scheduling jobs is similarly easy:
[d]: {{site.baseurl}}docs/
{% highlight python %}
# Schedule job to run at 9:15, October 8th
job = queue.enqueue_at(datetime(2019, 10, 8, 9, 15), say_hello)
# Schedule job to run in 10 seconds
job = queue.enqueue_in(timedelta(seconds=10), say_hello)
{% endhighlight %}
You can also ask RQ to retry failed jobs:
{% highlight python %}
from rq import Retry
# Retry up to 3 times; failed jobs will be requeued immediately
queue.enqueue(say_hello, retry=Retry(max=3))
# Retry up to 3 times, with configurable intervals between retries
queue.enqueue(say_hello, retry=Retry(max=3, interval=[10, 30, 60]))
{% endhighlight %}
### The worker
@@ -51,7 +67,7 @@ To start executing enqueued function calls in the background, start a worker
from your project's directory:
{% highlight console %}
$ rq worker
$ rq worker --with-scheduler
*** Listening for work on default
Got count_words_at_url('http://nvie.com') from default
Job result = 818

View File

@@ -2,4 +2,4 @@
from __future__ import (absolute_import, division, print_function,
unicode_literals)
VERSION = '1.4.3'
VERSION = '1.5.0'
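If you want to confirm which release is installed after upgrading, the version string bumped above is importable (assuming the usual `rq/version.py` module path):

```python
from rq.version import VERSION

# Prints '1.5.0' once this release is installed
print(VERSION)
```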

View File

@@ -843,25 +843,23 @@ class Worker(object):
self.connection,
job_class=self.job_class
)
# Requeue/reschedule if retry is configured
if job.retries_left and job.retries_left > 0:
retry = True
retry_interval = job.get_retry_interval()
job.retries_left = job.retries_left - 1
job.retries_left = job.retries_left - 1
else:
retry = False
job.set_status(JobStatus.FAILED, pipeline=pipeline)
started_job_registry.remove(job, pipeline=pipeline)
if not self.disable_default_exception_handler:
failed_job_registry = FailedJobRegistry(job.origin, job.connection,
job_class=self.job_class)
failed_job_registry.add(job, ttl=job.failure_ttl,
exc_string=exc_string, pipeline=pipeline)
self.set_current_job_id(None, pipeline=pipeline)
self.increment_failed_job_count(pipeline)
@@ -869,7 +867,7 @@ class Worker(object):
self.increment_total_working_time(
job.ended_at - job.started_at, pipeline
)
if retry:
if retry_interval:
scheduled_datetime = datetime.now(timezone.utc) + timedelta(seconds=retry_interval)
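The hunk above is where a failed job is either rescheduled, requeued, or left as failed. Stripped of pipelining and registry bookkeeping (and glossing over whether the old `retry` flag or the new `retry_interval` check gates the branch), the decision reads roughly like this standalone sketch; the helper names are simplified and are not RQ's actual API:

```python
from datetime import datetime, timedelta, timezone

def handle_failed_job(job, requeue, schedule_at, mark_failed):
    """Simplified sketch of the retry decision in the hunk above.

    `job` is assumed to expose `retries_left` and `get_retry_interval()`,
    mirroring the diff; `requeue`, `schedule_at` and `mark_failed` stand in
    for RQ's queue and registry bookkeeping.
    """
    if job.retries_left and job.retries_left > 0:
        retry_interval = job.get_retry_interval()
        job.retries_left -= 1
        if retry_interval:
            # An interval is configured: run the job again after the delay
            run_at = datetime.now(timezone.utc) + timedelta(seconds=retry_interval)
            schedule_at(job, run_at)
        else:
            # No interval configured: put the job straight back on its queue
            requeue(job)
    else:
        # No retries left (or none configured): the job stays failed
        mark_failed(job)
```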