python, urllib3

Static time between retries in urllib3


I want to use a static time between retries instead of the exponential backoff factor in the urllib3 Retry class.

I have tried wrapping the Retry class to implement a static retry. However, while LoggingRetry is instantiated with the proper parameters, the default values are used when the actual retry happens. In the example below there should be two seconds between retries: the backoff_factor persists, but I cannot get delay_between_retries to stick.

import time
import urllib3
from urllib3.util.retry import Retry
from urllib3.exceptions import MaxRetryError
from requests.exceptions import HTTPError
import traceback



# Define the retry strategy with logging
class LoggingRetry(Retry):
    def __init__(self, delay_between_retries=0, *args, **kwargs):
        print(f"Initializing LoggingRetry with delay_between_retries={delay_between_retries}")
        self.delay_between_retries = delay_between_retries
        super().__init__(*args, **kwargs)

    def sleep(self, response=None):
        backoff = self.get_backoff_time()
        total_sleep_time = self.delay_between_retries + backoff
        print(f"LoggingRetry sleep -  delay_between_retries: {self.delay_between_retries}, Backoff time: {backoff}")

        time.sleep(total_sleep_time)


class MyClient:
    def __init__(self, total_tries=3, delay_between_retries=0, backoff_factor=0):
        self.total_tries = total_tries
        self.delay_between_retries = delay_between_retries
        self.backoff_factor = backoff_factor

        # Define a retry strategy with a custom delay_between_retries
        self.retry_strategy = LoggingRetry(
            total=self.total_tries,
            status_forcelist=[202],
            backoff_factor=self.backoff_factor,
            delay_between_retries=self.delay_between_retries,
            raise_on_status=False,
            allowed_methods=frozenset(['GET'])
        )
        
        # Initialize HTTP client with retry strategy
        self.http = urllib3.PoolManager(retries=self.retry_strategy, maxsize=4, block=True)

    
    def request_data(self, url):    
        try:
            response = self.http.request('GET', url,)
            return response
        except MaxRetryError as me:
            print(f"Max retries reached: {me}")
        except HTTPError as he:
            print(f"An HTTP error occurred: {he}")
            traceback.print_exc()
        except Exception as e:
            print(f"An error occurred: {e}")
            traceback.print_exc()

MyClient(delay_between_retries=2, backoff_factor=0).request_data(url='http://0.0.0.202/')

Solution

  • Override the new() method in your subclass:

    class LoggingRetry(Retry):
        ...
    
        def new(self, *args, **kwargs):
            return super().new(
               *args,
               **kwargs,
               delay_between_retries=self.delay_between_retries,
            )
    

    Explanation: the way urllib3.Retry works is that each retry attempt creates a new Retry instance with incremented retry counters (Retry.increment() calls Retry.new()). The default new() only copies the standard Retry parameters, so the fresh instance is constructed without delay_between_retries and falls back to the default of 0. Overriding new() forwards the extra attribute to every new instance; you could also override Retry.increment if you need more control than this.
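
    For reference, a complete sketch that combines the two overrides might look like the following (FixedDelayRetry is just an illustrative name, the parameters mirror the question's, and the sleep() override ignores the Retry-After header for simplicity):

    import time
    import urllib3
    from urllib3.util.retry import Retry


    class FixedDelayRetry(Retry):
        """Retry with a constant pause between attempts instead of exponential backoff."""

        def __init__(self, delay_between_retries=0, *args, **kwargs):
            self.delay_between_retries = delay_between_retries
            super().__init__(*args, **kwargs)

        def new(self, **kwargs):
            # increment() builds the Retry object for the next attempt via new();
            # forward the custom attribute so it survives every retry.
            kwargs.setdefault("delay_between_retries", self.delay_between_retries)
            return super().new(**kwargs)

        def sleep(self, response=None):
            # Fixed delay instead of the exponential backoff sleep.
            time.sleep(self.delay_between_retries)


    retries = FixedDelayRetry(
        delay_between_retries=2,
        total=3,
        status_forcelist=[202],
        raise_on_status=False,
        allowed_methods=frozenset(["GET"]),
    )
    http = urllib3.PoolManager(retries=retries)

    If you prefer the increment() route instead, the same effect can be had by calling super().increment(...) and copying self.delay_between_retries onto the Retry object it returns.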