Tags: simulation, simpy, batching

Simulation of servers processing batches with interrupting set-up/switch-on times


I would like to create a system with servers that need time to set up before being ready to serve. The system starts to set up a server whenever M (2, 3, ...) customers are in the queue. If the number of customers in the system reaches the maximum K (50), the arriving customer will balk.

When a batch (a group of M customers) leaves the system, we check whether there are M more customers (another batch) waiting to be served. If so, we keep the server ON; otherwise, we turn the server off immediately.

This behaviour was implemented thanks to the answer by Michael R. Gibbs: Simulating a system of batching jobs with interrupting set-up/switch-on times using SimPy.

I would like to add another policy to the system so that the number of servers is limited. When I add a condition to limit the number of servers, it works until a server is interrupted or shut down. After that, those servers seem to have disappeared and are no longer activated.

Here is my code:

import simpy
import random
import numpy as np

class param:
    """Configuration bundle for one simulation trial.

    Only the mean inter-arrival gap varies between trials; every other
    setting is a fixed constant of the model.
    """

    def __init__(self, x):
        # arrival gap between successive customers (the trial variable)
        self.MEAN_INTERARRIVAL = x
        # time a server takes to process one batch
        self.MEAN_SERVICE_TIME = 2
        # time a server needs to switch on before it can serve
        self.MEAN_SWITCH_TIME = 3
        # cap on the number of servers in the system
        self.NUM_OF_SERVER = 4
        # customers arriving beyond this system size balk
        self.MAX_SYS_SIZE = 10
        # number of customers grouped into one batch
        self.BATCH_SIZE = 2
        # seed handed to random.seed() for reproducibility
        self.RANDOM_SEED = 0

# there is no waiting so normal lists are good enough

class Server():
    """
    Server that processes batches.

    Has two states: starting up, and batch processing.

    Creating a Server immediately schedules its start_up process; once
    the switch-on delay finishes it drains batches from processing_q
    until the queue is empty.  A server that is still switching on can
    be cancelled with stop().
    """
    
    def __init__(self, id, env, processing_q, server_q, param):

        # id is a unique, ever-increasing identifier -- NOT a count of
        # live servers (see the note in start_up below)
        self.id = id
        self.env = env
        self.processing_q = processing_q  # shared queue of batches awaiting service
        self.server_q = server_q          # shared queue of servers still starting up

        # kick off the switch-on phase as soon as the server is created
        self.start_process = self.env.process(self.start_up(param))

    def start_up(self, param):
        """
        Starts up the server, then starts processing batches.

        Start-up can be interrupted, stopping the server.
        """
        global num_servers
         # start up

        # NOTE(review): this compares the unique id against the server
        # limit, not the number of currently live servers.  Once more
        # than NUM_OF_SERVER servers have EVER been created, no new
        # server starts up -- matching the "servers seem to have
        # disappeared" symptom described in the question above.
        if self.id <= param.NUM_OF_SERVER:         # I add the condition to limit the number of servers
            try:
                num_servers += 1
                print(f'{self.env.now} server {self.id} starting up')
                yield self.env.timeout(param.MEAN_SWITCH_TIME)
                #yield env.timeout(np.random.exponential(1/param.MEAN_SWITCH_TIME))

                print(f'{self.env.now} server {self.id} started')

                self.env.process(self.process(param))
            
            except simpy.Interrupt:
                # interrupted while switching on; NOTE(review): num_servers
                # is NOT decremented here, so the count drifts upward
                print(f'{env.now} server {self.id} has been interupted-------------------')

    def process(self, param):
        """
        Processes batches; keeps going as long as there are batches in
        the queue.

        If it starts a second batch, it also interrupts a starting-up
        server (that server's batch is "stolen").
        """
        global num_servers, num_active_server
        while True:
            # NOTE(review): incremented once per batch, so this counts
            # batches in service rather than active servers
            num_active_server += 1

            # reads the module-level processing_q, not self.processing_q
            b = processing_q.pop(0)
            print(f'{self.env.now} server {self.id} starting batch process')

            yield self.env.timeout(param.MEAN_SERVICE_TIME)
            #yield env.timeout(np.random.exponential(1/param.MEAN_SERVICE_TIME))

            # NOTE(review): decremented after every batch even though the
            # server may keep running, so both counters drift low
            num_servers -= 1
            num_active_server -= 1
            print(f'{self.env.now} server {self.id} finish batch process')

            if len(self.processing_q) > 0:
                # more processes to do,
                # steal batch from starting up server

                #if self.server_q:
                    #s = self.server_q.pop(0)        # Do these lines work for FIFO rule?
                    #s.stop()
                # pops the newest starting-up server; raises IndexError
                # when server_q is empty
                s = self.server_q.pop() # lifo
                s.stop()

            else:
                print(f'{env.now} server {self.id} no more batches, shutting down')
                break

    def stop(self):
        """
        Interrupts server start-up, stopping the server.
        """
        # interrupt() raises RuntimeError if start_up has already
        # finished; the bare except swallows that (and any other) error
        try:
            self.start_process.interrupt()
        except:
            pass

def gen_arrivals(env, batching_q, processing_q, server_q, param):
    """
    Generate arriving customers.

    A customer balks (aborts) when the estimated system size exceeds
    param.MAX_SYS_SIZE; otherwise it joins batching_q, and whenever
    BATCH_SIZE customers are waiting a batch is formed and a new
    Server is created for it.
    """
    global num_servers, num_balk, num_cumulative_customer, num_active_server
    id = 1
    while True:
        # fixed arrival gap (exponential variant kept commented out)
        yield env.timeout(param.MEAN_INTERARRIVAL)
        #yield env.timeout(np.random.exponential(1/param.MEAN_INTERARRIVAL))
        num_cumulative_customer += 1

        # a featureless token is enough to stand in for a customer
        customer = object()                         
        batching_q.append(customer)

        # waiting customers: loose ones plus those already grouped into batches
        q_size  = len(batching_q) + (param.BATCH_SIZE * len(processing_q)) 
        # NOTE(review): num_active_server actually counts batches in
        # service here (see Server.process), so sys_size is an estimate
        sys_size = q_size + (num_active_server * param.BATCH_SIZE) 

        #if q_size > max_q_size:
        if  sys_size > param.MAX_SYS_SIZE:    # I check the limited condition for number customer in system instead of number customer in queue
            num_balk += 1

            # undo the tentative append above -- the customer balks
            batching_q.pop(-1)                # I added the statement  
            print(f'{env.now} customer arrived and aborted, sys len: {sys_size }')

        else:
            #customer = object()                         # I moved these 2 lines above to update system size before using the if statement
            #batching_q.append(customer)

            print(f'{env.now} customer has arrived, q len: {q_size}, sys len: {sys_size}')
            # check if a batch can be created
            while len(batching_q) >= param.BATCH_SIZE:
                batch = list()
                while len(batch) < param.BATCH_SIZE:
                    batch.append(batching_q.pop(0))

                # put batch in processing q
                processing_q.append(batch)

                # start a new server for the batch (one server per batch;
                # the id limit inside Server.start_up may reject it)
                server = Server(id, env, processing_q, server_q, param)
                id += 1
                server_q.append(server)

        # running balking probability after every arrival
        prob_balk = num_balk/num_cumulative_customer
        #print(f'{env.now} prob_balk {prob_balk}')
        list_prob_balk.append(prob_balk)


# boot up sim
trial = 0
Pb= []    # final balking probability of each trial
# NOTE(review): 'global' at module level is a no-op; this line has no effect
global customer_balk_number
for x in range(1,3):
    trial += 1
    print('trial:', trial)

    # fresh shared state for each trial -- Server and gen_arrivals read
    # and write these module-level names
    batching_q = list()
    processing_q = list()
    server_q = list() # servers that are still starting up
    num_servers = 0                     # number of servers in system (both starting and serving)
    num_active_server = 0               # number of servers serving customers
    num_balk = 0                        # number of balking customers
    num_cumulative_customer = 0         # total arriving customers
    
    list_prob_balk = []    # running balk probability within this trial
    
    paramtest1 = param(x)
    random.seed(paramtest1.RANDOM_SEED)

    # create and start the model
    env = simpy.Environment()
    env.process(gen_arrivals(env, batching_q, processing_q, server_q, paramtest1))
    env.run(30)    # run until simulation time 30

    Pb.append(list_prob_balk[-1])

#print('List of balk prob', Pb )

Solution

  • I saw two issues with your code.

    First, when checking whether the queue of starting-up servers is non-empty, you need to use `if len(self.server_q) > 0:`, not `if self.server_q:`.

    Also, your tracking of the server count and active count was a bit off. Your active count was counting the number of batches being processed, not active servers.

    I also had an error in my original code where I forgot to remove the server from the server start-up queue when the server became active.

    Here is the fixed code

    import simpy
    import random
    import numpy as np
    
    class param:
        """Configuration bundle for one simulation trial; only the mean
        inter-arrival gap varies between trials."""
        def __init__(self, x):
            #self.FILE = 'Setup_time.csv'
            self.MEAN_INTERARRIVAL = x     # arrival gap between customers (the trial variable)
            self.MEAN_SERVICE_TIME = 2      # time to process one batch
            self.MEAN_SWITCH_TIME = 3       # server switch-on (start-up) time
            self.NUM_OF_SERVER = 4          # maximum number of servers
            self.MAX_SYS_SIZE = 10           # maximum number of customers in the system
            self.BATCH_SIZE = 2             # customers per batch
            self.RANDOM_SEED = 0            # seed for random.seed()
    
    # there is no waiting so normal lists are good enough
    
    class Server():
        """
        Server that processes batches.

        Has two states: starting up, and batch processing.

        num_servers is incremented once at construction and decremented
        exactly once -- either when start-up is interrupted, or when the
        server shuts down after its last batch -- so the count stays
        consistent with the NUM_OF_SERVER cap checked in gen_arrivals.
        """
        
        def __init__(self, id, env, processing_q, server_q, param):

            # id is a unique identifier, not a count of live servers
            self.id = id
            self.env = env
            self.processing_q = processing_q  # shared queue of batches awaiting service
            self.server_q = server_q          # shared queue of servers still starting up

            self.start_process = self.env.process(self.start_up(param))

            global num_servers
            # server has been created (starting up), but is not active yet
            num_servers +=1
            

        def start_up(self, param):
            """
            Starts up the server, then starts processing batches.

            Start-up can be interrupted, stopping the server.
            """
            global num_servers
             # start up

            # self.id is not a count, it's a unique id for each server,
            # so it must not be compared against the server limit
            #if self.id <= param.NUM_OF_SERVER:         # I add the condition to limit the number of servers

            try:
                #num_servers += 1
                print(f'{self.env.now} server {self.id} starting up')
                yield self.env.timeout(param.MEAN_SWITCH_TIME)
                #yield env.timeout(np.random.exponential(1/param.MEAN_SWITCH_TIME))

                print(f'{self.env.now} server {self.id} started')

                # server has started, so remove it from the start-up queue
                # so running servers can no longer "steal" its batch
                self.server_q.remove(self)

                self.env.process(self.process(param))
            
            except simpy.Interrupt:
                print(f'{env.now} server {self.id} has been interupted-------------------')

                # server is stopping before it ever became active, so
                # release its slot in the server count
                num_servers -= 1

        def process(self, param):
            """
            Processes batches; keeps going as long as there are batches
            in the queue.

            If it starts a second batch, it also interrupts a
            starting-up server (that server's batch is "stolen").
            """
            global num_servers, num_active_server
            # count the server becoming active only once, not per batch
            num_active_server += 1

            while True:
                # server becomes active only once; incrementing here would
                # count batches processed instead of active servers
                # num_active_server += 1

                b = processing_q.pop(0)
                print(f'{self.env.now} server {self.id} starting batch process')

                yield self.env.timeout(param.MEAN_SERVICE_TIME)
                #yield env.timeout(np.random.exponential(1/param.MEAN_SERVICE_TIME))

                # server is still running between batches -- do not
                # decrement the counters here
                # num_servers -= 1
                # num_active_server -= 1
                print(f'{self.env.now} server {self.id} finish batch process')

                if len(self.processing_q) > 0:
                    # more processes to do,
                    # steal batch from starting up server

                    # #if self.server_q:
                    #     #s = self.server_q.pop(0)        # Do these lines work for FIFO rule?
                    #     #s.stop()
                    # s = self.server_q.pop() # lifo
                    # s.stop()

                    # guard the pop: only interrupt a starting-up server
                    # if one exists (FIFO: oldest start-up is cancelled)
                    if len(self.server_q) > 0:
                        s = self.server_q.pop(0)
                        s.stop()

                else:
                    print(f'{env.now} server {self.id} no more batches, shutting down')
                    break
            
            # the server is shutting down: release both counters exactly once
            num_active_server -= 1
            num_servers -= 1

        def stop(self):
            """
            Interrupts server start-up, stopping the server.
            """
            # interrupt() raises RuntimeError if start_up has already
            # finished; the bare except swallows that (and any other) error
            try:
                self.start_process.interrupt()
            except:
                pass
    
    def gen_arrivals(env, batching_q, processing_q, server_q, param):
        """
        Generate arriving customers.

        A customer balks (aborts) when the estimated system size exceeds
        param.MAX_SYS_SIZE; otherwise it joins batching_q, and whenever
        BATCH_SIZE customers are waiting a batch is formed.  A new
        Server is created for the batch only while num_servers is below
        the NUM_OF_SERVER cap; otherwise the batch waits in
        processing_q for a running server to pick it up.
        """
        global num_servers, num_balk, num_cumulative_customer, num_active_server
        id = 1
        while True:
            # fixed arrival gap (exponential variant kept commented out)
            yield env.timeout(param.MEAN_INTERARRIVAL)
            #yield env.timeout(np.random.exponential(1/param.MEAN_INTERARRIVAL))
            num_cumulative_customer += 1

            # a featureless token is enough to stand in for a customer
            customer = object()                         
            batching_q.append(customer)

            # waiting customers: loose ones plus those already grouped into batches
            q_size  = len(batching_q) + (param.BATCH_SIZE * len(processing_q)) 
            # plus the customers currently in service
            sys_size = q_size + (num_active_server * param.BATCH_SIZE) 

            #if q_size > max_q_size:
            if  sys_size > param.MAX_SYS_SIZE:    # I check the limited condition for number customer in system instead of number customer in queue
                num_balk += 1

                # undo the tentative append above -- the customer balks
                batching_q.pop(-1)                # I added the statement  
                print(f'{env.now} customer arrived and aborted, sys len: {sys_size }')

            else:
                #customer = object()                         # I moved these 2 lines above to update system size before using the if statement
                #batching_q.append(customer)

                print(f'{env.now} customer has arrived, q len: {q_size}, sys len: {sys_size}')
                # check if a batch can be created
                while len(batching_q) >= param.BATCH_SIZE:
                    batch = list()
                    while len(batch) < param.BATCH_SIZE:
                        batch.append(batching_q.pop(0))

                    # put batch in processing q
                    processing_q.append(batch)

                    # start a server only while under the server cap;
                    # num_servers counts both starting and serving servers
                    if num_servers < param.NUM_OF_SERVER:
                        server = Server(id, env, processing_q, server_q, param)
                        id += 1
                        server_q.append(server)

            # running balking probability after every arrival
            prob_balk = num_balk/num_cumulative_customer
            #print(f'{env.now} prob_balk {prob_balk}')
            list_prob_balk.append(prob_balk)
    
    
    # boot up sim
    trial = 0
    Pb= []    # final balking probability of each trial
    # NOTE(review): 'global' at module level is a no-op; this line has no effect
    global customer_balk_number
    for x in range(1,3):
        trial += 1
        print('trial:', trial)

        # fresh shared state for each trial -- Server and gen_arrivals
        # read and write these module-level names
        batching_q = list()
        processing_q = list()
        server_q = list() # servers that are still starting up
        num_servers = 0                     # number of servers in system (both starting and serving)
        num_active_server = 0               # number of servers serving customers
        num_balk = 0                        # number of balking customers
        num_cumulative_customer = 0         # total arriving customers
        
        list_prob_balk = []    # running balk probability within this trial
        
        paramtest1 = param(x)
        random.seed(paramtest1.RANDOM_SEED)

        # create and start the model
        env = simpy.Environment()
        env.process(gen_arrivals(env, batching_q, processing_q, server_q, paramtest1))
        env.run(30)    # run until simulation time 30

        Pb.append(list_prob_balk[-1])

    #print('List of balk prob', Pb )