docker, docker-compose, pymodbus3

Passing from manual Docker host network to Docker Compose bridge


I have 2 Docker images, a Modbus server and a client, which I run manually with docker run --network host server (and the same for the client), and they work perfectly. But now I need to add them to a docker-compose file where the network is bridge, which I did like this:

autoserver:
    image: 19mikel95/pymodmikel:autoserversynchub
    container_name: autoserver
    restart: unless-stopped

  clientperf:
    image: 19mikel95/pymodmikel:reloadcomp
    container_name: clientperf
    restart: unless-stopped
    depends_on:
      - autoserver
    links:
      - "autoserver:server"

And I read that to refer from one container to another (client to server) I have to use the service name from the docker-compose YAML (autoserver), so that is what I did. In the Python file executed in the client (which is performance.py from pymodbus), I changed 'localhost' to:

host = 'autoserver'
client = ModbusTcpClient(host, port=5020)

However I get this error:

[ERROR/MainProcess] failed to run test successfully Traceback (most recent call last): File "performance.py", line 72, in single_client_test client.read_holding_registers(10, 1, unit=1) File "/usr/lib/python3/dist-packages/pymodbus/client/common.py", line 114, in read_holding_registers return self.execute(request) File "/usr/lib/python3/dist-packages/pymodbus/client/sync.py", line 107, in execute raise ConnectionException("Failed to connect[%s]" % (self.str())) pymodbus.exceptions.ConnectionException: Modbus Error: [Connection] Failed to connect[ModbusTcpClient(autoserver:5020)]

As asked, my full docker-compose YAML is this:

version: '2.1'

networks:
  monitor-net:
    driver: bridge

volumes:
    prometheus_data: {}
    grafana_data: {}

services:

  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    volumes:
      - ./prometheus:/etc/prometheus
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    restart: unless-stopped
    expose:
      - 9090
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  alertmanager:
    image: prom/alertmanager:latest
    container_name: alertmanager
    volumes:
      - ./alertmanager:/etc/alertmanager
    command:
      - '--config.file=/etc/alertmanager/config.yml'
      - '--storage.path=/alertmanager'
    restart: unless-stopped
    expose:
      - 9093
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  nodeexporter:
    image: prom/node-exporter:latest
    container_name: nodeexporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - c:\:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
    restart: unless-stopped
    expose:
      - 9100
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  cadvisor:
    image: gcr.io/google-containers/cadvisor:latest
    container_name: cadvisor
    volumes:
      - c:\:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker:/var/lib/docker:ro
      #- /cgroup:/cgroup:ro #doesn't work on MacOS only for Linux
    restart: unless-stopped
    expose:
      - 8080
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/provisioning:/etc/grafana/provisioning
    environment:
      - GF_SECURITY_ADMIN_USER=${ADMIN_USER:-admin}
      - GF_SECURITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
      - GF_USERS_ALLOW_SIGN_UP=false
    restart: unless-stopped
    expose:
      - 3000
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  pushgateway:
    image: prom/pushgateway:latest
    container_name: pushgateway
    restart: unless-stopped
    expose:
      - 9091
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  caddy:
    image: stefanprodan/caddy
    container_name: caddy
    ports:
      - "3000:3000"
      - "9090:9090"
      - "9093:9093"
      - "9091:9091"
    volumes:
      - ./caddy:/etc/caddy
    environment:
      - ADMIN_USER=${ADMIN_USER:-admin}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
    restart: unless-stopped
    networks:
      - monitor-net
    labels:
      org.label-schema.group: "monitoring"

  autoserver:
    image: 19mikel95/pymodmikel:autoserversynchub
    container_name: autoserver
    ports:
      - "5020:5020"
    restart: unless-stopped
    networks:
      - monitor-net

  clientperf:
    image: 19mikel95/pymodmikel:reloadcomp
    container_name: clientperf
    restart: unless-stopped
    networks:
      - monitor-net
    depends_on:
      - autoserver
    links:
      - "autoserver:server"

Solution

  • The problem is in the StartTcpServer(context, identity=identity, address=("localhost", 5020)) call in the sincserver.py file from the autoserver image. Binding to localhost makes the TCP server accept connections only from localhost (inside the container itself). It should be replaced with 0.0.0.0 in order to allow external requests to reach this port.

    The following Docker Compose shows it (sed -i 's|localhost|0.0.0.0|g' sincserver.py replaces the hostname):

    version: '2.1'
    
    services:
      autoserver:
        image: 19mikel95/pymodmikel:autoserversynchub
        command: sh -c "
          sed -i 's|localhost|0.0.0.0|g' sincserver.py;
          python3 sincserver.py daemon off
          "
        ports:
          - "5020:5020"
        restart: unless-stopped
    
      clientperf:
        image: 19mikel95/pymodmikel:reloadcomp
        restart: unless-stopped
        depends_on:
          - autoserver
    

    Run:

    docker-compose up -d
    docker-compose logs -f clientperf
    

    And you will see log like

    clientperf_1  | [DEBUG/MainProcess] 574 requests/second
    clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.7410697999875993 seconds
    clientperf_1  | [DEBUG/MainProcess] 692 requests/second
    clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.4434449000109453 seconds
    clientperf_1  | [DEBUG/MainProcess] 708 requests/second
    clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.4116760999895632 seconds
    clientperf_1  | [DEBUG/MainProcess] 890 requests/second
    clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.1230684999900404 seconds
    clientperf_1  | [DEBUG/MainProcess] 803 requests/second
    clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.2450218999874778 seconds
    clientperf_1  | [DEBUG/MainProcess] 753 requests/second
    clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.3274328999978025 seconds
    clientperf_1  | [DEBUG/MainProcess] 609 requests/second
    clientperf_1  | [DEBUG/MainProcess] time taken to complete 1000 cycle by 10 workers is 1.6399398999928962 seconds