java | wildfly | infinispan | infinispan-9

Infinispan replicated cache not replicating objects for read


We are trying to set up a replicated cache across two Infinispan nodes running on WildFly 11 inside of OpenShift. When we write an object on one node, it doesn't show up on the other node for reading.

At boot the nodes connect in the cluster and can see each other as can be seen in the logs:

22:59:52,513 INFO  [org.infinispan.remoting.transport.jgroups.JGroupsTransport] (thread-2) ISPN000094: Received new cluster view for channel mychannel: [port-12-z73rp|13] (2) [port-12-z73rp, port-12-q10wf]

The cache configuration is shown below. We've tried transaction modes including NONE, FULL_XA, and BATCH:

<cache-container name="mycache" default-cache="MyDefault_Cache" jndi-name="java:jboss/infinispan/mycache" module="org.wildfly.clustering.server">
            <transport lock-timeout="60000"/>

            <replicated-cache name="MyCache_CategoryManager" mode="SYNC" jndi-name="java:jboss/infinispan/mycache/MyCache_CategoryManager">
              <locking
                  isolation="READ_COMMITTED"/>
              <transaction
                  locking="OPTIMISTIC"
                  mode="NONE"
                  stop-timeout="30000"/>
            </replicated-cache>
          </cache-container>
    </subsystem>

In the code, the cache is wired via Spring as:

    public Map<String, Object> getCustomCache() {
    System.out.println("CACHE -> " + this.customCache.getClass());
    return customCache;
}

public void setCustomCache(Map<String, Object> customCache) {
    this.customCache = customCache;
}

@Resource(lookup = "java:jboss/infinispan/mycache/MyCache_CategoryManager")
private Map<String, Object> customCache;

And the JGroups config:

 <subsystem xmlns="urn:jboss:domain:jgroups:5.0">
        <channels default="ee">
            <channel name="ee" stack="udp" cluster="mycluster"/>
        </channels>
        <stacks>
            <stack name="udp">
                <transport type="UDP" socket-binding="jgroups-udp"/>
                <protocol type="PING"/>
                <protocol type="MERGE3"/>
                <protocol type="FD_SOCK"/>
                <protocol type="FD_ALL"/>
                <protocol type="VERIFY_SUSPECT"/>
                <protocol type="pbcast.NAKACK2"/>
                <protocol type="UNICAST3"/>
                <protocol type="pbcast.STABLE"/>
                <protocol type="pbcast.GMS"/>
                <protocol type="UFC"/>
                <protocol type="MFC"/>
                <protocol type="FRAG2"/>
            </stack>
            <stack name="tcp">
                <transport type="TCP" socket-binding="jgroups-tcp"/>
                <protocol type="PING"/>
                <protocol type="MERGE3"/>
                <protocol type="FD_SOCK"/>
                <protocol type="FD_ALL"/>
                <protocol type="VERIFY_SUSPECT"/>
                <protocol type="pbcast.NAKACK2"/>
                <protocol type="UNICAST3"/>
                <protocol type="pbcast.STABLE"/>
                <protocol type="pbcast.GMS"/>
                <protocol type="MFC"/>
                <protocol type="FRAG2"/>
            </stack>
            <stack name="pgping">
                <transport type="TCP" socket-binding="jgroups-tcp"/>
                <protocol type="JDBC_PING">
                    <property name="datasource_jndi_name">
                        ${env.PG_MYCLUSTER_SERV_DB_JNDI_NAME}
                    </property>
                    <property name="initialize_sql">
                        CREATE TABLE IF NOT EXISTS jgroupsping (own_addr VARCHAR(200) NOT NULL, cluster_name VARCHAR(200) NOT NULL, ping_data BYTEA DEFAULT NULL, PRIMARY KEY (own_addr, cluster_name))
                    </property>
                </protocol>
                <protocol type="PING"/>
                <protocol type="MERGE3"/>
                <protocol type="FD_SOCK" socket-binding="jgroups-tcp-fd"/>
                <protocol type="FD"/>
                <protocol type="VERIFY_SUSPECT"/>
                <protocol type="pbcast.NAKACK2"/>
                <protocol type="UNICAST3">
                    <property name="conn_close_timeout">5000</property>
                </protocol>
                <protocol type="pbcast.STABLE"/>
                <protocol type="pbcast.GMS">
                    <property name="join_timeout">3000</property>
                </protocol>
                <protocol type="MFC"/>
                <protocol type="FRAG2"/>
            </stack>
            <stack name="tcpping">
                <transport type="TCP" socket-binding="jgroups-tcp"/>
                <protocol type="TCPPING">
                    <property name="initial_hosts">127.0.0.1[7600]</property>
                    <property name="port_range">0</property>
                    <property name="num_initial_members">1</property>
                </protocol>
                <protocol type="MERGE3"/>
                <protocol type="FD_SOCK"/>
                <protocol type="FD_ALL"/>
                <protocol type="VERIFY_SUSPECT"/>
                <protocol type="BARRIER"/>
                <protocol type="pbcast.NAKACK2"/>
                <protocol type="UNICAST3"/>
                <protocol type="pbcast.STABLE"/>
                <protocol type="pbcast.GMS"/>
                <protocol type="UFC"/>
                <protocol type="MFC"/>
                <protocol type="FRAG2"/>
                <protocol type="RSVP"/>
                <protocol type="AUTH">
                    <property name="auth_class">org.jgroups.auth.MD5Token</property>
                    <property name="token_hash">SHA</property>
                    <property name="auth_value">${jboss.node.name}</property>
                </protocol>
            </stack>
        </stacks>
    </subsystem>

The data makes it to the database, and if we rebuild the cache on the second node, it will include the updated information.

Given that the nodes connect, how can we troubleshoot why replicated reads aren't working? We have tried both replicated and distributed cache configurations.


Solution

  • In the end, one key configuration element was missing: the resource environment reference (`resource-env-ref`) in the web.xml file:

    <resource-env-ref>
        <resource-env-ref-name>myCache</resource-env-ref-name>
        <resource-env-ref-type>org.infinispan.Cache</resource-env-ref-type>
        <lookup-name>java:jboss/infinispan/cache/myCache/myCacheManger</lookup-name>
      </resource-env-ref>
    

    And the corresponding cache definition from standalone.xml:

    <replicated-cache name="myCache" statistics-enabled="false" mode="SYNC"/>