 03:23:39.17
 03:23:39.25 Welcome to the Bitnami elasticsearch container
 03:23:39.27 Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-elasticsearch
 03:23:39.36 Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-elasticsearch/issues
 03:23:39.37
 03:23:39.46 INFO  ==> ** Starting Elasticsearch setup **
 03:23:39.88 INFO  ==> Configuring/Initializing Elasticsearch...
 03:23:40.47 INFO  ==> Setting default configuration
 03:23:40.66 INFO  ==> Configuring Elasticsearch cluster settings...
OpenJDK 64-Bit Server VM warning: Option UseConcMarkSweepGC was deprecated in version 9.0 and will likely be removed in a future release.
 03:24:03.65 INFO  ==> ** Elasticsearch setup finished! **
 03:24:03.87 INFO  ==> ** Starting Elasticsearch **
OpenJDK 64-Bit Server VM warning: Option UseConcMarkSweepGC was deprecated in version 9.0 and will likely be removed in a future release.
[2023-05-06T03:24:48,857][INFO ][o.e.n.Node ] [onap-sdnrdb-master-0] version[7.9.3], pid[1], build[oss/tar/c4138e51121ef06a6404866cddc601906fe5c868/2020-10-16T10:36:16.141335Z], OS[Linux/5.4.0-96-generic/amd64], JVM[BellSoft/OpenJDK 64-Bit Server VM/11.0.9/11.0.9+11-LTS]
[2023-05-06T03:24:48,859][INFO ][o.e.n.Node ] [onap-sdnrdb-master-0] JVM home [/opt/bitnami/java]
[2023-05-06T03:24:48,860][INFO ][o.e.n.Node ] [onap-sdnrdb-master-0] JVM arguments [-Xshare:auto, -Des.networkaddress.cache.ttl=60, -Des.networkaddress.cache.negative.ttl=10, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dio.netty.allocator.numDirectArenas=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Djava.locale.providers=SPI,COMPAT, -Xms128m, -Xmx128m, -XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -Djava.io.tmpdir=/tmp/elasticsearch-17729482147217119099, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, -XX:MaxDirectMemorySize=67108864, -Des.path.home=/opt/bitnami/elasticsearch, -Des.path.conf=/opt/bitnami/elasticsearch/config, -Des.distribution.flavor=oss, -Des.distribution.type=tar, -Des.bundled_jdk=true]
[2023-05-06T03:25:10,360][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [aggs-matrix-stats]
[2023-05-06T03:25:10,362][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [analysis-common]
[2023-05-06T03:25:10,363][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [geo]
[2023-05-06T03:25:10,364][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [ingest-common]
[2023-05-06T03:25:10,456][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [ingest-geoip]
[2023-05-06T03:25:10,457][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [ingest-user-agent]
[2023-05-06T03:25:10,458][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [kibana]
[2023-05-06T03:25:10,459][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [lang-expression]
[2023-05-06T03:25:10,459][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [lang-mustache]
[2023-05-06T03:25:10,460][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [lang-painless]
[2023-05-06T03:25:10,461][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [mapper-extras]
[2023-05-06T03:25:10,462][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [parent-join]
[2023-05-06T03:25:10,462][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [percolator]
[2023-05-06T03:25:10,463][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [rank-eval]
[2023-05-06T03:25:10,464][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [reindex]
[2023-05-06T03:25:10,555][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [repository-url]
[2023-05-06T03:25:10,556][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [tasks]
[2023-05-06T03:25:10,557][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded module [transport-netty4]
[2023-05-06T03:25:10,559][INFO ][o.e.p.PluginsService ] [onap-sdnrdb-master-0] loaded plugin [repository-s3]
[2023-05-06T03:25:12,065][INFO ][o.e.e.NodeEnvironment ] [onap-sdnrdb-master-0] using [1] data paths, mounts [[/bitnami/elasticsearch/data (192.168.5.117:/dockerdata-nfs/onap/elastic-master-0)]], net usable_space [95.3gb], net total_space [99.9gb], types [nfs4]
[2023-05-06T03:25:12,066][INFO ][o.e.e.NodeEnvironment ] [onap-sdnrdb-master-0] heap size [123.7mb], compressed ordinary object pointers [true]
[2023-05-06T03:25:13,158][INFO ][o.e.n.Node ] [onap-sdnrdb-master-0] node name [onap-sdnrdb-master-0], node ID [3I3WBw2cR7CpdmvtdJW6eA], cluster name [sdnrdb-cluster]
[2023-05-06T03:26:09,570][INFO ][o.e.t.NettyAllocator ] [onap-sdnrdb-master-0] creating NettyAllocator with the following configs: [name=unpooled, factors={es.unsafe.use_unpooled_allocator=false, g1gc_enabled=false, g1gc_region_size=0b, heap_size=123.7mb}]
[2023-05-06T03:26:11,456][INFO ][o.e.d.DiscoveryModule ] [onap-sdnrdb-master-0] using discovery type [zen] and seed hosts providers [settings]
[2023-05-06T03:26:17,459][WARN ][o.e.g.DanglingIndicesState] [onap-sdnrdb-master-0] gateway.auto_import_dangling_indices is disabled, dangling indices will not be automatically detected or imported and must be managed manually
[2023-05-06T03:26:19,961][INFO ][o.e.n.Node ] [onap-sdnrdb-master-0] initialized
[2023-05-06T03:26:19,962][INFO ][o.e.n.Node ] [onap-sdnrdb-master-0] starting ...
[2023-05-06T03:26:21,705][INFO ][o.e.t.TransportService ] [onap-sdnrdb-master-0] publish_address {10.233.67.187:9300}, bound_addresses {0.0.0.0:9300}
[2023-05-06T03:26:22,860][WARN ][o.e.t.TcpTransport ] [onap-sdnrdb-master-0] exception caught on transport layer [Netty4TcpChannel{localAddress=/10.233.67.187:9300, remoteAddress=/127.0.0.6:55673}], closing connection
java.lang.IllegalStateException: transport not ready yet to handle incoming requests
    at org.elasticsearch.transport.TransportService.onRequestReceived(TransportService.java:943) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.InboundHandler.handleRequest(InboundHandler.java:136) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.InboundHandler.messageReceived(InboundHandler.java:93) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.InboundHandler.inboundMessage(InboundHandler.java:78) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TcpTransport.inboundMessage(TcpTransport.java:692) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.InboundPipeline.forwardFragments(InboundPipeline.java:142) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.InboundPipeline.doHandleBytes(InboundPipeline.java:117) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.InboundPipeline.handleBytes(InboundPipeline.java:82) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.netty4.Netty4MessageChannelHandler.channelRead(Netty4MessageChannelHandler.java:76) [transport-netty4-client-7.9.3.jar:7.9.3]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.handler.logging.LoggingHandler.channelRead(LoggingHandler.java:271) [netty-handler-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:103) [netty-codec-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:163) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:714) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:615) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:578) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493) [netty-transport-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989) [netty-common-4.1.49.Final.jar:4.1.49.Final]
    at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) [netty-common-4.1.49.Final.jar:4.1.49.Final]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T03:26:23,956][WARN ][o.e.t.TcpTransport ] [onap-sdnrdb-master-0] exception caught on transport layer [Netty4TcpChannel{localAddress=/10.233.67.187:9300, remoteAddress=/127.0.0.6:45897}], closing connection
java.lang.IllegalStateException: transport not ready yet to handle incoming requests
    ... (stack trace identical to the IllegalStateException trace above)
[2023-05-06T03:26:24,955][WARN ][o.e.t.TcpTransport ] [onap-sdnrdb-master-0] exception caught on transport layer [Netty4TcpChannel{localAddress=/10.233.67.187:9300, remoteAddress=/127.0.0.6:57817}], closing connection
java.lang.IllegalStateException: transport not ready yet to handle incoming requests
    ... (stack trace identical to the IllegalStateException trace above)
[2023-05-06T03:26:25,958][WARN ][o.e.t.TcpTransport ] [onap-sdnrdb-master-0] exception caught on transport layer [Netty4TcpChannel{localAddress=/10.233.67.187:9300, remoteAddress=/127.0.0.6:32845}], closing connection
java.lang.IllegalStateException: transport not ready yet to handle incoming requests
    ... (stack trace identical to the IllegalStateException trace above)
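
The "transport not ready yet" warnings above are startup noise rather than errors: port 9300 is already bound, but the TransportService has not finished starting, so probes arriving from peers (here via 127.0.0.6) are rejected and the connections closed. An external readiness check should therefore poll instead of failing on the first refused request. A minimal polling sketch in Python, assuming an illustrative HTTP endpoint on localhost:9200 (the log only shows the transport port 9300, not where HTTP is exposed):

# Sketch: wait until an Elasticsearch node answers on its HTTP port.
# The URL is an assumption for illustration; adjust host/port to the deployment.
import json
import time
import urllib.error
import urllib.request

def wait_for_node(url="http://localhost:9200/", timeout_s=300):
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                info = json.load(resp)
                return info["version"]["number"]  # e.g. "7.9.3", as in the log above
        except (urllib.error.URLError, OSError):
            time.sleep(2)  # node still starting; retry instead of failing
    raise TimeoutError(f"no answer from {url} within {timeout_s}s")

if __name__ == "__main__":
    print("node up, version", wait_for_node())
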
[2023-05-06T03:26:26,061][INFO ][o.e.b.BootstrapChecks ] [onap-sdnrdb-master-0] bound or publishing to a non-loopback address, enforcing bootstrap checks
[2023-05-06T03:26:36,171][WARN ][o.e.c.c.ClusterFormationFailureHelper] [onap-sdnrdb-master-0] master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and this node must discover master-eligible nodes [onap-sdnrdb-master-0, onap-sdnrdb-master-1, onap-sdnrdb-master-2] to bootstrap a cluster: have discovered [{onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}]; discovery will continue using [10.233.68.81:9300, 10.233.65.213:9300, 10.233.71.203:9300] from hosts providers and [{onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}] from last-known cluster state; node term 0, last-accepted version 0 in term 0
    ... (the same warning repeats at 03:26:46,175, 03:26:56,178, 03:27:06,183, 03:27:16,188, 03:27:26,193, 03:27:36,197 and 03:27:46,200; from 03:27:36,197 the hosts-provider list is ordered [10.233.65.213:9300, 10.233.71.203:9300, 10.233.68.81:9300])
[2023-05-06T03:27:51,192][INFO ][o.e.c.c.Coordinator ] [onap-sdnrdb-master-0] setting initial configuration to VotingConfiguration{3I3WBw2cR7CpdmvtdJW6eA,{bootstrap-placeholder}-onap-sdnrdb-master-1,bcSeTMzMRRmTcOQNXqZLZw}
[2023-05-06T03:27:52,976][INFO ][o.e.c.s.MasterService ] [onap-sdnrdb-master-0] elected-as-master ([2] nodes joined)[{onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr} elect leader, {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} elect leader, _BECOME_MASTER_TASK_, _FINISH_ELECTION_], term: 2, version: 1, delta: master node changed {previous [], current [{onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}]}, added {{onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}}
[2023-05-06T03:27:52,980][WARN ][o.e.c.s.MasterService ] [onap-sdnrdb-master-0] failing [elected-as-master ([2] nodes joined)[{onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr} elect leader, {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} elect leader, _BECOME_MASTER_TASK_, _FINISH_ELECTION_]]: failed to commit cluster state version [1]
org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException: node is no longer master for term 2 while handling publication
    at org.elasticsearch.cluster.coordination.Coordinator.publish(Coordinator.java:1083) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.publish(MasterService.java:268) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:250) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.access$000(MasterService.java:73) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:151) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:678) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215) [elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
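
At 03:27:52 the node wins a two-node election for term 2 but immediately fails to commit cluster state version [1] because it has already lost mastership, so the role bounces between candidates in the entries that follow. While such churn is underway, the current holder of the master role can be watched from outside with the _cat/master API. A small observation sketch, again assuming an illustrative HTTP endpoint that this log does not expose:

# Sketch: poll which node currently holds the master role during election churn.
# base URL is an assumption for illustration; only transport port 9300 appears in the log.
import json
import time
import urllib.request

def current_master(base="http://localhost:9200"):
    req = urllib.request.Request(base + "/_cat/master?format=json")
    with urllib.request.urlopen(req, timeout=5) as resp:
        rows = json.load(resp)  # one row with columns: id, host, ip, node
        return rows[0]["node"] if rows else None

for _ in range(10):
    try:
        print(time.strftime("%H:%M:%S"), current_master())
    except Exception as exc:  # the API errors out while no master is elected
        print(time.strftime("%H:%M:%S"), "no master yet:", exc)
    time.sleep(5)
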
[2023-05-06T03:27:52,985][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=0, optionalJoin=Optional[Join{term=1, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-0][10.233.67.187:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException: node is no longer master for term 2 while handling publication
    ... (stack trace identical to the FailedToCommitClusterStateException trace at 03:27:52,980)
[2023-05-06T03:27:56,184][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=1, optionalJoin=Optional[Join{term=2, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-0][10.233.67.187:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: received a newer join from {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}
    at org.elasticsearch.cluster.coordination.JoinHelper$CandidateJoinAccumulator.handleJoinRequest(JoinHelper.java:459) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.processJoinRequest(Coordinator.java:533) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.lambda$handleJoinRequest$7(Coordinator.java:496) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:63) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:375) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:362) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.handleJoinRequest(Coordinator.java:483) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.JoinHelper.lambda$new$0(JoinHelper.java:136) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:72) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TransportService$8.doRun(TransportService.java:800) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:737) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T03:27:56,721][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=2, optionalJoin=Optional[Join{term=3, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-0][10.233.67.187:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: received a newer join from {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}
    ... (stack trace identical to the CoordinationStateRejectedException trace at 03:27:56,184)
[2023-05-06T03:27:57,206][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=3, optionalJoin=Optional[Join{term=4, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-0][10.233.67.187:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: received a newer join from {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}
    ... (stack trace identical to the CoordinationStateRejectedException trace at 03:27:56,184)
[2023-05-06T03:27:58,456][INFO ][o.e.m.j.JvmGcMonitorService] [onap-sdnrdb-master-0] [gc][98] overhead, spent [385ms] collecting in the last [1s]
[2023-05-06T03:27:59,558][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=7, optionalJoin=Optional[Join{term=8, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-1][10.233.71.203:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: incoming term 8 does not match current term 9
    at org.elasticsearch.cluster.coordination.CoordinationState.handleJoin(CoordinationState.java:225) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.handleJoin(Coordinator.java:1013) ~[elasticsearch-7.9.3.jar:7.9.3]
    at java.util.Optional.ifPresent(Optional.java:183) ~[?:?]
    at org.elasticsearch.cluster.coordination.Coordinator.processJoinRequest(Coordinator.java:532) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.lambda$handleJoinRequest$7(Coordinator.java:496) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:63) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.ClusterConnectionManager.connectToNode(ClusterConnectionManager.java:120) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:378) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:362) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.handleJoinRequest(Coordinator.java:483) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.JoinHelper.lambda$new$0(JoinHelper.java:136) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:72) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.InboundHandler$RequestHandler.doRun(InboundHandler.java:263) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:737) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T03:27:59,756][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=5, optionalJoin=Optional[Join{term=6, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-2][10.233.65.213:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: incoming term 6 does not match current term 8
    ... (stack trace identical to the CoordinationStateRejectedException trace at 03:27:59,558)
[2023-05-06T03:27:59,790][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=4, optionalJoin=Optional[Join{term=5, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-0][10.233.67.187:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: received a newer join from {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}
    ... (stack trace identical to the CoordinationStateRejectedException trace at 03:27:56,184)
[2023-05-06T03:28:00,257][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=6, optionalJoin=Optional[Join{term=7, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-2][10.233.65.213:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: incoming term 7 does not match current term 9
    ... (stack trace identical to the CoordinationStateRejectedException trace at 03:27:59,558)
[2023-05-06T03:28:00,655][INFO ][o.e.c.s.MasterService ] [onap-sdnrdb-master-0] elected-as-master ([3] nodes joined)[{onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr} elect leader, {onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr} elect leader, {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} elect leader, _BECOME_MASTER_TASK_, _FINISH_ELECTION_], term: 10, version: 1, delta: master node changed {previous [], current [{onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}]}, added {{onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr},{onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}}
[2023-05-06T03:28:00,698][WARN ][o.e.c.s.MasterService ] [onap-sdnrdb-master-0] failing [elected-as-master ([3] nodes joined)[{onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr} elect leader, {onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr} elect leader, {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} elect leader, _BECOME_MASTER_TASK_, _FINISH_ELECTION_]]: failed to commit cluster state version [1]
org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException: node is no longer master for term 10 while handling publication
    at org.elasticsearch.cluster.coordination.Coordinator.publish(Coordinator.java:1083) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.publish(MasterService.java:268) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:250) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.access$000(MasterService.java:73) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:151) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:678) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215) [elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T03:28:00,757][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=9, optionalJoin=Optional[Join{term=10, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-0][10.233.67.187:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException: node is no longer master for term 10 while handling publication
    at org.elasticsearch.cluster.coordination.Coordinator.publish(Coordinator.java:1083) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.publish(MasterService.java:268) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:250) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.access$000(MasterService.java:73) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:151) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:678) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215) [elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T03:28:01,357][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=8, optionalJoin=Optional[Join{term=9, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-1][10.233.71.203:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException: node is no longer master for term 10 while handling publication
    at org.elasticsearch.cluster.coordination.Coordinator.publish(Coordinator.java:1083) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.publish(MasterService.java:268) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:250) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.access$000(MasterService.java:73) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:151) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:678) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215) ~[elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T03:28:01,969][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=11, optionalJoin=Optional[Join{term=12, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-0][10.233.67.187:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: became follower
    at org.elasticsearch.cluster.coordination.JoinHelper$CandidateJoinAccumulator.lambda$close$3(JoinHelper.java:484) [elasticsearch-7.9.3.jar:7.9.3]
    at java.util.HashMap$Values.forEach(HashMap.java:976) [?:?]
    at org.elasticsearch.cluster.coordination.JoinHelper$CandidateJoinAccumulator.close(JoinHelper.java:484) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.becomeFollower(Coordinator.java:621) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.onFollowerCheckRequest(Coordinator.java:257) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.FollowersChecker$2.doRun(FollowersChecker.java:198) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:737) [elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T03:28:03,486][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=10, optionalJoin=Optional[Join{term=11, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-2][10.233.65.213:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException: node is no longer master for term 13 while handling publication
    at org.elasticsearch.cluster.coordination.Coordinator.publish(Coordinator.java:1083) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.publish(MasterService.java:268) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:250) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.access$000(MasterService.java:73) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:151) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:678) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215) ~[elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
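This stretch shows a round that was won and then immediately lost: onap-sdnrdb-master-0 is elected at term 10, fails to commit cluster state version [1] because a competing election has already advanced the term, and ends up a follower ("became follower"). Once the churn stops, the standard _cat/master endpoint reports which node actually holds leadership. A minimal sketch, under the same port-forward assumption as above:

    # Minimal sketch: ask any reachable node which master it follows.
    # _cat/master prints "id host ip node"; the endpoint below is an
    # assumption about how the cluster is exposed.
    import urllib.request

    ES = "http://127.0.0.1:9200"

    with urllib.request.urlopen(ES + "/_cat/master?v", timeout=5) as r:
        print(r.read().decode())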
[2023-05-06T03:28:04,359][INFO ][o.e.c.c.CoordinationState] [onap-sdnrdb-master-0] cluster UUID set to [-duUHc6ER92zyeAKiLRfGQ]
[2023-05-06T03:28:04,685][INFO ][o.e.c.s.ClusterApplierService] [onap-sdnrdb-master-0] master node changed {previous [], current [{onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}]}, added {{onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}}, term: 13, version: 1, reason: ApplyCommitRequest{term=13, version=1, sourceNode={onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}}
[2023-05-06T03:28:04,773][INFO ][o.e.h.AbstractHttpServerTransport] [onap-sdnrdb-master-0] publish_address {10.233.67.187:9200}, bound_addresses {0.0.0.0:9200}
[2023-05-06T03:28:04,775][INFO ][o.e.n.Node ] [onap-sdnrdb-master-0] started
[2023-05-06T03:28:07,764][INFO ][o.e.c.s.ClusterApplierService] [onap-sdnrdb-master-0] added {{onap-sdnrdb-coordinating-only-6c56bdbcb9-lsqjl}{dvGQhqtuQAOGNKbHanJeIQ}{jlrKn8xHTQWT9RywUBkV7A}{10.233.68.81}{10.233.68.81:9300}{r},{onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}}, term: 13, version: 2, reason: ApplyCommitRequest{term=13, version=2, sourceNode={onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}}
[2023-05-06T03:28:20,459][INFO ][o.e.c.s.ClusterSettings ] [onap-sdnrdb-master-0] updating [action.auto_create_index] from [true] to [false]
[2023-05-06T04:27:54,477][INFO ][o.e.c.c.Coordinator ] [onap-sdnrdb-master-0] master node [{onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}] failed, restarting discovery
org.elasticsearch.transport.NodeDisconnectedException: [onap-sdnrdb-master-1][10.233.71.203:9300][disconnected] disconnected
[2023-05-06T04:27:54,483][INFO ][o.e.c.s.ClusterApplierService] [onap-sdnrdb-master-0] master node changed {previous [{onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}], current []}, term: 13, version: 92, reason: becoming candidate: onLeaderFailure
[2023-05-06T04:27:58,737][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=14, optionalJoin=Optional[Join{term=15, lastAcceptedTerm=13, lastAcceptedVersion=92, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-1][10.233.71.203:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: incoming term 15 does not match current term 16
    at org.elasticsearch.cluster.coordination.CoordinationState.handleJoin(CoordinationState.java:225) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.handleJoin(Coordinator.java:1013) ~[elasticsearch-7.9.3.jar:7.9.3]
    at java.util.Optional.ifPresent(Optional.java:183) ~[?:?]
    at org.elasticsearch.cluster.coordination.Coordinator.processJoinRequest(Coordinator.java:532) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.lambda$handleJoinRequest$7(Coordinator.java:496) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:63) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.ClusterConnectionManager.connectToNode(ClusterConnectionManager.java:120) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:378) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:362) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.handleJoinRequest(Coordinator.java:483) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.JoinHelper.lambda$new$0(JoinHelper.java:136) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:72) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.InboundHandler$RequestHandler.doRun(InboundHandler.java:263) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:737) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T04:27:59,589][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=15, optionalJoin=Optional.empty}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-2][10.233.65.213:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: received a newer join from {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}
    at org.elasticsearch.cluster.coordination.JoinHelper$CandidateJoinAccumulator.handleJoinRequest(JoinHelper.java:459) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.processJoinRequest(Coordinator.java:533) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.access$000(Coordinator.java:101) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator$2.onResponse(Coordinator.java:509) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator$2.onResponse(Coordinator.java:505) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.action.ActionListenerResponseHandler.handleResponse(ActionListenerResponseHandler.java:54) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TransportService$ContextRestoreResponseHandler.handleResponse(TransportService.java:1162) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.InboundHandler$1.doRun(InboundHandler.java:213) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:737) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T04:27:59,595][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=13, optionalJoin=Optional[Join{term=14, lastAcceptedTerm=13, lastAcceptedVersion=92, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-2][10.233.65.213:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.NotMasterException: Node [{onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}] not master for join request
[2023-05-06T04:27:59,863][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=16, optionalJoin=Optional.empty}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-2][10.233.65.213:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.CoordinationStateRejectedException: received a newer join from {onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}
    at org.elasticsearch.cluster.coordination.JoinHelper$CandidateJoinAccumulator.handleJoinRequest(JoinHelper.java:459) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.processJoinRequest(Coordinator.java:533) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.lambda$handleJoinRequest$7(Coordinator.java:496) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:63) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.ClusterConnectionManager.connectToNode(ClusterConnectionManager.java:120) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:378) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.TransportService.connectToNode(TransportService.java:362) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.Coordinator.handleJoinRequest(Coordinator.java:483) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.coordination.JoinHelper.lambda$new$0(JoinHelper.java:136) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:72) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.transport.InboundHandler$RequestHandler.doRun(InboundHandler.java:263) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:737) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) ~[elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T04:28:00,919][INFO ][o.e.c.s.ClusterApplierService] [onap-sdnrdb-master-0] master node changed {previous [], current [{onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}]}, term: 17, version: 95, reason: ApplyCommitRequest{term=17, version=95, sourceNode={onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}}
[2023-05-06T04:28:03,735][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=13, optionalJoin=Optional[Join{term=13, lastAcceptedTerm=0, lastAcceptedVersion=0, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-1][10.233.71.203:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException: node is no longer master for term 17 while handling publication
    at org.elasticsearch.cluster.coordination.Coordinator.publish(Coordinator.java:1083) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.publish(MasterService.java:268) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:250) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.access$000(MasterService.java:73) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:151) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:678) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215) ~[elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
[2023-05-06T04:28:03,737][INFO ][o.e.c.c.JoinHelper ] [onap-sdnrdb-master-0] failed to join {onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr} with JoinRequest{sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, minimumTerm=15, optionalJoin=Optional[Join{term=16, lastAcceptedTerm=13, lastAcceptedVersion=92, sourceNode={onap-sdnrdb-master-0}{3I3WBw2cR7CpdmvtdJW6eA}{e6qqK-WnT2aWwakK3i8zBA}{10.233.67.187}{10.233.67.187:9300}{dmr}, targetNode={onap-sdnrdb-master-1}{bI9wTPL2RgKmam7cco7rKg}{ktxGy7LRT0yWmBcyFHhS0Q}{10.233.71.203}{10.233.71.203:9300}{dmr}}]}
org.elasticsearch.transport.RemoteTransportException: [onap-sdnrdb-master-1][10.233.71.203:9300][internal:cluster/coordination/join]
Caused by: org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException: node is no longer master for term 17 while handling publication
    at org.elasticsearch.cluster.coordination.Coordinator.publish(Coordinator.java:1083) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.publish(MasterService.java:268) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:250) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService.access$000(MasterService.java:73) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:151) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:678) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252) ~[elasticsearch-7.9.3.jar:7.9.3]
    at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215) ~[elasticsearch-7.9.3.jar:7.9.3]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) ~[?:?]
    at java.lang.Thread.run(Thread.java:834) [?:?]
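About an hour after startup the elected master (onap-sdnrdb-master-1) drops its transport connection, onap-sdnrdb-master-0 restarts discovery, loses a few more join rounds, and finally accepts onap-sdnrdb-master-2 as leader at term 17. A small watcher makes such failovers visible without tailing every node's log; again a sketch only, with the endpoint and poll interval being assumptions:

    # Minimal sketch: poll _cat/master and print every master change.
    # The ES endpoint is an assumption about how the cluster is exposed.
    import time
    import urllib.request

    ES = "http://127.0.0.1:9200"
    last = None
    while True:
        try:
            with urllib.request.urlopen(ES + "/_cat/master?h=node", timeout=5) as r:
                master = r.read().decode().strip()
        except OSError:
            master = None  # no master elected, or node unreachable
        if master != last:
            print(time.strftime("%H:%M:%S"), "master is now:", master)
            last = master
        time.sleep(5)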
[2023-05-06T04:28:06,256][INFO ][o.e.c.s.ClusterApplierService] [onap-sdnrdb-master-0] removed {{onap-sdnrdb-coordinating-only-6c56bdbcb9-lsqjl}{dvGQhqtuQAOGNKbHanJeIQ}{jlrKn8xHTQWT9RywUBkV7A}{10.233.68.81}{10.233.68.81:9300}{r}}, term: 17, version: 96, reason: ApplyCommitRequest{term=17, version=96, sourceNode={onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}}
[2023-05-06T04:28:08,591][INFO ][o.e.c.s.ClusterApplierService] [onap-sdnrdb-master-0] added {{onap-sdnrdb-coordinating-only-6c56bdbcb9-lsqjl}{dvGQhqtuQAOGNKbHanJeIQ}{jlrKn8xHTQWT9RywUBkV7A}{10.233.68.81}{10.233.68.81:9300}{r}}, term: 17, version: 98, reason: ApplyCommitRequest{term=17, version=98, sourceNode={onap-sdnrdb-master-2}{bcSeTMzMRRmTcOQNXqZLZw}{vqSLS_UTSC2zeUiCZ4rs4A}{10.233.65.213}{10.233.65.213:9300}{dmr}}
[2023-05-06T04:39:10,979][INFO ][o.e.m.j.JvmGcMonitorService] [onap-sdnrdb-master-0] [gc][4367] overhead, spent [297ms] collecting in the last [1s]
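The closing JvmGcMonitorService line reports roughly 300 ms of a single second spent in garbage collection, which is unsurprising given that this node was started with a 128 MB heap (-Xms128m/-Xmx128m in the JVM arguments above, yielding the logged 123.7mb heap). The standard node-stats API shows how close each node runs to its heap ceiling; a minimal check, endpoint again assumed:

    # Minimal sketch: report per-node heap pressure via _nodes/stats/jvm.
    # Sustained heap_used_percent near 100 alongside recurring GC-overhead
    # log lines points at the tiny 128m heap. Endpoint is an assumption.
    import json
    import urllib.request

    ES = "http://127.0.0.1:9200"

    with urllib.request.urlopen(ES + "/_nodes/stats/jvm", timeout=5) as r:
        stats = json.load(r)

    for node in stats["nodes"].values():
        mem = node["jvm"]["mem"]
        print(node["name"], f'{mem["heap_used_percent"]}% of',
              mem["heap_max_in_bytes"], "bytes")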