+ export KAFKA_BROKER_ID=2
+ cp /opt/app/osaaf/local/cadi.properties /etc/kafka/data/cadi.properties
+ export KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://10.253.0.233:30492,INTERNAL_SASL_PLAINTEXT://:9092
+ exec /etc/confluent/docker/run
===> ENV Variables ...
A1POLICYMANAGEMENT_EXTERNAL_PORT=tcp://10.233.28.215:8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP=tcp://10.233.28.215:8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_ADDR=10.233.28.215
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PORT=8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PROTO=tcp
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_HOST=10.233.28.215
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT=8433
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT_HTTPS_API=8433
A1POLICYMANAGEMENT_PORT=tcp://10.233.41.97:8433
A1POLICYMANAGEMENT_PORT_8081_TCP=tcp://10.233.41.97:8081
A1POLICYMANAGEMENT_PORT_8081_TCP_ADDR=10.233.41.97
A1POLICYMANAGEMENT_PORT_8081_TCP_PORT=8081
A1POLICYMANAGEMENT_PORT_8081_TCP_PROTO=tcp
A1POLICYMANAGEMENT_PORT_8433_TCP=tcp://10.233.41.97:8433
A1POLICYMANAGEMENT_PORT_8433_TCP_ADDR=10.233.41.97
A1POLICYMANAGEMENT_PORT_8433_TCP_PORT=8433
A1POLICYMANAGEMENT_PORT_8433_TCP_PROTO=tcp
A1POLICYMANAGEMENT_SERVICE_HOST=10.233.41.97
A1POLICYMANAGEMENT_SERVICE_PORT=8433
A1POLICYMANAGEMENT_SERVICE_PORT_HTTPS_API=8433
A1POLICYMANAGEMENT_SERVICE_PORT_HTTP_API=8081
AAF_CASS_PORT=tcp://10.233.28.62:7000
AAF_CASS_PORT_7000_TCP=tcp://10.233.28.62:7000
AAF_CASS_PORT_7000_TCP_ADDR=10.233.28.62
AAF_CASS_PORT_7000_TCP_PORT=7000
AAF_CASS_PORT_7000_TCP_PROTO=tcp
AAF_CASS_PORT_7001_TCP=tcp://10.233.28.62:7001
AAF_CASS_PORT_7001_TCP_ADDR=10.233.28.62
AAF_CASS_PORT_7001_TCP_PORT=7001
AAF_CASS_PORT_7001_TCP_PROTO=tcp
AAF_CASS_PORT_9042_TCP=tcp://10.233.28.62:9042
AAF_CASS_PORT_9042_TCP_ADDR=10.233.28.62
AAF_CASS_PORT_9042_TCP_PORT=9042
AAF_CASS_PORT_9042_TCP_PROTO=tcp
AAF_CASS_PORT_9160_TCP=tcp://10.233.28.62:9160
AAF_CASS_PORT_9160_TCP_ADDR=10.233.28.62
AAF_CASS_PORT_9160_TCP_PORT=9160
AAF_CASS_PORT_9160_TCP_PROTO=tcp
AAF_CASS_SERVICE_HOST=10.233.28.62
AAF_CASS_SERVICE_PORT=7000
AAF_CASS_SERVICE_PORT_TCP_CQL=9042
AAF_CASS_SERVICE_PORT_TCP_INTRA=7000
AAF_CASS_SERVICE_PORT_TCP_THRIFT=9160
AAF_CASS_SERVICE_PORT_TLS=7001
AAF_CM_PORT=tcp://10.233.10.68:8150
AAF_CM_PORT_8150_TCP=tcp://10.233.10.68:8150
AAF_CM_PORT_8150_TCP_ADDR=10.233.10.68
AAF_CM_PORT_8150_TCP_PORT=8150
AAF_CM_PORT_8150_TCP_PROTO=tcp
AAF_CM_SERVICE_HOST=10.233.10.68
AAF_CM_SERVICE_PORT=8150
AAF_CM_SERVICE_PORT_API=8150
AAF_FS_PORT=tcp://10.233.10.159:8096
AAF_FS_PORT_8096_TCP=tcp://10.233.10.159:8096
AAF_FS_PORT_8096_TCP_ADDR=10.233.10.159
AAF_FS_PORT_8096_TCP_PORT=8096
AAF_FS_PORT_8096_TCP_PROTO=tcp
AAF_FS_SERVICE_HOST=10.233.10.159
AAF_FS_SERVICE_PORT=8096
AAF_FS_SERVICE_PORT_API=8096
AAF_GUI_PORT=tcp://10.233.31.17:8200
AAF_GUI_PORT_8200_TCP=tcp://10.233.31.17:8200
AAF_GUI_PORT_8200_TCP_ADDR=10.233.31.17
AAF_GUI_PORT_8200_TCP_PORT=8200
AAF_GUI_PORT_8200_TCP_PROTO=tcp
AAF_GUI_SERVICE_HOST=10.233.31.17
AAF_GUI_SERVICE_PORT=8200
AAF_GUI_SERVICE_PORT_GUI=8200
AAF_LOCATE_PORT=tcp://10.233.44.3:8095
AAF_LOCATE_PORT_8095_TCP=tcp://10.233.44.3:8095
AAF_LOCATE_PORT_8095_TCP_ADDR=10.233.44.3
AAF_LOCATE_PORT_8095_TCP_PORT=8095
AAF_LOCATE_PORT_8095_TCP_PROTO=tcp
AAF_LOCATE_SERVICE_HOST=10.233.44.3
AAF_LOCATE_SERVICE_PORT=8095
AAF_LOCATE_SERVICE_PORT_API=8095
AAF_OAUTH_PORT=tcp://10.233.48.12:8140
AAF_OAUTH_PORT_8140_TCP=tcp://10.233.48.12:8140
AAF_OAUTH_PORT_8140_TCP_ADDR=10.233.48.12
AAF_OAUTH_PORT_8140_TCP_PORT=8140
AAF_OAUTH_PORT_8140_TCP_PROTO=tcp
AAF_OAUTH_SERVICE_HOST=10.233.48.12
AAF_OAUTH_SERVICE_PORT=8140
AAF_OAUTH_SERVICE_PORT_API=8140
AAF_SERVICE_PORT=tcp://10.233.11.214:8100
AAF_SERVICE_PORT_8100_TCP=tcp://10.233.11.214:8100
AAF_SERVICE_PORT_8100_TCP_ADDR=10.233.11.214
AAF_SERVICE_PORT_8100_TCP_PORT=8100
AAF_SERVICE_PORT_8100_TCP_PROTO=tcp
AAF_SERVICE_SERVICE_HOST=10.233.11.214
AAF_SERVICE_SERVICE_PORT=8100
AAF_SERVICE_SERVICE_PORT_API=8100
AAF_SMS_DB_PORT=tcp://10.233.49.208:8200
AAF_SMS_DB_PORT_8200_TCP=tcp://10.233.49.208:8200
AAF_SMS_DB_PORT_8200_TCP_ADDR=10.233.49.208
AAF_SMS_DB_PORT_8200_TCP_PORT=8200
AAF_SMS_DB_PORT_8200_TCP_PROTO=tcp
AAF_SMS_DB_SERVICE_HOST=10.233.49.208
AAF_SMS_DB_SERVICE_PORT=8200
AAF_SMS_DB_SERVICE_PORT_AAF_SMS_DB=8200
AAF_SMS_PORT=tcp://10.233.48.46:10443
AAF_SMS_PORT_10443_TCP=tcp://10.233.48.46:10443
AAF_SMS_PORT_10443_TCP_ADDR=10.233.48.46
AAF_SMS_PORT_10443_TCP_PORT=10443
AAF_SMS_PORT_10443_TCP_PROTO=tcp
AAF_SMS_SERVICE_HOST=10.233.48.46
AAF_SMS_SERVICE_PORT=10443
AAI_BABEL_PORT=tcp://10.233.50.128:9516
AAI_BABEL_PORT_9516_TCP=tcp://10.233.50.128:9516
AAI_BABEL_PORT_9516_TCP_ADDR=10.233.50.128
AAI_BABEL_PORT_9516_TCP_PORT=9516
AAI_BABEL_PORT_9516_TCP_PROTO=tcp
AAI_BABEL_SERVICE_HOST=10.233.50.128
AAI_BABEL_SERVICE_PORT=9516
AAI_BABEL_SERVICE_PORT_HTTPS=9516
AAI_GRAPHADMIN_PORT=tcp://10.233.10.117:8449
AAI_GRAPHADMIN_PORT_5005_TCP=tcp://10.233.10.117:5005
AAI_GRAPHADMIN_PORT_5005_TCP_ADDR=10.233.10.117
AAI_GRAPHADMIN_PORT_5005_TCP_PORT=5005
AAI_GRAPHADMIN_PORT_5005_TCP_PROTO=tcp
AAI_GRAPHADMIN_PORT_8449_TCP=tcp://10.233.10.117:8449
AAI_GRAPHADMIN_PORT_8449_TCP_ADDR=10.233.10.117
AAI_GRAPHADMIN_PORT_8449_TCP_PORT=8449
AAI_GRAPHADMIN_PORT_8449_TCP_PROTO=tcp
AAI_GRAPHADMIN_SERVICE_HOST=10.233.10.117
AAI_GRAPHADMIN_SERVICE_PORT=8449
AAI_GRAPHADMIN_SERVICE_PORT_HTTPS=8449
AAI_GRAPHADMIN_SERVICE_PORT_TCP_5005=5005
AAI_MODELLOADER_PORT=tcp://10.233.24.21:8080
AAI_MODELLOADER_PORT_8080_TCP=tcp://10.233.24.21:8080
AAI_MODELLOADER_PORT_8080_TCP_ADDR=10.233.24.21
AAI_MODELLOADER_PORT_8080_TCP_PORT=8080
AAI_MODELLOADER_PORT_8080_TCP_PROTO=tcp
AAI_MODELLOADER_PORT_8443_TCP=tcp://10.233.24.21:8443
AAI_MODELLOADER_PORT_8443_TCP_ADDR=10.233.24.21
AAI_MODELLOADER_PORT_8443_TCP_PORT=8443
AAI_MODELLOADER_PORT_8443_TCP_PROTO=tcp
AAI_MODELLOADER_SERVICE_HOST=10.233.24.21
AAI_MODELLOADER_SERVICE_PORT=8080
AAI_MODELLOADER_SERVICE_PORT_HTTP=8080
AAI_MODELLOADER_SERVICE_PORT_HTTPS=8443
AAI_PORT=tcp://10.233.48.89:8443
AAI_PORT_8443_TCP=tcp://10.233.48.89:8443
AAI_PORT_8443_TCP_ADDR=10.233.48.89
AAI_PORT_8443_TCP_PORT=8443
AAI_PORT_8443_TCP_PROTO=tcp
AAI_RESOURCES_PORT=tcp://10.233.48.178:8447
AAI_RESOURCES_PORT_5005_TCP=tcp://10.233.48.178:5005
AAI_RESOURCES_PORT_5005_TCP_ADDR=10.233.48.178
AAI_RESOURCES_PORT_5005_TCP_PORT=5005
AAI_RESOURCES_PORT_5005_TCP_PROTO=tcp
AAI_RESOURCES_PORT_8447_TCP=tcp://10.233.48.178:8447
AAI_RESOURCES_PORT_8447_TCP_ADDR=10.233.48.178
AAI_RESOURCES_PORT_8447_TCP_PORT=8447
AAI_RESOURCES_PORT_8447_TCP_PROTO=tcp
AAI_RESOURCES_SERVICE_HOST=10.233.48.178
AAI_RESOURCES_SERVICE_PORT=8447
AAI_RESOURCES_SERVICE_PORT_HTTPS=8447
AAI_RESOURCES_SERVICE_PORT_TCP_5005=5005
AAI_SCHEMA_SERVICE_PORT=tcp://10.233.3.207:8452
AAI_SCHEMA_SERVICE_PORT_5005_TCP=tcp://10.233.3.207:5005
AAI_SCHEMA_SERVICE_PORT_5005_TCP_ADDR=10.233.3.207
AAI_SCHEMA_SERVICE_PORT_5005_TCP_PORT=5005
AAI_SCHEMA_SERVICE_PORT_5005_TCP_PROTO=tcp
AAI_SCHEMA_SERVICE_PORT_8452_TCP=tcp://10.233.3.207:8452
AAI_SCHEMA_SERVICE_PORT_8452_TCP_ADDR=10.233.3.207
AAI_SCHEMA_SERVICE_PORT_8452_TCP_PORT=8452
AAI_SCHEMA_SERVICE_PORT_8452_TCP_PROTO=tcp
AAI_SCHEMA_SERVICE_SERVICE_HOST=10.233.3.207
AAI_SCHEMA_SERVICE_SERVICE_PORT=8452
AAI_SCHEMA_SERVICE_SERVICE_PORT_HTTPS=8452
AAI_SCHEMA_SERVICE_SERVICE_PORT_TCP_5005=5005
AAI_SERVICE_HOST=10.233.48.89
AAI_SERVICE_PORT=8443
AAI_SERVICE_PORT_HTTPS=8443
AAI_SPARKY_BE_PORT=tcp://10.233.42.209:8000
AAI_SPARKY_BE_PORT_8000_TCP=tcp://10.233.42.209:8000
AAI_SPARKY_BE_PORT_8000_TCP_ADDR=10.233.42.209
AAI_SPARKY_BE_PORT_8000_TCP_PORT=8000
AAI_SPARKY_BE_PORT_8000_TCP_PROTO=tcp
AAI_SPARKY_BE_SERVICE_HOST=10.233.42.209
AAI_SPARKY_BE_SERVICE_PORT=8000
AAI_SPARKY_BE_SERVICE_PORT_HTTPS=8000
AAI_TRAVERSAL_PORT=tcp://10.233.10.146:8446
AAI_TRAVERSAL_PORT_5005_TCP=tcp://10.233.10.146:5005
AAI_TRAVERSAL_PORT_5005_TCP_ADDR=10.233.10.146
AAI_TRAVERSAL_PORT_5005_TCP_PORT=5005
AAI_TRAVERSAL_PORT_5005_TCP_PROTO=tcp
AAI_TRAVERSAL_PORT_8446_TCP=tcp://10.233.10.146:8446
AAI_TRAVERSAL_PORT_8446_TCP_ADDR=10.233.10.146
AAI_TRAVERSAL_PORT_8446_TCP_PORT=8446
AAI_TRAVERSAL_PORT_8446_TCP_PROTO=tcp
AAI_TRAVERSAL_SERVICE_HOST=10.233.10.146
AAI_TRAVERSAL_SERVICE_PORT=8446
AAI_TRAVERSAL_SERVICE_PORT_HTTPS=8446
AAI_TRAVERSAL_SERVICE_PORT_TCP_5005=5005
ALLOW_UNSIGNED=false
AWX_POSTGRESQL_PORT=tcp://10.233.39.157:5432
AWX_POSTGRESQL_PORT_5432_TCP=tcp://10.233.39.157:5432
AWX_POSTGRESQL_PORT_5432_TCP_ADDR=10.233.39.157
AWX_POSTGRESQL_PORT_5432_TCP_PORT=5432
AWX_POSTGRESQL_PORT_5432_TCP_PROTO=tcp
AWX_POSTGRESQL_SERVICE_HOST=10.233.39.157
AWX_POSTGRESQL_SERVICE_PORT=5432
AWX_POSTGRESQL_SERVICE_PORT_AWX_POSTGRESQL=5432
AWX_RABBITMQ_PORT=tcp://10.233.35.40:15672
AWX_RABBITMQ_PORT_15672_TCP=tcp://10.233.35.40:15672
AWX_RABBITMQ_PORT_15672_TCP_ADDR=10.233.35.40
AWX_RABBITMQ_PORT_15672_TCP_PORT=15672
AWX_RABBITMQ_PORT_15672_TCP_PROTO=tcp
AWX_RABBITMQ_PORT_5672_TCP=tcp://10.233.35.40:5672
AWX_RABBITMQ_PORT_5672_TCP_ADDR=10.233.35.40
AWX_RABBITMQ_PORT_5672_TCP_PORT=5672
AWX_RABBITMQ_PORT_5672_TCP_PROTO=tcp
AWX_RABBITMQ_SERVICE_HOST=10.233.35.40
AWX_RABBITMQ_SERVICE_PORT=15672
AWX_RABBITMQ_SERVICE_PORT_AMQP=5672
AWX_RABBITMQ_SERVICE_PORT_HTTP=15672
AWX_RMQ_MGMT_PORT=tcp://10.233.57.136:15672
AWX_RMQ_MGMT_PORT_15672_TCP=tcp://10.233.57.136:15672
AWX_RMQ_MGMT_PORT_15672_TCP_ADDR=10.233.57.136
AWX_RMQ_MGMT_PORT_15672_TCP_PORT=15672
AWX_RMQ_MGMT_PORT_15672_TCP_PROTO=tcp
AWX_RMQ_MGMT_SERVICE_HOST=10.233.57.136
AWX_RMQ_MGMT_SERVICE_PORT=15672
AWX_RMQ_MGMT_SERVICE_PORT_RMQMGMT=15672
AWX_WEB_PORT=tcp://10.233.57.46:8052
AWX_WEB_PORT_8052_TCP=tcp://10.233.57.46:8052
AWX_WEB_PORT_8052_TCP_ADDR=10.233.57.46
AWX_WEB_PORT_8052_TCP_PORT=8052
AWX_WEB_PORT_8052_TCP_PROTO=tcp
AWX_WEB_SERVICE_HOST=10.233.57.46
AWX_WEB_SERVICE_PORT=8052
AWX_WEB_SERVICE_PORT_WEB=8052
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT=tcp://10.233.44.126:5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP=tcp://10.233.44.126:5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_ADDR=10.233.44.126
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PORT=5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_HOST=10.233.44.126
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT=5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT_BLUEPRINTS_PROCESSOR_CLUSTER=5701
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT=tcp://10.233.3.224:9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP=tcp://10.233.3.224:9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_ADDR=10.233.3.224
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PORT=9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_HOST=10.233.3.224
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT=9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT_BLUEPRINTS_PROCESSOR_GRPC=9111
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT=tcp://10.233.22.219:8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP=tcp://10.233.22.219:8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_ADDR=10.233.22.219
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PORT=8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_HOST=10.233.22.219
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT=8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT_BLUEPRINTS_PROCESSOR_HTTP=8080
CDS_COMMAND_EXECUTOR_PORT=tcp://10.233.51.163:50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP=tcp://10.233.51.163:50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_ADDR=10.233.51.163
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PORT=50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PROTO=tcp
CDS_COMMAND_EXECUTOR_SERVICE_HOST=10.233.51.163
CDS_COMMAND_EXECUTOR_SERVICE_PORT=50051
CDS_COMMAND_EXECUTOR_SERVICE_PORT_COMMAND_EXECUTOR_GRPC=50051
CDS_DB_PORT=tcp://10.233.50.92:3306
CDS_DB_PORT_3306_TCP=tcp://10.233.50.92:3306
CDS_DB_PORT_3306_TCP_ADDR=10.233.50.92
CDS_DB_PORT_3306_TCP_PORT=3306
CDS_DB_PORT_3306_TCP_PROTO=tcp
CDS_DB_SERVICE_HOST=10.233.50.92
CDS_DB_SERVICE_PORT=3306
CDS_DB_SERVICE_PORT_MYSQL=3306
CDS_PY_EXECUTOR_PORT=tcp://10.233.43.51:50052
CDS_PY_EXECUTOR_PORT_50052_TCP=tcp://10.233.43.51:50052
CDS_PY_EXECUTOR_PORT_50052_TCP_ADDR=10.233.43.51
CDS_PY_EXECUTOR_PORT_50052_TCP_PORT=50052
CDS_PY_EXECUTOR_PORT_50052_TCP_PROTO=tcp
CDS_PY_EXECUTOR_PORT_50053_TCP=tcp://10.233.43.51:50053
CDS_PY_EXECUTOR_PORT_50053_TCP_ADDR=10.233.43.51
CDS_PY_EXECUTOR_PORT_50053_TCP_PORT=50053
CDS_PY_EXECUTOR_PORT_50053_TCP_PROTO=tcp
CDS_PY_EXECUTOR_SERVICE_HOST=10.233.43.51
CDS_PY_EXECUTOR_SERVICE_PORT=50052
CDS_PY_EXECUTOR_SERVICE_PORT_EXECUTOR_GRPC=50052
CDS_PY_EXECUTOR_SERVICE_PORT_MANAGER_GRPC=50053
CDS_SDC_LISTENER_PORT=tcp://10.233.31.135:8080
CDS_SDC_LISTENER_PORT_8080_TCP=tcp://10.233.31.135:8080
CDS_SDC_LISTENER_PORT_8080_TCP_ADDR=10.233.31.135
CDS_SDC_LISTENER_PORT_8080_TCP_PORT=8080
CDS_SDC_LISTENER_PORT_8080_TCP_PROTO=tcp
CDS_SDC_LISTENER_SERVICE_HOST=10.233.31.135
CDS_SDC_LISTENER_SERVICE_PORT=8080
CDS_SDC_LISTENER_SERVICE_PORT_CDS_SDC_LISTENER_HTTP=8080
CDS_UI_PORT=tcp://10.233.34.223:3000
CDS_UI_PORT_3000_TCP=tcp://10.233.34.223:3000
CDS_UI_PORT_3000_TCP_ADDR=10.233.34.223
CDS_UI_PORT_3000_TCP_PORT=3000
CDS_UI_PORT_3000_TCP_PROTO=tcp
CDS_UI_SERVICE_HOST=10.233.34.223
CDS_UI_SERVICE_PORT=3000
CDS_UI_SERVICE_PORT_CDS_UI_3000=3000
CHART_MUSEUM_PORT=tcp://10.233.46.102:80
CHART_MUSEUM_PORT_80_TCP=tcp://10.233.46.102:80
CHART_MUSEUM_PORT_80_TCP_ADDR=10.233.46.102
CHART_MUSEUM_PORT_80_TCP_PORT=80
CHART_MUSEUM_PORT_80_TCP_PROTO=tcp
CHART_MUSEUM_SERVICE_HOST=10.233.46.102
CHART_MUSEUM_SERVICE_PORT=80
CHART_MUSEUM_SERVICE_PORT_HTTP=80
CLI_PORT=tcp://10.233.39.167:443
CLI_PORT_443_TCP=tcp://10.233.39.167:443
CLI_PORT_443_TCP_ADDR=10.233.39.167
CLI_PORT_443_TCP_PORT=443
CLI_PORT_443_TCP_PROTO=tcp
CLI_PORT_9090_TCP=tcp://10.233.39.167:9090
CLI_PORT_9090_TCP_ADDR=10.233.39.167
CLI_PORT_9090_TCP_PORT=9090
CLI_PORT_9090_TCP_PROTO=tcp
CLI_SERVICE_HOST=10.233.39.167
CLI_SERVICE_PORT=443
CLI_SERVICE_PORT_CLI443=443
CLI_SERVICE_PORT_CLI9090=9090
COMPONENT=kafka
CONFIG_BINDING_SERVICE_PORT=tcp://10.233.32.24:10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP=tcp://10.233.32.24:10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP_ADDR=10.233.32.24
CONFIG_BINDING_SERVICE_PORT_10000_TCP_PORT=10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP_PROTO=tcp
CONFIG_BINDING_SERVICE_PORT_10443_TCP=tcp://10.233.32.24:10443
CONFIG_BINDING_SERVICE_PORT_10443_TCP_ADDR=10.233.32.24
CONFIG_BINDING_SERVICE_PORT_10443_TCP_PORT=10443
CONFIG_BINDING_SERVICE_PORT_10443_TCP_PROTO=tcp
CONFIG_BINDING_SERVICE_SERVICE_HOST=10.233.32.24
CONFIG_BINDING_SERVICE_SERVICE_PORT=10000
CONFIG_BINDING_SERVICE_SERVICE_PORT_CONFIG_BINDING_SERVICE_INSECURE=10000
CONFIG_BINDING_SERVICE_SERVICE_PORT_CONFIG_BINDING_SERVICE_SECURE=10443
CONFLUENT_DEB_VERSION=1
CONFLUENT_MAJOR_VERSION=5
CONFLUENT_MINOR_VERSION=3
CONFLUENT_MVN_LABEL=
CONFLUENT_PATCH_VERSION=1
CONFLUENT_PLATFORM_LABEL=
CONFLUENT_VERSION=5.3.1
CONSUL_SERVER_UI_PORT=tcp://10.233.11.251:8500
CONSUL_SERVER_UI_PORT_8500_TCP=tcp://10.233.11.251:8500
CONSUL_SERVER_UI_PORT_8500_TCP_ADDR=10.233.11.251
CONSUL_SERVER_UI_PORT_8500_TCP_PORT=8500
CONSUL_SERVER_UI_PORT_8500_TCP_PROTO=tcp
CONSUL_SERVER_UI_SERVICE_HOST=10.233.11.251
CONSUL_SERVER_UI_SERVICE_PORT=8500
CONSUL_SERVER_UI_SERVICE_PORT_CONSUL_UI=8500
CPS_CORE_PG_PRIMARY_PORT=tcp://10.233.34.216:5432
CPS_CORE_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.34.216:5432
CPS_CORE_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.34.216
CPS_CORE_PG_PRIMARY_PORT_5432_TCP_PORT=5432
CPS_CORE_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
CPS_CORE_PG_PRIMARY_SERVICE_HOST=10.233.34.216
CPS_CORE_PG_PRIMARY_SERVICE_PORT=5432
CPS_CORE_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
CPS_CORE_PG_REPLICA_PORT=tcp://10.233.62.183:5432
CPS_CORE_PG_REPLICA_PORT_5432_TCP=tcp://10.233.62.183:5432
CPS_CORE_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.62.183
CPS_CORE_PG_REPLICA_PORT_5432_TCP_PORT=5432
CPS_CORE_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
CPS_CORE_PG_REPLICA_SERVICE_HOST=10.233.62.183
CPS_CORE_PG_REPLICA_SERVICE_PORT=5432
CPS_CORE_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
CPS_CORE_PORT=tcp://10.233.39.39:8080
CPS_CORE_PORT_8080_TCP=tcp://10.233.39.39:8080
CPS_CORE_PORT_8080_TCP_ADDR=10.233.39.39
CPS_CORE_PORT_8080_TCP_PORT=8080
CPS_CORE_PORT_8080_TCP_PROTO=tcp
CPS_CORE_PORT_8081_TCP=tcp://10.233.39.39:8081
CPS_CORE_PORT_8081_TCP_ADDR=10.233.39.39
CPS_CORE_PORT_8081_TCP_PORT=8081
CPS_CORE_PORT_8081_TCP_PROTO=tcp
CPS_CORE_POSTGRES_PORT=tcp://10.233.16.136:5432
CPS_CORE_POSTGRES_PORT_5432_TCP=tcp://10.233.16.136:5432
CPS_CORE_POSTGRES_PORT_5432_TCP_ADDR=10.233.16.136
CPS_CORE_POSTGRES_PORT_5432_TCP_PORT=5432
CPS_CORE_POSTGRES_PORT_5432_TCP_PROTO=tcp
CPS_CORE_POSTGRES_SERVICE_HOST=10.233.16.136
CPS_CORE_POSTGRES_SERVICE_PORT=5432
CPS_CORE_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
CPS_CORE_SERVICE_HOST=10.233.39.39
CPS_CORE_SERVICE_PORT=8080
CPS_CORE_SERVICE_PORT_HTTP=8080
CPS_CORE_SERVICE_PORT_MANAGEMENT=8081
CPS_TEMPORAL_DB_PORT=tcp://10.233.1.30:5432
CPS_TEMPORAL_DB_PORT_5432_TCP=tcp://10.233.1.30:5432
CPS_TEMPORAL_DB_PORT_5432_TCP_ADDR=10.233.1.30
CPS_TEMPORAL_DB_PORT_5432_TCP_PORT=5432
CPS_TEMPORAL_DB_PORT_5432_TCP_PROTO=tcp
CPS_TEMPORAL_DB_SERVICE_HOST=10.233.1.30
CPS_TEMPORAL_DB_SERVICE_PORT=5432
CPS_TEMPORAL_DB_SERVICE_PORT_TCP_TIMESCALEDB=5432
CPS_TEMPORAL_PORT=tcp://10.233.23.231:8080
CPS_TEMPORAL_PORT_8080_TCP=tcp://10.233.23.231:8080
CPS_TEMPORAL_PORT_8080_TCP_ADDR=10.233.23.231
CPS_TEMPORAL_PORT_8080_TCP_PORT=8080
CPS_TEMPORAL_PORT_8080_TCP_PROTO=tcp
CPS_TEMPORAL_PORT_8081_TCP=tcp://10.233.23.231:8081
CPS_TEMPORAL_PORT_8081_TCP_ADDR=10.233.23.231
CPS_TEMPORAL_PORT_8081_TCP_PORT=8081
CPS_TEMPORAL_PORT_8081_TCP_PROTO=tcp
CPS_TEMPORAL_SERVICE_HOST=10.233.23.231
CPS_TEMPORAL_SERVICE_PORT=8080
CPS_TEMPORAL_SERVICE_PORT_HTTP=8080
CPS_TEMPORAL_SERVICE_PORT_MANAGEMENT=8081
CUB_CLASSPATH=/etc/confluent/docker/docker-utils.jar
DASHBOARD_PORT=tcp://10.233.18.223:8443
DASHBOARD_PORT_8443_TCP=tcp://10.233.18.223:8443
DASHBOARD_PORT_8443_TCP_ADDR=10.233.18.223
DASHBOARD_PORT_8443_TCP_PORT=8443
DASHBOARD_PORT_8443_TCP_PROTO=tcp
DASHBOARD_SERVICE_HOST=10.233.18.223
DASHBOARD_SERVICE_PORT=8443
DASHBOARD_SERVICE_PORT_DASHBOARD=8443
DBC_PG_PRIMARY_PORT=tcp://10.233.2.166:5432
DBC_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.2.166:5432
DBC_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.2.166
DBC_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DBC_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DBC_PG_PRIMARY_SERVICE_HOST=10.233.2.166
DBC_PG_PRIMARY_SERVICE_PORT=5432
DBC_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DBC_PG_REPLICA_PORT=tcp://10.233.35.185:5432
DBC_PG_REPLICA_PORT_5432_TCP=tcp://10.233.35.185:5432
DBC_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.35.185
DBC_PG_REPLICA_PORT_5432_TCP_PORT=5432
DBC_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DBC_PG_REPLICA_SERVICE_HOST=10.233.35.185
DBC_PG_REPLICA_SERVICE_PORT=5432
DBC_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DBC_POSTGRES_PORT=tcp://10.233.33.50:5432
DBC_POSTGRES_PORT_5432_TCP=tcp://10.233.33.50:5432
DBC_POSTGRES_PORT_5432_TCP_ADDR=10.233.33.50
DBC_POSTGRES_PORT_5432_TCP_PORT=5432
DBC_POSTGRES_PORT_5432_TCP_PROTO=tcp
DBC_POSTGRES_SERVICE_HOST=10.233.33.50
DBC_POSTGRES_SERVICE_PORT=5432
DBC_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_DESIGNTOOL_PORT=tcp://10.233.7.95:8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP=tcp://10.233.7.95:8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_ADDR=10.233.7.95
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_PORT=8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_PROTO=tcp
DCAEMOD_DESIGNTOOL_SERVICE_HOST=10.233.7.95
DCAEMOD_DESIGNTOOL_SERVICE_PORT=8080
DCAEMOD_DESIGNTOOL_SERVICE_PORT_HTTP=8080
DCAEMOD_DISTRIBUTOR_API_PORT=tcp://10.233.6.220:8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP=tcp://10.233.6.220:8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_ADDR=10.233.6.220
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_PORT=8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_PROTO=tcp
DCAEMOD_DISTRIBUTOR_API_SERVICE_HOST=10.233.6.220
DCAEMOD_DISTRIBUTOR_API_SERVICE_PORT=8080
DCAEMOD_DISTRIBUTOR_API_SERVICE_PORT_HTTP=8080
DCAEMOD_GENPROCESSOR_PORT=tcp://10.233.10.55:8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP=tcp://10.233.10.55:8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_ADDR=10.233.10.55
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_PORT=8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_PROTO=tcp
DCAEMOD_GENPROCESSOR_SERVICE_HOST=10.233.10.55
DCAEMOD_GENPROCESSOR_SERVICE_PORT=8080
DCAEMOD_GENPROCESSOR_SERVICE_PORT_HTTP=8080
DCAEMOD_HEALTHCHECK_PORT=tcp://10.233.13.230:8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP=tcp://10.233.13.230:8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_ADDR=10.233.13.230
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_PORT=8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_PROTO=tcp
DCAEMOD_HEALTHCHECK_SERVICE_HOST=10.233.13.230
DCAEMOD_HEALTHCHECK_SERVICE_PORT=8080
DCAEMOD_HEALTHCHECK_SERVICE_PORT_HTTP=8080
DCAEMOD_NIFI_REGISTRY_PORT=tcp://10.233.46.128:18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP=tcp://10.233.46.128:18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_ADDR=10.233.46.128
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_PORT=18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_PROTO=tcp
DCAEMOD_NIFI_REGISTRY_SERVICE_HOST=10.233.46.128
DCAEMOD_NIFI_REGISTRY_SERVICE_PORT=18080
DCAEMOD_NIFI_REGISTRY_SERVICE_PORT_HTTP=18080
DCAEMOD_ONBOARDING_API_PORT=tcp://10.233.16.176:8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP=tcp://10.233.16.176:8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_ADDR=10.233.16.176
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_PORT=8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_PROTO=tcp
DCAEMOD_ONBOARDING_API_SERVICE_HOST=10.233.16.176
DCAEMOD_ONBOARDING_API_SERVICE_PORT=8080
DCAEMOD_ONBOARDING_API_SERVICE_PORT_HTTP=8080
DCAEMOD_PG_PRIMARY_PORT=tcp://10.233.61.167:5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.61.167:5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.61.167
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAEMOD_PG_PRIMARY_SERVICE_HOST=10.233.61.167
DCAEMOD_PG_PRIMARY_SERVICE_PORT=5432
DCAEMOD_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_PG_REPLICA_PORT=tcp://10.233.15.244:5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP=tcp://10.233.15.244:5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.15.244
DCAEMOD_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAEMOD_PG_REPLICA_SERVICE_HOST=10.233.15.244
DCAEMOD_PG_REPLICA_SERVICE_PORT=5432
DCAEMOD_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_POSTGRES_PORT=tcp://10.233.39.156:5432
DCAEMOD_POSTGRES_PORT_5432_TCP=tcp://10.233.39.156:5432
DCAEMOD_POSTGRES_PORT_5432_TCP_ADDR=10.233.39.156
DCAEMOD_POSTGRES_PORT_5432_TCP_PORT=5432
DCAEMOD_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAEMOD_POSTGRES_SERVICE_HOST=10.233.39.156
DCAEMOD_POSTGRES_SERVICE_PORT=5432
DCAEMOD_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_RUNTIME_API_PORT=tcp://10.233.54.172:9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP=tcp://10.233.54.172:9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP_ADDR=10.233.54.172
DCAEMOD_RUNTIME_API_PORT_9090_TCP_PORT=9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP_PROTO=tcp
DCAEMOD_RUNTIME_API_SERVICE_HOST=10.233.54.172
DCAEMOD_RUNTIME_API_SERVICE_PORT=9090
DCAEMOD_RUNTIME_API_SERVICE_PORT_HTTP=9090
DCAE_CLOUDIFY_MANAGER_PORT=tcp://10.233.28.234:443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP=tcp://10.233.28.234:443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_ADDR=10.233.28.234
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_PORT=443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_PROTO=tcp
DCAE_CLOUDIFY_MANAGER_SERVICE_HOST=10.233.28.234
DCAE_CLOUDIFY_MANAGER_SERVICE_PORT=443
DCAE_CLOUDIFY_MANAGER_SERVICE_PORT_DCAE_CLOUDIFY_MANAGER=443
DCAE_DASHBOARD_PG_PRIMARY_PORT=tcp://10.233.28.166:5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.28.166:5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.28.166
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_HOST=10.233.28.166
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_PORT=5432
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_DASHBOARD_PG_REPLICA_PORT=tcp://10.233.26.30:5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP=tcp://10.233.26.30:5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.26.30
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_PG_REPLICA_SERVICE_HOST=10.233.26.30
DCAE_DASHBOARD_PG_REPLICA_SERVICE_PORT=5432
DCAE_DASHBOARD_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_DASHBOARD_POSTGRES_PORT=tcp://10.233.40.143:5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP=tcp://10.233.40.143:5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_ADDR=10.233.40.143
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_POSTGRES_SERVICE_HOST=10.233.40.143
DCAE_DASHBOARD_POSTGRES_SERVICE_PORT=5432
DCAE_DASHBOARD_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_HEALTHCHECK_PORT=tcp://10.233.24.221:80
DCAE_HEALTHCHECK_PORT_80_TCP=tcp://10.233.24.221:80
DCAE_HEALTHCHECK_PORT_80_TCP_ADDR=10.233.24.221
DCAE_HEALTHCHECK_PORT_80_TCP_PORT=80
DCAE_HEALTHCHECK_PORT_80_TCP_PROTO=tcp
DCAE_HEALTHCHECK_SERVICE_HOST=10.233.24.221
DCAE_HEALTHCHECK_SERVICE_PORT=80
DCAE_HEALTHCHECK_SERVICE_PORT_DCAE_HEALTHCHECK=80
DCAE_HV_VES_COLLECTOR_PORT=tcp://10.233.34.224:6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP=tcp://10.233.34.224:6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_ADDR=10.233.34.224
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_PORT=6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_PROTO=tcp
DCAE_HV_VES_COLLECTOR_SERVICE_HOST=10.233.34.224
DCAE_HV_VES_COLLECTOR_SERVICE_PORT=6061
DCAE_HV_VES_COLLECTOR_SERVICE_PORT_HTTPS_HTTP=6061
DCAE_INV_PG_PRIMARY_PORT=tcp://10.233.43.53:5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.43.53:5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.43.53
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_INV_PG_PRIMARY_SERVICE_HOST=10.233.43.53
DCAE_INV_PG_PRIMARY_SERVICE_PORT=5432
DCAE_INV_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_INV_PG_REPLICA_PORT=tcp://10.233.35.188:5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP=tcp://10.233.35.188:5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.35.188
DCAE_INV_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_INV_PG_REPLICA_SERVICE_HOST=10.233.35.188
DCAE_INV_PG_REPLICA_SERVICE_PORT=5432
DCAE_INV_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_INV_POSTGRES_PORT=tcp://10.233.8.99:5432
DCAE_INV_POSTGRES_PORT_5432_TCP=tcp://10.233.8.99:5432
DCAE_INV_POSTGRES_PORT_5432_TCP_ADDR=10.233.8.99
DCAE_INV_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_INV_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_INV_POSTGRES_SERVICE_HOST=10.233.8.99
DCAE_INV_POSTGRES_SERVICE_PORT=5432
DCAE_INV_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_MONGOHOST_READ_PORT=tcp://10.233.24.78:27017
DCAE_MONGOHOST_READ_PORT_27017_TCP=tcp://10.233.24.78:27017
DCAE_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.233.24.78
DCAE_MONGOHOST_READ_PORT_27017_TCP_PORT=27017
DCAE_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp
DCAE_MONGOHOST_READ_SERVICE_HOST=10.233.24.78
DCAE_MONGOHOST_READ_SERVICE_PORT=27017
DCAE_MONGOHOST_READ_SERVICE_PORT_MONGO=27017
DCAE_MS_HEALTHCHECK_PORT=tcp://10.233.26.176:8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP=tcp://10.233.26.176:8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_ADDR=10.233.26.176
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_PORT=8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_PROTO=tcp
DCAE_MS_HEALTHCHECK_SERVICE_HOST=10.233.26.176
DCAE_MS_HEALTHCHECK_SERVICE_PORT=8080
DCAE_MS_HEALTHCHECK_SERVICE_PORT_HTTP=8080
DCAE_PG_PRIMARY_PORT=tcp://10.233.55.1:5432
DCAE_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.55.1:5432
DCAE_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.55.1
DCAE_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_PG_PRIMARY_SERVICE_HOST=10.233.55.1
DCAE_PG_PRIMARY_SERVICE_PORT=5432
DCAE_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_PG_REPLICA_PORT=tcp://10.233.36.252:5432
DCAE_PG_REPLICA_PORT_5432_TCP=tcp://10.233.36.252:5432
DCAE_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.36.252
DCAE_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_PG_REPLICA_SERVICE_HOST=10.233.36.252
DCAE_PG_REPLICA_SERVICE_PORT=5432
DCAE_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_POSTGRES_PORT=tcp://10.233.38.254:5432
DCAE_POSTGRES_PORT_5432_TCP=tcp://10.233.38.254:5432
DCAE_POSTGRES_PORT_5432_TCP_ADDR=10.233.38.254
DCAE_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_POSTGRES_SERVICE_HOST=10.233.38.254
DCAE_POSTGRES_SERVICE_PORT=5432
DCAE_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_PRH_PORT=tcp://10.233.37.163:8100
DCAE_PRH_PORT_8100_TCP=tcp://10.233.37.163:8100
DCAE_PRH_PORT_8100_TCP_ADDR=10.233.37.163
DCAE_PRH_PORT_8100_TCP_PORT=8100
DCAE_PRH_PORT_8100_TCP_PROTO=tcp
DCAE_PRH_SERVICE_HOST=10.233.37.163
DCAE_PRH_SERVICE_PORT=8100
DCAE_PRH_SERVICE_PORT_HTTP=8100
DCAE_TCAGEN2_PORT=tcp://10.233.49.181:9091
DCAE_TCAGEN2_PORT_9091_TCP=tcp://10.233.49.181:9091
DCAE_TCAGEN2_PORT_9091_TCP_ADDR=10.233.49.181
DCAE_TCAGEN2_PORT_9091_TCP_PORT=9091
DCAE_TCAGEN2_PORT_9091_TCP_PROTO=tcp
DCAE_TCAGEN2_SERVICE_HOST=10.233.49.181
DCAE_TCAGEN2_SERVICE_PORT=9091
DCAE_TCAGEN2_SERVICE_PORT_HTTP=9091
DCAE_VES_COLLECTOR_PORT=tcp://10.233.43.254:8443
DCAE_VES_COLLECTOR_PORT_8443_TCP=tcp://10.233.43.254:8443
DCAE_VES_COLLECTOR_PORT_8443_TCP_ADDR=10.233.43.254
DCAE_VES_COLLECTOR_PORT_8443_TCP_PORT=8443
DCAE_VES_COLLECTOR_PORT_8443_TCP_PROTO=tcp
DCAE_VES_COLLECTOR_SERVICE_HOST=10.233.43.254
DCAE_VES_COLLECTOR_SERVICE_PORT=8443
DCAE_VES_COLLECTOR_SERVICE_PORT_HTTPS_HTTP=8443
DEPLOYMENT_HANDLER_PORT=tcp://10.233.36.94:8443
DEPLOYMENT_HANDLER_PORT_8443_TCP=tcp://10.233.36.94:8443
DEPLOYMENT_HANDLER_PORT_8443_TCP_ADDR=10.233.36.94
DEPLOYMENT_HANDLER_PORT_8443_TCP_PORT=8443
DEPLOYMENT_HANDLER_PORT_8443_TCP_PROTO=tcp
DEPLOYMENT_HANDLER_SERVICE_HOST=10.233.36.94
DEPLOYMENT_HANDLER_SERVICE_PORT=8443
DEPLOYMENT_HANDLER_SERVICE_PORT_DEPLOYMENT_HANDLER=8443
DMAAP_BC_PORT=tcp://10.233.32.34:8443
DMAAP_BC_PORT_8443_TCP=tcp://10.233.32.34:8443
DMAAP_BC_PORT_8443_TCP_ADDR=10.233.32.34
DMAAP_BC_PORT_8443_TCP_PORT=8443
DMAAP_BC_PORT_8443_TCP_PROTO=tcp
DMAAP_BC_SERVICE_HOST=10.233.32.34
DMAAP_BC_SERVICE_PORT=8443
DMAAP_BC_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_NODE_EXTERNAL_PORT=tcp://10.233.17.140:8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP=tcp://10.233.17.140:8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_ADDR=10.233.17.140
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PORT=8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PROTO=tcp
DMAAP_DR_NODE_EXTERNAL_SERVICE_HOST=10.233.17.140
DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT=8443
DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_NODE_PORT=tcp://10.233.18.226:8443
DMAAP_DR_NODE_PORT_8080_TCP=tcp://10.233.18.226:8080
DMAAP_DR_NODE_PORT_8080_TCP_ADDR=10.233.18.226
DMAAP_DR_NODE_PORT_8080_TCP_PORT=8080
DMAAP_DR_NODE_PORT_8080_TCP_PROTO=tcp
DMAAP_DR_NODE_PORT_8443_TCP=tcp://10.233.18.226:8443
DMAAP_DR_NODE_PORT_8443_TCP_ADDR=10.233.18.226
DMAAP_DR_NODE_PORT_8443_TCP_PORT=8443
DMAAP_DR_NODE_PORT_8443_TCP_PROTO=tcp
DMAAP_DR_NODE_SERVICE_HOST=10.233.18.226
DMAAP_DR_NODE_SERVICE_PORT=8443
DMAAP_DR_NODE_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_NODE_SERVICE_PORT_HTTP_API=8080
DMAAP_DR_PROV_PORT=tcp://10.233.31.213:443
DMAAP_DR_PROV_PORT_443_TCP=tcp://10.233.31.213:443
DMAAP_DR_PROV_PORT_443_TCP_ADDR=10.233.31.213
DMAAP_DR_PROV_PORT_443_TCP_PORT=443
DMAAP_DR_PROV_PORT_443_TCP_PROTO=tcp
DMAAP_DR_PROV_SERVICE_HOST=10.233.31.213
DMAAP_DR_PROV_SERVICE_PORT=443
DMAAP_DR_PROV_SERVICE_PORT_DR_PROV_PORT2=443
EJBCA_PORT=tcp://10.233.13.59:8443
EJBCA_PORT_8080_TCP=tcp://10.233.13.59:8080
EJBCA_PORT_8080_TCP_ADDR=10.233.13.59
EJBCA_PORT_8080_TCP_PORT=8080
EJBCA_PORT_8080_TCP_PROTO=tcp
EJBCA_PORT_8443_TCP=tcp://10.233.13.59:8443
EJBCA_PORT_8443_TCP_ADDR=10.233.13.59
EJBCA_PORT_8443_TCP_PORT=8443
EJBCA_PORT_8443_TCP_PROTO=tcp
EJBCA_SERVICE_HOST=10.233.13.59
EJBCA_SERVICE_PORT=8443
EJBCA_SERVICE_PORT_HTTPS_API=8443
EJBCA_SERVICE_PORT_HTTP_API=8080
HOLMES_ENGINE_MGMT_PORT=tcp://10.233.45.160:9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP=tcp://10.233.45.160:9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP_ADDR=10.233.45.160
HOLMES_ENGINE_MGMT_PORT_9102_TCP_PORT=9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP_PROTO=tcp
HOLMES_ENGINE_MGMT_SERVICE_HOST=10.233.45.160
HOLMES_ENGINE_MGMT_SERVICE_PORT=9102
HOLMES_ENGINE_MGMT_SERVICE_PORT_HTTPS_REST=9102
HOLMES_POSTGRES_PORT=tcp://10.233.40.224:5432
HOLMES_POSTGRES_PORT_5432_TCP=tcp://10.233.40.224:5432
HOLMES_POSTGRES_PORT_5432_TCP_ADDR=10.233.40.224
HOLMES_POSTGRES_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_PRIMARY_PORT=tcp://10.233.15.228:5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP=tcp://10.233.15.228:5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_ADDR=10.233.15.228
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_PRIMARY_SERVICE_HOST=10.233.15.228
HOLMES_POSTGRES_PRIMARY_SERVICE_PORT=5432
HOLMES_POSTGRES_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_POSTGRES_REPLICA_PORT=tcp://10.233.56.44:5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP=tcp://10.233.56.44:5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_ADDR=10.233.56.44
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_REPLICA_SERVICE_HOST=10.233.56.44
HOLMES_POSTGRES_REPLICA_SERVICE_PORT=5432
HOLMES_POSTGRES_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_POSTGRES_SERVICE_HOST=10.233.40.224
HOLMES_POSTGRES_SERVICE_PORT=5432
HOLMES_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_RULE_MGMT_PORT=tcp://10.233.11.115:9101
HOLMES_RULE_MGMT_PORT_9101_TCP=tcp://10.233.11.115:9101
HOLMES_RULE_MGMT_PORT_9101_TCP_ADDR=10.233.11.115
HOLMES_RULE_MGMT_PORT_9101_TCP_PORT=9101
HOLMES_RULE_MGMT_PORT_9101_TCP_PROTO=tcp
HOLMES_RULE_MGMT_PORT_9104_TCP=tcp://10.233.11.115:9104
HOLMES_RULE_MGMT_PORT_9104_TCP_ADDR=10.233.11.115
HOLMES_RULE_MGMT_PORT_9104_TCP_PORT=9104
HOLMES_RULE_MGMT_PORT_9104_TCP_PROTO=tcp
HOLMES_RULE_MGMT_SERVICE_HOST=10.233.11.115
HOLMES_RULE_MGMT_SERVICE_PORT=9101
HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_REST=9101
HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_UI=9104
HOME=/home/mrkafka
HOSTNAME=onap-message-router-kafka-2
HOST_IP=10.253.0.233
INVENTORY_PORT=tcp://10.233.54.132:8080
INVENTORY_PORT_8080_TCP=tcp://10.233.54.132:8080
INVENTORY_PORT_8080_TCP_ADDR=10.233.54.132
INVENTORY_PORT_8080_TCP_PORT=8080
INVENTORY_PORT_8080_TCP_PROTO=tcp
INVENTORY_SERVICE_HOST=10.233.54.132
INVENTORY_SERVICE_PORT=8080
INVENTORY_SERVICE_PORT_INVENTORY=8080
KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://10.253.0.233:30492,INTERNAL_SASL_PLAINTEXT://:9092
KAFKA_AUTHORIZER_CLASS_NAME=org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
KAFKA_BROKER_ID=2
KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE=false
KAFKA_DEFAULT_REPLICATION_FACTOR=3
KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL_SASL_PLAINTEXT
KAFKA_JMX_PORT=5555
KAFKA_LISTENERS=EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
KAFKA_LOG_DIRS=/var/lib/kafka/data
KAFKA_LOG_RETENTION_HOURS=168
KAFKA_NUM_PARTITIONS=3
KAFKA_NUM_RECOVERY_THREADS_PER_DATA_DIR=5
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=3
KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf
KAFKA_SASL_ENABLED_MECHANISMS=PLAIN
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1
KAFKA_USER=mrkafka
KAFKA_VERSION=5.3.1
KAFKA_ZOOKEEPER_CONNECT=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=6000
KAFKA_ZOOKEEPER_SET_ACL=true
KUBERNETES_PORT=tcp://10.233.0.1:443
KUBERNETES_PORT_443_TCP=tcp://10.233.0.1:443
KUBERNETES_PORT_443_TCP_ADDR=10.233.0.1
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_SERVICE_HOST=10.233.0.1
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_PORT_HTTPS=443
LANG=C.UTF-8
MARIADB_GALERA_PORT=tcp://10.233.33.123:3306
MARIADB_GALERA_PORT_3306_TCP=tcp://10.233.33.123:3306
MARIADB_GALERA_PORT_3306_TCP_ADDR=10.233.33.123
MARIADB_GALERA_PORT_3306_TCP_PORT=3306
MARIADB_GALERA_PORT_3306_TCP_PROTO=tcp
MARIADB_GALERA_SERVICE_HOST=10.233.33.123
MARIADB_GALERA_SERVICE_PORT=3306
MARIADB_GALERA_SERVICE_PORT_MYSQL=3306
MESSAGE_ROUTER_EXTERNAL_PORT=tcp://10.233.9.94:3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP=tcp://10.233.9.94:3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_ADDR=10.233.9.94
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PORT=3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PROTO=tcp
MESSAGE_ROUTER_EXTERNAL_SERVICE_HOST=10.233.9.94
MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT=3905
MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT_HTTPS_API=3905
MESSAGE_ROUTER_KAFKA_0_PORT=tcp://10.233.0.62:9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP=tcp://10.233.0.62:9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_ADDR=10.233.0.62
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_0_SERVICE_HOST=10.233.0.62
MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_0=9091
MESSAGE_ROUTER_KAFKA_1_PORT=tcp://10.233.43.237:9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP=tcp://10.233.43.237:9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_ADDR=10.233.43.237
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_1_SERVICE_HOST=10.233.43.237
MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_1=9091
MESSAGE_ROUTER_KAFKA_2_PORT=tcp://10.233.57.193:9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP=tcp://10.233.57.193:9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_ADDR=10.233.57.193
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_2_SERVICE_HOST=10.233.57.193
MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_2=9091
MESSAGE_ROUTER_PORT=tcp://10.233.52.96:3905
MESSAGE_ROUTER_PORT_3904_TCP=tcp://10.233.52.96:3904
MESSAGE_ROUTER_PORT_3904_TCP_ADDR=10.233.52.96
MESSAGE_ROUTER_PORT_3904_TCP_PORT=3904
MESSAGE_ROUTER_PORT_3904_TCP_PROTO=tcp
MESSAGE_ROUTER_PORT_3905_TCP=tcp://10.233.52.96:3905
MESSAGE_ROUTER_PORT_3905_TCP_ADDR=10.233.52.96
MESSAGE_ROUTER_PORT_3905_TCP_PORT=3905
MESSAGE_ROUTER_PORT_3905_TCP_PROTO=tcp
MESSAGE_ROUTER_SERVICE_HOST=10.233.52.96
MESSAGE_ROUTER_SERVICE_PORT=3905
MESSAGE_ROUTER_SERVICE_PORT_HTTPS_API=3905
MESSAGE_ROUTER_SERVICE_PORT_HTTP_API=3904
MODELING_ETSICATALOG_PORT=tcp://10.233.12.203:8806
MODELING_ETSICATALOG_PORT_8806_TCP=tcp://10.233.12.203:8806
MODELING_ETSICATALOG_PORT_8806_TCP_ADDR=10.233.12.203
MODELING_ETSICATALOG_PORT_8806_TCP_PORT=8806
MODELING_ETSICATALOG_PORT_8806_TCP_PROTO=tcp
MODELING_ETSICATALOG_SERVICE_HOST=10.233.12.203
MODELING_ETSICATALOG_SERVICE_PORT=8806
MODELING_ETSICATALOG_SERVICE_PORT_MODELING_ETSICATALOG=8806
MSB_CONSUL_PORT=tcp://10.233.8.66:8500
MSB_CONSUL_PORT_8500_TCP=tcp://10.233.8.66:8500
MSB_CONSUL_PORT_8500_TCP_ADDR=10.233.8.66
MSB_CONSUL_PORT_8500_TCP_PORT=8500
MSB_CONSUL_PORT_8500_TCP_PROTO=tcp
MSB_CONSUL_SERVICE_HOST=10.233.8.66
MSB_CONSUL_SERVICE_PORT=8500
MSB_CONSUL_SERVICE_PORT_HTTP_MSB_CONSUL=8500
MSB_DISCOVERY_PORT=tcp://10.233.14.98:10081
MSB_DISCOVERY_PORT_10081_TCP=tcp://10.233.14.98:10081
MSB_DISCOVERY_PORT_10081_TCP_ADDR=10.233.14.98
MSB_DISCOVERY_PORT_10081_TCP_PORT=10081
MSB_DISCOVERY_PORT_10081_TCP_PROTO=tcp
MSB_DISCOVERY_SERVICE_HOST=10.233.14.98
MSB_DISCOVERY_SERVICE_PORT=10081
MSB_DISCOVERY_SERVICE_PORT_HTTP_MSB_DISCOVERY=10081
MSB_EAG_PORT=tcp://10.233.24.9:443
MSB_EAG_PORT_443_TCP=tcp://10.233.24.9:443
MSB_EAG_PORT_443_TCP_ADDR=10.233.24.9
MSB_EAG_PORT_443_TCP_PORT=443
MSB_EAG_PORT_443_TCP_PROTO=tcp
MSB_EAG_SERVICE_HOST=10.233.24.9
MSB_EAG_SERVICE_PORT=443
MSB_EAG_SERVICE_PORT_HTTPS_MSB_EAG=443
MSB_IAG_PORT=tcp://10.233.52.161:443
MSB_IAG_PORT_443_TCP=tcp://10.233.52.161:443
MSB_IAG_PORT_443_TCP_ADDR=10.233.52.161
MSB_IAG_PORT_443_TCP_PORT=443
MSB_IAG_PORT_443_TCP_PROTO=tcp
MSB_IAG_SERVICE_HOST=10.233.52.161
MSB_IAG_SERVICE_PORT=443
MSB_IAG_SERVICE_PORT_HTTPS_MSB_IAG=443
MULTICLOUD_FCAPS_PORT=tcp://10.233.39.18:9011
MULTICLOUD_FCAPS_PORT_9011_TCP=tcp://10.233.39.18:9011
MULTICLOUD_FCAPS_PORT_9011_TCP_ADDR=10.233.39.18
MULTICLOUD_FCAPS_PORT_9011_TCP_PORT=9011
MULTICLOUD_FCAPS_PORT_9011_TCP_PROTO=tcp
MULTICLOUD_FCAPS_SERVICE_HOST=10.233.39.18
MULTICLOUD_FCAPS_SERVICE_PORT=9011
MULTICLOUD_FCAPS_SERVICE_PORT_MULTICLOUD_FCAPS=9011
MULTICLOUD_FRAMEWORK_PORT=tcp://10.233.5.16:9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP=tcp://10.233.5.16:9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_ADDR=10.233.5.16
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PORT=9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PROTO=tcp
MULTICLOUD_FRAMEWORK_SERVICE_HOST=10.233.5.16
MULTICLOUD_FRAMEWORK_SERVICE_PORT=9001
MULTICLOUD_FRAMEWORK_SERVICE_PORT_MULTICLOUD_FRAMEWORK=9001
MULTICLOUD_K8S_MONGO_READ_PORT=tcp://10.233.6.233:27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP=tcp://10.233.6.233:27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_ADDR=10.233.6.233
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PORT=27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PROTO=tcp
MULTICLOUD_K8S_MONGO_READ_SERVICE_HOST=10.233.6.233
MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT=27017
MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT_MONGO=27017
MULTICLOUD_K8S_PORT=tcp://10.233.17.4:9015
MULTICLOUD_K8S_PORT_9015_TCP=tcp://10.233.17.4:9015
MULTICLOUD_K8S_PORT_9015_TCP_ADDR=10.233.17.4
MULTICLOUD_K8S_PORT_9015_TCP_PORT=9015
MULTICLOUD_K8S_PORT_9015_TCP_PROTO=tcp
MULTICLOUD_K8S_SERVICE_HOST=10.233.17.4
MULTICLOUD_K8S_SERVICE_PORT=9015
MULTICLOUD_PIKE_PORT=tcp://10.233.34.100:9007
MULTICLOUD_PIKE_PORT_9007_TCP=tcp://10.233.34.100:9007
MULTICLOUD_PIKE_PORT_9007_TCP_ADDR=10.233.34.100
MULTICLOUD_PIKE_PORT_9007_TCP_PORT=9007
MULTICLOUD_PIKE_PORT_9007_TCP_PROTO=tcp
MULTICLOUD_PIKE_SERVICE_HOST=10.233.34.100
MULTICLOUD_PIKE_SERVICE_PORT=9007
MULTICLOUD_PIKE_SERVICE_PORT_MULTICLOUD_PIKE=9007
NBI_MONGOHOST_READ_PORT=tcp://10.233.12.13:27017
NBI_MONGOHOST_READ_PORT_27017_TCP=tcp://10.233.12.13:27017
NBI_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.233.12.13
NBI_MONGOHOST_READ_PORT_27017_TCP_PORT=27017
NBI_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp
NBI_MONGOHOST_READ_SERVICE_HOST=10.233.12.13
NBI_MONGOHOST_READ_SERVICE_PORT=27017
NBI_MONGOHOST_READ_SERVICE_PORT_MONGO=27017
NBI_PORT=tcp://10.233.42.108:8443
NBI_PORT_8443_TCP=tcp://10.233.42.108:8443
NBI_PORT_8443_TCP_ADDR=10.233.42.108
NBI_PORT_8443_TCP_PORT=8443
NBI_PORT_8443_TCP_PROTO=tcp
NBI_SERVICE_HOST=10.233.42.108
NBI_SERVICE_PORT=8443
NBI_SERVICE_PORT_API_8443=8443
NCMP_DMI_PLUGIN_PORT=tcp://10.233.1.177:8080
NCMP_DMI_PLUGIN_PORT_8080_TCP=tcp://10.233.1.177:8080
NCMP_DMI_PLUGIN_PORT_8080_TCP_ADDR=10.233.1.177
NCMP_DMI_PLUGIN_PORT_8080_TCP_PORT=8080
NCMP_DMI_PLUGIN_PORT_8080_TCP_PROTO=tcp
NCMP_DMI_PLUGIN_PORT_8081_TCP=tcp://10.233.1.177:8081
NCMP_DMI_PLUGIN_PORT_8081_TCP_ADDR=10.233.1.177
NCMP_DMI_PLUGIN_PORT_8081_TCP_PORT=8081
NCMP_DMI_PLUGIN_PORT_8081_TCP_PROTO=tcp
NCMP_DMI_PLUGIN_SERVICE_HOST=10.233.1.177
NCMP_DMI_PLUGIN_SERVICE_PORT=8080
NCMP_DMI_PLUGIN_SERVICE_PORT_HTTP=8080
NCMP_DMI_PLUGIN_SERVICE_PORT_MANAGEMENT=8081
NETBOX_APP_PORT=tcp://10.233.61.174:8001
NETBOX_APP_PORT_8001_TCP=tcp://10.233.61.174:8001
NETBOX_APP_PORT_8001_TCP_ADDR=10.233.61.174
NETBOX_APP_PORT_8001_TCP_PORT=8001
NETBOX_APP_PORT_8001_TCP_PROTO=tcp
NETBOX_APP_SERVICE_HOST=10.233.61.174
NETBOX_APP_SERVICE_PORT=8001
NETBOX_APP_SERVICE_PORT_NETBOX_APP=8001
NETBOX_NGINX_PORT=tcp://10.233.54.155:8080
NETBOX_NGINX_PORT_8080_TCP=tcp://10.233.54.155:8080
NETBOX_NGINX_PORT_8080_TCP_ADDR=10.233.54.155
NETBOX_NGINX_PORT_8080_TCP_PORT=8080
NETBOX_NGINX_PORT_8080_TCP_PROTO=tcp
NETBOX_NGINX_SERVICE_HOST=10.233.54.155
NETBOX_NGINX_SERVICE_PORT=8080
NETBOX_POSTGRES_PORT=tcp://10.233.52.32:5432
NETBOX_POSTGRES_PORT_5432_TCP=tcp://10.233.52.32:5432
NETBOX_POSTGRES_PORT_5432_TCP_ADDR=10.233.52.32
NETBOX_POSTGRES_PORT_5432_TCP_PORT=5432
NETBOX_POSTGRES_PORT_5432_TCP_PROTO=tcp
NETBOX_POSTGRES_SERVICE_HOST=10.233.52.32
NETBOX_POSTGRES_SERVICE_PORT=5432
NETBOX_POSTGRES_SERVICE_PORT_NETBOX_POSTGRES=5432
ONAP_CDS_DB_METRICS_PORT=tcp://10.233.55.244:9104
ONAP_CDS_DB_METRICS_PORT_9104_TCP=tcp://10.233.55.244:9104
ONAP_CDS_DB_METRICS_PORT_9104_TCP_ADDR=10.233.55.244
ONAP_CDS_DB_METRICS_PORT_9104_TCP_PORT=9104
ONAP_CDS_DB_METRICS_PORT_9104_TCP_PROTO=tcp
ONAP_CDS_DB_METRICS_SERVICE_HOST=10.233.55.244
ONAP_CDS_DB_METRICS_SERVICE_PORT=9104
ONAP_CDS_DB_METRICS_SERVICE_PORT_METRICS=9104
ONAP_MARIADB_GALERA_METRICS_PORT=tcp://10.233.20.213:9104
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP=tcp://10.233.20.213:9104
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_ADDR=10.233.20.213
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_PORT=9104
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_PROTO=tcp
ONAP_MARIADB_GALERA_METRICS_SERVICE_HOST=10.233.20.213
ONAP_MARIADB_GALERA_METRICS_SERVICE_PORT=9104
ONAP_MARIADB_GALERA_METRICS_SERVICE_PORT_METRICS=9104
ONAP_POLICY_MARIADB_METRICS_PORT=tcp://10.233.2.154:9104
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP=tcp://10.233.2.154:9104
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_ADDR=10.233.2.154
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_PORT=9104
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_PROTO=tcp
ONAP_POLICY_MARIADB_METRICS_SERVICE_HOST=10.233.2.154
ONAP_POLICY_MARIADB_METRICS_SERVICE_PORT=9104
ONAP_POLICY_MARIADB_METRICS_SERVICE_PORT_METRICS=9104
OOF_HAS_API_PORT=tcp://10.233.6.6:8091
OOF_HAS_API_PORT_8091_TCP=tcp://10.233.6.6:8091
OOF_HAS_API_PORT_8091_TCP_ADDR=10.233.6.6
OOF_HAS_API_PORT_8091_TCP_PORT=8091
OOF_HAS_API_PORT_8091_TCP_PROTO=tcp
OOF_HAS_API_SERVICE_HOST=10.233.6.6
OOF_HAS_API_SERVICE_PORT=8091
OOF_HAS_API_SERVICE_PORT_OOF_HAS_API=8091
OOF_OSDF_PORT=tcp://10.233.11.39:8698
OOF_OSDF_PORT_8698_TCP=tcp://10.233.11.39:8698
OOF_OSDF_PORT_8698_TCP_ADDR=10.233.11.39
OOF_OSDF_PORT_8698_TCP_PORT=8698
OOF_OSDF_PORT_8698_TCP_PROTO=tcp
OOF_OSDF_SERVICE_HOST=10.233.11.39
OOF_OSDF_SERVICE_PORT=8698
OOM_CERT_SERVICE_PORT=tcp://10.233.56.86:8443
OOM_CERT_SERVICE_PORT_8443_TCP=tcp://10.233.56.86:8443
OOM_CERT_SERVICE_PORT_8443_TCP_ADDR=10.233.56.86
OOM_CERT_SERVICE_PORT_8443_TCP_PORT=8443
OOM_CERT_SERVICE_PORT_8443_TCP_PROTO=tcp
OOM_CERT_SERVICE_SERVICE_HOST=10.233.56.86
OOM_CERT_SERVICE_SERVICE_PORT=8443
OOM_CERT_SERVICE_SERVICE_PORT_HTTPS_HTTP=8443
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
POLICY_APEX_PDP_PORT=tcp://10.233.6.130:6969
POLICY_APEX_PDP_PORT_6969_TCP=tcp://10.233.6.130:6969
POLICY_APEX_PDP_PORT_6969_TCP_ADDR=10.233.6.130
POLICY_APEX_PDP_PORT_6969_TCP_PORT=6969
POLICY_APEX_PDP_PORT_6969_TCP_PROTO=tcp
POLICY_APEX_PDP_SERVICE_HOST=10.233.6.130
POLICY_APEX_PDP_SERVICE_PORT=6969
POLICY_APEX_PDP_SERVICE_PORT_POLICY_APEX_PDP=6969
POLICY_API_PORT=tcp://10.233.24.7:6969
POLICY_API_PORT_6969_TCP=tcp://10.233.24.7:6969
POLICY_API_PORT_6969_TCP_ADDR=10.233.24.7
POLICY_API_PORT_6969_TCP_PORT=6969
POLICY_API_PORT_6969_TCP_PROTO=tcp
POLICY_API_SERVICE_HOST=10.233.24.7
POLICY_API_SERVICE_PORT=6969
POLICY_API_SERVICE_PORT_POLICY_API=6969
POLICY_CLAMP_BE_PORT=tcp://10.233.49.186:8443
POLICY_CLAMP_BE_PORT_8443_TCP=tcp://10.233.49.186:8443
POLICY_CLAMP_BE_PORT_8443_TCP_ADDR=10.233.49.186
POLICY_CLAMP_BE_PORT_8443_TCP_PORT=8443
POLICY_CLAMP_BE_PORT_8443_TCP_PROTO=tcp
POLICY_CLAMP_BE_SERVICE_HOST=10.233.49.186
POLICY_CLAMP_BE_SERVICE_PORT=8443
POLICY_CLAMP_BE_SERVICE_PORT_POLICY_CLAMP_BE=8443
POLICY_CLAMP_CL_K8S_PPNT_PORT=tcp://10.233.28.98:8083
POLICY_CLAMP_CL_K8S_PPNT_PORT_8083_TCP=tcp://10.233.28.98:8083
POLICY_CLAMP_CL_K8S_PPNT_PORT_8083_TCP_ADDR=10.233.28.98
POLICY_CLAMP_CL_K8S_PPNT_PORT_8083_TCP_PORT=8083
POLICY_CLAMP_CL_K8S_PPNT_PORT_8083_TCP_PROTO=tcp
POLICY_CLAMP_CL_K8S_PPNT_SERVICE_HOST=10.233.28.98
POLICY_CLAMP_CL_K8S_PPNT_SERVICE_PORT=8083
POLICY_CLAMP_CL_K8S_PPNT_SERVICE_PORT_HTTP_API=8083
POLICY_CLAMP_CL_RUNTIME_PORT=tcp://10.233.32.224:6969
POLICY_CLAMP_CL_RUNTIME_PORT_6969_TCP=tcp://10.233.32.224:6969
POLICY_CLAMP_CL_RUNTIME_PORT_6969_TCP_ADDR=10.233.32.224
POLICY_CLAMP_CL_RUNTIME_PORT_6969_TCP_PORT=6969
POLICY_CLAMP_CL_RUNTIME_PORT_6969_TCP_PROTO=tcp
POLICY_CLAMP_CL_RUNTIME_SERVICE_HOST=10.233.32.224
POLICY_CLAMP_CL_RUNTIME_SERVICE_PORT=6969
POLICY_CLAMP_CL_RUNTIME_SERVICE_PORT_HTTP_API=6969
POLICY_CLAMP_FE_PORT=tcp://10.233.24.67:2443
POLICY_CLAMP_FE_PORT_2443_TCP=tcp://10.233.24.67:2443
POLICY_CLAMP_FE_PORT_2443_TCP_ADDR=10.233.24.67
POLICY_CLAMP_FE_PORT_2443_TCP_PORT=2443
POLICY_CLAMP_FE_PORT_2443_TCP_PROTO=tcp
POLICY_CLAMP_FE_SERVICE_HOST=10.233.24.67
POLICY_CLAMP_FE_SERVICE_PORT=2443
POLICY_CLAMP_FE_SERVICE_PORT_POLICY_CLAMP_FE=2443
POLICY_DISTRIBUTION_PORT=tcp://10.233.26.167:6969
POLICY_DISTRIBUTION_PORT_6969_TCP=tcp://10.233.26.167:6969
POLICY_DISTRIBUTION_PORT_6969_TCP_ADDR=10.233.26.167
POLICY_DISTRIBUTION_PORT_6969_TCP_PORT=6969
POLICY_DISTRIBUTION_PORT_6969_TCP_PROTO=tcp
POLICY_DISTRIBUTION_SERVICE_HOST=10.233.26.167
POLICY_DISTRIBUTION_SERVICE_PORT=6969
POLICY_DISTRIBUTION_SERVICE_PORT_POLICY_DISTRIBUTION=6969
POLICY_DROOLS_PDP_PORT=tcp://10.233.31.183:6969
POLICY_DROOLS_PDP_PORT_6969_TCP=tcp://10.233.31.183:6969
POLICY_DROOLS_PDP_PORT_6969_TCP_ADDR=10.233.31.183
POLICY_DROOLS_PDP_PORT_6969_TCP_PORT=6969
POLICY_DROOLS_PDP_PORT_6969_TCP_PROTO=tcp
POLICY_DROOLS_PDP_PORT_9696_TCP=tcp://10.233.31.183:9696
POLICY_DROOLS_PDP_PORT_9696_TCP_ADDR=10.233.31.183
POLICY_DROOLS_PDP_PORT_9696_TCP_PORT=9696
POLICY_DROOLS_PDP_PORT_9696_TCP_PROTO=tcp
POLICY_DROOLS_PDP_SERVICE_HOST=10.233.31.183
POLICY_DROOLS_PDP_SERVICE_PORT=6969
POLICY_DROOLS_PDP_SERVICE_PORT_POLICY_DROOLS_PDP_6969=6969
POLICY_DROOLS_PDP_SERVICE_PORT_POLICY_DROOLS_PDP_9696=9696
POLICY_GUI_PORT=tcp://10.233.8.4:2443
POLICY_GUI_PORT_2443_TCP=tcp://10.233.8.4:2443
POLICY_GUI_PORT_2443_TCP_ADDR=10.233.8.4
POLICY_GUI_PORT_2443_TCP_PORT=2443
POLICY_GUI_PORT_2443_TCP_PROTO=tcp
POLICY_GUI_SERVICE_HOST=10.233.8.4
POLICY_GUI_SERVICE_PORT=2443
POLICY_GUI_SERVICE_PORT_POLICY_GUI=2443
POLICY_HANDLER_PORT=tcp://10.233.44.200:80
POLICY_HANDLER_PORT_80_TCP=tcp://10.233.44.200:80
POLICY_HANDLER_PORT_80_TCP_ADDR=10.233.44.200
POLICY_HANDLER_PORT_80_TCP_PORT=80
POLICY_HANDLER_PORT_80_TCP_PROTO=tcp
POLICY_HANDLER_SERVICE_HOST=10.233.44.200
POLICY_HANDLER_SERVICE_PORT=80
POLICY_HANDLER_SERVICE_PORT_POLICY_HANDLER=80
POLICY_MARIADB_PORT=tcp://10.233.27.210:3306
POLICY_MARIADB_PORT_3306_TCP=tcp://10.233.27.210:3306
POLICY_MARIADB_PORT_3306_TCP_ADDR=10.233.27.210
POLICY_MARIADB_PORT_3306_TCP_PORT=3306
POLICY_MARIADB_PORT_3306_TCP_PROTO=tcp
POLICY_MARIADB_SERVICE_HOST=10.233.27.210
POLICY_MARIADB_SERVICE_PORT=3306
POLICY_MARIADB_SERVICE_PORT_MYSQL=3306
POLICY_PAP_PORT=tcp://10.233.28.167:6969
POLICY_PAP_PORT_6969_TCP=tcp://10.233.28.167:6969
POLICY_PAP_PORT_6969_TCP_ADDR=10.233.28.167
POLICY_PAP_PORT_6969_TCP_PORT=6969
POLICY_PAP_PORT_6969_TCP_PROTO=tcp
POLICY_PAP_SERVICE_HOST=10.233.28.167
POLICY_PAP_SERVICE_PORT=6969
POLICY_PAP_SERVICE_PORT_HTTP_API=6969
POLICY_XACML_PDP_PORT=tcp://10.233.40.166:6969
POLICY_XACML_PDP_PORT_6969_TCP=tcp://10.233.40.166:6969
POLICY_XACML_PDP_PORT_6969_TCP_ADDR=10.233.40.166
POLICY_XACML_PDP_PORT_6969_TCP_PORT=6969
POLICY_XACML_PDP_PORT_6969_TCP_PROTO=tcp
POLICY_XACML_PDP_SERVICE_HOST=10.233.40.166
POLICY_XACML_PDP_SERVICE_PORT=6969
POLICY_XACML_PDP_SERVICE_PORT_POLICY_XACML_PDP=6969
PORTAL_APP_PORT=tcp://10.233.10.82:8443
PORTAL_APP_PORT_8443_TCP=tcp://10.233.10.82:8443
PORTAL_APP_PORT_8443_TCP_ADDR=10.233.10.82
PORTAL_APP_PORT_8443_TCP_PORT=8443
PORTAL_APP_PORT_8443_TCP_PROTO=tcp
PORTAL_APP_SERVICE_HOST=10.233.10.82
PORTAL_APP_SERVICE_PORT=8443
PORTAL_APP_SERVICE_PORT_PORTAL_APP4=8443
PORTAL_CASSANDRA_PORT=tcp://10.233.37.68:9160
PORTAL_CASSANDRA_PORT_7000_TCP=tcp://10.233.37.68:7000
PORTAL_CASSANDRA_PORT_7000_TCP_ADDR=10.233.37.68
PORTAL_CASSANDRA_PORT_7000_TCP_PORT=7000
PORTAL_CASSANDRA_PORT_7000_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_7001_TCP=tcp://10.233.37.68:7001
PORTAL_CASSANDRA_PORT_7001_TCP_ADDR=10.233.37.68
PORTAL_CASSANDRA_PORT_7001_TCP_PORT=7001
PORTAL_CASSANDRA_PORT_7001_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_7199_TCP=tcp://10.233.37.68:7199
PORTAL_CASSANDRA_PORT_7199_TCP_ADDR=10.233.37.68
PORTAL_CASSANDRA_PORT_7199_TCP_PORT=7199
PORTAL_CASSANDRA_PORT_7199_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_9042_TCP=tcp://10.233.37.68:9042
PORTAL_CASSANDRA_PORT_9042_TCP_ADDR=10.233.37.68
PORTAL_CASSANDRA_PORT_9042_TCP_PORT=9042
PORTAL_CASSANDRA_PORT_9042_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_9160_TCP=tcp://10.233.37.68:9160
PORTAL_CASSANDRA_PORT_9160_TCP_ADDR=10.233.37.68
PORTAL_CASSANDRA_PORT_9160_TCP_PORT=9160
PORTAL_CASSANDRA_PORT_9160_TCP_PROTO=tcp
PORTAL_CASSANDRA_SERVICE_HOST=10.233.37.68
PORTAL_CASSANDRA_SERVICE_PORT=9160
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA2=7000
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA3=7001
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA4=7199
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA5=9042
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA=9160
PORTAL_DB_PORT=tcp://10.233.11.135:3306
PORTAL_DB_PORT_3306_TCP=tcp://10.233.11.135:3306
PORTAL_DB_PORT_3306_TCP_ADDR=10.233.11.135
PORTAL_DB_PORT_3306_TCP_PORT=3306
PORTAL_DB_PORT_3306_TCP_PROTO=tcp
PORTAL_DB_SERVICE_HOST=10.233.11.135
PORTAL_DB_SERVICE_PORT=3306
PORTAL_DB_SERVICE_PORT_PORTAL_DB=3306
PORTAL_SDK_PORT=tcp://10.233.39.3:8443
PORTAL_SDK_PORT_8443_TCP=tcp://10.233.39.3:8443
PORTAL_SDK_PORT_8443_TCP_ADDR=10.233.39.3
PORTAL_SDK_PORT_8443_TCP_PORT=8443
PORTAL_SDK_PORT_8443_TCP_PROTO=tcp
PORTAL_SDK_SERVICE_HOST=10.233.39.3
PORTAL_SDK_SERVICE_PORT=8443
PORTAL_SDK_SERVICE_PORT_PORTAL_SDK=8443
PORTAL_WIDGET_PORT=tcp://10.233.22.87:8082
PORTAL_WIDGET_PORT_8082_TCP=tcp://10.233.22.87:8082
PORTAL_WIDGET_PORT_8082_TCP_ADDR=10.233.22.87
PORTAL_WIDGET_PORT_8082_TCP_PORT=8082
PORTAL_WIDGET_PORT_8082_TCP_PROTO=tcp
PORTAL_WIDGET_SERVICE_HOST=10.233.22.87
PORTAL_WIDGET_SERVICE_PORT=8082
PORTAL_WIDGET_SERVICE_PORT_PORTAL_WIDGET=8082
PWD=/
PYTHON_PIP_VERSION=8.1.2
PYTHON_VERSION=2.7.9-1
ROBOT_PORT=tcp://10.233.32.48:443
ROBOT_PORT_443_TCP=tcp://10.233.32.48:443
ROBOT_PORT_443_TCP_ADDR=10.233.32.48
ROBOT_PORT_443_TCP_PORT=443
ROBOT_PORT_443_TCP_PROTO=tcp
ROBOT_SERVICE_HOST=10.233.32.48
ROBOT_SERVICE_PORT=443
ROBOT_SERVICE_PORT_HTTPD=443
SCALA_VERSION=2.12
SDC_BE_EXTERNAL_PORT=tcp://10.233.49.197:8443
SDC_BE_EXTERNAL_PORT_8443_TCP=tcp://10.233.49.197:8443
SDC_BE_EXTERNAL_PORT_8443_TCP_ADDR=10.233.49.197
SDC_BE_EXTERNAL_PORT_8443_TCP_PORT=8443
SDC_BE_EXTERNAL_PORT_8443_TCP_PROTO=tcp
SDC_BE_EXTERNAL_SERVICE_HOST=10.233.49.197
SDC_BE_EXTERNAL_SERVICE_PORT=8443
SDC_BE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443
SDC_BE_PORT=tcp://10.233.46.242:8443
SDC_BE_PORT_8080_TCP=tcp://10.233.46.242:8080
SDC_BE_PORT_8080_TCP_ADDR=10.233.46.242
SDC_BE_PORT_8080_TCP_PORT=8080
SDC_BE_PORT_8080_TCP_PROTO=tcp
SDC_BE_PORT_8443_TCP=tcp://10.233.46.242:8443
SDC_BE_PORT_8443_TCP_ADDR=10.233.46.242
SDC_BE_PORT_8443_TCP_PORT=8443
SDC_BE_PORT_8443_TCP_PROTO=tcp
SDC_BE_SERVICE_HOST=10.233.46.242
SDC_BE_SERVICE_PORT=8443
SDC_BE_SERVICE_PORT_HTTPS_API=8443
SDC_BE_SERVICE_PORT_HTTP_API=8080
SDC_FE_PORT=tcp://10.233.12.158:9443
SDC_FE_PORT_9443_TCP=tcp://10.233.12.158:9443
SDC_FE_PORT_9443_TCP_ADDR=10.233.12.158
SDC_FE_PORT_9443_TCP_PORT=9443
SDC_FE_PORT_9443_TCP_PROTO=tcp
SDC_FE_SERVICE_HOST=10.233.12.158
SDC_FE_SERVICE_PORT=9443
SDC_FE_SERVICE_PORT_SDC_FE2=9443
SDC_HELM_VALIDATOR_PORT=tcp://10.233.22.166:8080
SDC_HELM_VALIDATOR_PORT_8080_TCP=tcp://10.233.22.166:8080
SDC_HELM_VALIDATOR_PORT_8080_TCP_ADDR=10.233.22.166
SDC_HELM_VALIDATOR_PORT_8080_TCP_PORT=8080
SDC_HELM_VALIDATOR_PORT_8080_TCP_PROTO=tcp
SDC_HELM_VALIDATOR_SERVICE_HOST=10.233.22.166
SDC_HELM_VALIDATOR_SERVICE_PORT=8080
SDC_HELM_VALIDATOR_SERVICE_PORT_HTTP=8080
SDC_ONBOARDING_BE_PORT=tcp://10.233.50.234:8445
SDC_ONBOARDING_BE_PORT_8081_TCP=tcp://10.233.50.234:8081
SDC_ONBOARDING_BE_PORT_8081_TCP_ADDR=10.233.50.234
SDC_ONBOARDING_BE_PORT_8081_TCP_PORT=8081
SDC_ONBOARDING_BE_PORT_8081_TCP_PROTO=tcp
SDC_ONBOARDING_BE_PORT_8445_TCP=tcp://10.233.50.234:8445
SDC_ONBOARDING_BE_PORT_8445_TCP_ADDR=10.233.50.234
SDC_ONBOARDING_BE_PORT_8445_TCP_PORT=8445
SDC_ONBOARDING_BE_PORT_8445_TCP_PROTO=tcp
SDC_ONBOARDING_BE_SERVICE_HOST=10.233.50.234
SDC_ONBOARDING_BE_SERVICE_PORT=8445
SDC_ONBOARDING_BE_SERVICE_PORT_SDC_ONBOARDING_BE2=8081
SDC_ONBOARDING_BE_SERVICE_PORT_SDC_ONBOARDING_BE=8445
SDC_WFD_BE_PORT=tcp://10.233.9.135:8443
SDC_WFD_BE_PORT_8443_TCP=tcp://10.233.9.135:8443
SDC_WFD_BE_PORT_8443_TCP_ADDR=10.233.9.135
SDC_WFD_BE_PORT_8443_TCP_PORT=8443
SDC_WFD_BE_PORT_8443_TCP_PROTO=tcp
SDC_WFD_BE_SERVICE_HOST=10.233.9.135
SDC_WFD_BE_SERVICE_PORT=8443
SDC_WFD_BE_SERVICE_PORT_SDC_WFD_BE=8443
SDC_WFD_FE_PORT=tcp://10.233.4.18:8443
SDC_WFD_FE_PORT_8443_TCP=tcp://10.233.4.18:8443
SDC_WFD_FE_PORT_8443_TCP_ADDR=10.233.4.18
SDC_WFD_FE_PORT_8443_TCP_PORT=8443
SDC_WFD_FE_PORT_8443_TCP_PROTO=tcp
SDC_WFD_FE_SERVICE_HOST=10.233.4.18
SDC_WFD_FE_SERVICE_PORT=8443
SDC_WFD_FE_SERVICE_PORT_SDC_WFD_FE=8443
SHLVL=1
ZULU_OPENJDK_VERSION=8=8.38.0.13
_=/usr/bin/env
aaf_locate_url=https://aaf-locate.onap:8095
enableCadi=true
===> User
uid=1000(mrkafka) gid=0(root) groups=0(root)
===> Configuring ...
SASL is enabled.
===> Running preflight checks ...
===> Check if /var/lib/kafka/data is writable ...
===> Check if Zookeeper is healthy ...
[main] INFO io.confluent.admin.utils.ClusterStatus - SASL is enabled. java.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:host.name=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.version=1.8.0_212
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.vendor=Azul Systems, Inc.
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.class.path=/etc/confluent/docker/docker-utils.jar
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.io.tmpdir=/tmp
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.compiler=<NA>
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.name=Linux
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.arch=amd64
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.version=4.19.0-17-cloud-amd64
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.name=mrkafka
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.home=/home/mrkafka
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.dir=/
[main] INFO org.apache.zookeeper.ZooKeeper - Initiating client connection, connectString=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=40000 watcher=io.confluent.admin.utils.ZookeeperConnectionWatcher@30dae81
[main-SendThread(onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.Login - Client successfully logged in.
[main-SendThread(onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.client.ZooKeeperSaslClient - Client will use DIGEST-MD5 as SASL mechanism.
[main-SendThread(onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Opening socket connection to server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181. Will attempt to SASL-authenticate using Login Context section 'Client'
[main-SendThread(onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Socket connection established to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181, initiating session
[main-SendThread(onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Session establishment complete on server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181, sessionid = 0x1000096e9210000, negotiated timeout = 40000
[main] INFO org.apache.zookeeper.ZooKeeper - Session: 0x1000096e9210000 closed
[main-EventThread] INFO org.apache.zookeeper.ClientCnxn - EventThread shut down for session: 0x1000096e9210000
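The preflight check above amounts to opening a SASL-authenticated ZooKeeper session and closing it again. A minimal Java sketch of an equivalent check is below; the connect string and JAAS file path are taken from the log lines, while the class and method names are illustrative assumptions, not the actual io.confluent.admin.utils implementation.

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkHealthCheck {
    // Illustrative stand-in for the image's "Check if Zookeeper is healthy" step.
    public static boolean zkReady(String connectString, int timeoutMs) throws Exception {
        // The JAAS login context is picked up from this system property,
        // exactly as the log shows: java.security.auth.login.config=...jaas.conf
        System.setProperty("java.security.auth.login.config",
                "/etc/kafka/secrets/jaas/kafka_server_jaas.conf");
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper(connectString, timeoutMs, (WatchedEvent e) -> {
            if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        try {
            // Healthy == a session can be established (and SASL-authenticated) in time.
            return connected.await(timeoutMs, TimeUnit.MILLISECONDS);
        } finally {
            zk.close(); // produces the "Session: 0x... closed" line seen above
        }
    }
}
```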
===> Launching ... 
===> Launching kafka ... 
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/share/java/kafka/slf4j-log4j12-1.7.26.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/share/java/kafka/kafka11aaf-jar-with-dependencies.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
[2021-10-15 11:01:27,185] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[2021-10-15 11:01:28,029] INFO KafkaConfig values: advertised.host.name = null advertised.listeners = EXTERNAL_SASL_PLAINTEXT://10.253.0.233:30492,INTERNAL_SASL_PLAINTEXT://:9092 advertised.port = null alter.config.policy.class.name = null alter.log.dirs.replication.quota.window.num = 11 alter.log.dirs.replication.quota.window.size.seconds = 1 authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer auto.create.topics.enable = true auto.leader.rebalance.enable = true background.threads = 10 broker.id = 2 broker.id.generation.enable = true broker.rack = null client.quota.callback.class = null compression.type = producer connection.failed.authentication.delay.ms = 100 connections.max.idle.ms = 600000 connections.max.reauth.ms = 0 control.plane.listener.name = null controlled.shutdown.enable = true controlled.shutdown.max.retries = 3 controlled.shutdown.retry.backoff.ms = 5000 controller.socket.timeout.ms = 30000 create.topic.policy.class.name = null default.replication.factor = 3 delegation.token.expiry.check.interval.ms = 3600000 delegation.token.expiry.time.ms = 86400000 delegation.token.master.key = null delegation.token.max.lifetime.ms = 604800000 delete.records.purgatory.purge.interval.requests = 1 delete.topic.enable = true fetch.purgatory.purge.interval.requests = 1000 group.initial.rebalance.delay.ms = 3000 group.max.session.timeout.ms = 1800000 group.max.size = 2147483647 group.min.session.timeout.ms = 6000 host.name = inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT inter.broker.protocol.version = 2.3-IV1 kafka.metrics.polling.interval.secs = 10 kafka.metrics.reporters = [] leader.imbalance.check.interval.seconds = 300 leader.imbalance.per.broker.percentage = 10 listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092 log.cleaner.backoff.ms = 15000 log.cleaner.dedupe.buffer.size = 134217728 log.cleaner.delete.retention.ms = 86400000 log.cleaner.enable = true log.cleaner.io.buffer.load.factor = 0.9 log.cleaner.io.buffer.size = 524288 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 log.cleaner.max.compaction.lag.ms = 9223372036854775807 log.cleaner.min.cleanable.ratio = 0.5 log.cleaner.min.compaction.lag.ms = 0 log.cleaner.threads = 1 log.cleanup.policy = [delete] log.dir = /tmp/kafka-logs log.dirs = /var/lib/kafka/data log.flush.interval.messages = 9223372036854775807 log.flush.interval.ms = null log.flush.offset.checkpoint.interval.ms = 60000 log.flush.scheduler.interval.ms = 9223372036854775807 log.flush.start.offset.checkpoint.interval.ms = 60000 log.index.interval.bytes = 4096 log.index.size.max.bytes = 10485760 log.message.downconversion.enable = true log.message.format.version = 2.3-IV1 log.message.timestamp.difference.max.ms = 9223372036854775807 log.message.timestamp.type = CreateTime log.preallocate = false log.retention.bytes = -1 log.retention.check.interval.ms = 300000 log.retention.hours = 168 log.retention.minutes = null log.retention.ms = null log.roll.hours = 168 log.roll.jitter.hours = 0 log.roll.jitter.ms = null log.roll.ms = null log.segment.bytes = 1073741824 log.segment.delete.delay.ms = 60000 max.connections = 2147483647 max.connections.per.ip = 2147483647 max.connections.per.ip.overrides = 
max.incremental.fetch.session.cache.slots = 1000 message.max.bytes = 1000012 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 min.insync.replicas = 1 num.io.threads = 8 num.network.threads = 3 num.partitions = 3 num.recovery.threads.per.data.dir = 5 num.replica.alter.log.dirs.threads = null num.replica.fetchers = 1 offset.metadata.max.bytes = 4096 offsets.commit.required.acks = -1 offsets.commit.timeout.ms = 5000 offsets.load.buffer.size = 5242880 offsets.retention.check.interval.ms = 600000 offsets.retention.minutes = 10080 offsets.topic.compression.codec = 0 offsets.topic.num.partitions = 50 offsets.topic.replication.factor = 3 offsets.topic.segment.bytes = 104857600 password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding password.encoder.iterations = 4096 password.encoder.key.length = 128 password.encoder.keyfactory.algorithm = null password.encoder.old.secret = null password.encoder.secret = null port = 9092 principal.builder.class = null producer.purgatory.purge.interval.requests = 1000 queued.max.request.bytes = -1 queued.max.requests = 500 quota.consumer.default = 9223372036854775807 quota.producer.default = 9223372036854775807 quota.window.num = 11 quota.window.size.seconds = 1 replica.fetch.backoff.ms = 1000 replica.fetch.max.bytes = 1048576 replica.fetch.min.bytes = 1 replica.fetch.response.max.bytes = 10485760 replica.fetch.wait.max.ms = 500 replica.high.watermark.checkpoint.interval.ms = 5000 replica.lag.time.max.ms = 10000 replica.socket.receive.buffer.bytes = 65536 replica.socket.timeout.ms = 30000 replication.quota.window.num = 11 replication.quota.window.size.seconds = 1 request.timeout.ms = 30000 reserved.broker.max.id = 1000 sasl.client.callback.handler.class = null sasl.enabled.mechanisms = [PLAIN] sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.principal.to.local.rules = [DEFAULT] sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.login.callback.handler.class = null sasl.login.class = null sasl.login.refresh.buffer.seconds = 300 sasl.login.refresh.min.period.seconds = 60 sasl.login.refresh.window.factor = 0.8 sasl.login.refresh.window.jitter = 0.05 sasl.mechanism.inter.broker.protocol = PLAIN sasl.server.callback.handler.class = null security.inter.broker.protocol = PLAINTEXT socket.receive.buffer.bytes = 102400 socket.request.max.bytes = 104857600 socket.send.buffer.bytes = 102400 ssl.cipher.suites = [] ssl.client.auth = none ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] ssl.endpoint.identification.algorithm = https ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.principal.mapping.rules = [DEFAULT] ssl.protocol = TLS ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000 transaction.max.timeout.ms = 900000 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 transaction.state.log.load.buffer.size = 5242880 transaction.state.log.min.isr = 1 transaction.state.log.num.partitions = 50 transaction.state.log.replication.factor = 1 transaction.state.log.segment.bytes = 104857600 transactional.id.expiration.ms = 604800000 unclean.leader.election.enable = false 
zookeeper.connect = onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 zookeeper.connection.timeout.ms = 6000 zookeeper.max.in.flight.requests = 10 zookeeper.session.timeout.ms = 6000 zookeeper.set.acl = true zookeeper.sync.time.ms = 2000 (kafka.server.KafkaConfig) [2021-10-15 11:01:28,547] WARN The package io.confluent.support.metrics.collectors.FullCollector for collecting the full set of support metrics could not be loaded, so we are reverting to anonymous, basic metric collection. If you are a Confluent customer, please refer to the Confluent Platform documentation, section Proactive Support, on how to activate full metrics collection. (io.confluent.support.metrics.KafkaSupportConfig) [2021-10-15 11:01:28,547] WARN The support metrics collection feature ("Metrics") of Proactive Support is disabled. (io.confluent.support.metrics.SupportedServerStartable) [2021-10-15 11:01:28,552] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler) [2021-10-15 11:01:28,553] INFO starting (kafka.server.KafkaServer) [2021-10-15 11:01:28,563] INFO Connecting to zookeeper on onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 (kafka.server.KafkaServer) [2021-10-15 11:01:28,639] INFO [ZooKeeperClient Kafka server] Initializing a new session to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181. (kafka.zookeeper.ZooKeeperClient) [2021-10-15 11:01:28,649] INFO Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,649] INFO Client environment:host.name=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,649] INFO Client environment:java.version=1.8.0_212 (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,649] INFO Client environment:java.vendor=Azul Systems, Inc. 
(org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,658] INFO Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,659] INFO Client environment:java.class.path=/usr/bin/../share/java/kafka/activation-1.1.1.jar:/usr/bin/../share/java/kafka/aopalliance-repackaged-2.5.0.jar:/usr/bin/../share/java/kafka/argparse4j-0.7.0.jar:/usr/bin/../share/java/kafka/audience-annotations-0.5.0.jar:/usr/bin/../share/java/kafka/avro-1.8.1.jar:/usr/bin/../share/java/kafka/commons-codec-1.11.jar:/usr/bin/../share/java/kafka/commons-compress-1.8.1.jar:/usr/bin/../share/java/kafka/commons-lang3-3.8.1.jar:/usr/bin/../share/java/kafka/commons-logging-1.2.jar:/usr/bin/../share/java/kafka/connect-api-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-basic-auth-extension-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-file-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-json-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-runtime-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-transforms-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/guava-20.0.jar:/usr/bin/../share/java/kafka/hk2-api-2.5.0.jar:/usr/bin/../share/java/kafka/hk2-locator-2.5.0.jar:/usr/bin/../share/java/kafka/hk2-utils-2.5.0.jar:/usr/bin/../share/java/kafka/httpclient-4.5.7.jar:/usr/bin/../share/java/kafka/httpcore-4.4.11.jar:/usr/bin/../share/java/kafka/httpmime-4.5.7.jar:/usr/bin/../share/java/kafka/jackson-annotations-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-core-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-core-asl-1.9.13.jar:/usr/bin/../share/java/kafka/jackson-databind-2.9.9.3.jar:/usr/bin/../share/java/kafka/jackson-dataformat-csv-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-datatype-jdk8-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-base-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-json-provider-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-mapper-asl-1.9.13.jar:/usr/bin/../share/java/kafka/jackson-module-jaxb-annotations-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-module-paranamer-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-module-scala_2.11-2.9.9.jar:/usr/bin/../share/java/kafka/jakarta.annotation-api-1.3.4.jar:/usr/bin/../share/java/kafka/jakarta.inject-2.5.0.jar:/usr/bin/../share/java/kafka/jakarta.ws.rs-api-2.1.5.jar:/usr/bin/../share/java/kafka/javassist-3.22.0-CR2.jar:/usr/bin/../share/java/kafka/javax.servlet-api-3.1.0.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.1.1.jar:/usr/bin/../share/java/kafka/jaxb-api-2.3.0.jar:/usr/bin/../share/java/kafka/jersey-client-2.28.jar:/usr/bin/../share/java/kafka/jersey-common-2.28.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-2.28.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-core-2.28.jar:/usr/bin/../share/java/kafka/jersey-hk2-2.28.jar:/usr/bin/../share/java/kafka/jersey-media-jaxb-2.28.jar:/usr/bin/../share/java/kafka/jersey-server-2.28.jar:/usr/bin/../share/java/kafka/jetty-client-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-continuation-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-http-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-io-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-security-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-server-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-servlet-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-servlets-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-util-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jopt-simple-5.0.4.jar:/usr/bin/../share/java
/kafka/jsr305-3.0.2.jar:/usr/bin/../share/java/kafka/kafka-clients-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-log4j-appender-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-examples-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-scala_2.11-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-test-utils-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-tools-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-javadoc.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-scaladoc.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-sources.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-test-sources.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-test.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/log4j-1.2.17.jar:/usr/bin/../share/java/kafka/lz4-java-1.6.0.jar:/usr/bin/../share/java/kafka/maven-artifact-3.6.1.jar:/usr/bin/../share/java/kafka/metrics-core-2.2.0.jar:/usr/bin/../share/java/kafka/osgi-resource-locator-1.0.1.jar:/usr/bin/../share/java/kafka/paranamer-2.7.jar:/usr/bin/../share/java/kafka/paranamer-2.8.jar:/usr/bin/../share/java/kafka/plexus-utils-3.2.0.jar:/usr/bin/../share/java/kafka/reflections-0.9.11.jar:/usr/bin/../share/java/kafka/rocksdbjni-5.18.3.jar:/usr/bin/../share/java/kafka/scala-library-2.11.12.jar:/usr/bin/../share/java/kafka/scala-logging_2.11-3.9.0.jar:/usr/bin/../share/java/kafka/scala-reflect-2.11.12.jar:/usr/bin/../share/java/kafka/slf4j-api-1.7.26.jar:/usr/bin/../share/java/kafka/slf4j-log4j12-1.7.26.jar:/usr/bin/../share/java/kafka/snappy-java-1.1.7.3.jar:/usr/bin/../share/java/kafka/spotbugs-annotations-3.1.9.jar:/usr/bin/../share/java/kafka/support-metrics-client-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/support-metrics-common-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/validation-api-2.0.1.Final.jar:/usr/bin/../share/java/kafka/xz-1.5.jar:/usr/bin/../share/java/kafka/zkclient-0.11.jar:/usr/bin/../share/java/kafka/zookeeper-3.4.14.jar:/usr/bin/../share/java/kafka/zstd-jni-1.4.0-1.jar:/usr/bin/../share/java/kafka/kafka11aaf-jar-with-dependencies.jar:/usr/bin/../support-metrics-client/build/dependant-libs-2.12/*:/usr/bin/../support-metrics-client/build/libs/*:/usr/share/java/support-metrics-client/* (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,659] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,659] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,659] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,659] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,659] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,661] INFO Client environment:os.version=4.19.0-17-cloud-amd64 (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,661] INFO Client environment:user.name=mrkafka (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,661] INFO Client environment:user.home=/home/mrkafka (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,661] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper) [2021-10-15 11:01:28,664] INFO Initiating client connection, 
connectString=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=6000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@28701274 (org.apache.zookeeper.ZooKeeper)
[2021-10-15 11:01:28,701] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2021-10-15 11:01:28,744] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[2021-10-15 11:01:28,755] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:01:28,851] INFO Opening socket connection to server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:01:28,918] INFO Socket connection established to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:01:28,996] INFO Session establishment complete on server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181, sessionid = 0x1000096e9210001, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:01:29,060] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2021-10-15 11:01:30,122] INFO Cluster ID = P3Yzo-7gQzaXtwUehK5FWw (kafka.server.KafkaServer)
[2021-10-15 11:01:30,190] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2021-10-15 11:01:30,507] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-10-15 11:01:30,533] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-10-15 11:01:30,570] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-10-15 11:01:30,731] INFO Loading logs. (kafka.log.LogManager)
[2021-10-15 11:01:30,797] INFO Logs loading complete in 65 ms. (kafka.log.LogManager)
[2021-10-15 11:01:30,834] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
[2021-10-15 11:01:30,858] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[2021-10-15 11:01:30,907] INFO Starting the log cleaner (kafka.log.LogCleaner)
[2021-10-15 11:01:31,094] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner)
[2021-10-15 11:01:32,827] INFO Awaiting socket connections on 0.0.0.0:9091. (kafka.network.Acceptor)
[2021-10-15 11:01:32,943] INFO Successfully logged in. (org.apache.kafka.common.security.authenticator.AbstractLogin)
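Per the KafkaConfig dump above, both listeners map to SASL_PLAINTEXT and PLAIN is the only enabled mechanism, so any in-cluster client has to present PLAIN credentials. A minimal sketch of such a client follows; the bootstrap address is the internal listener from the log, while the credentials are hypothetical placeholders (the deployment's real ones live in the JAAS/CADI files referenced earlier).

```java
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class InternalListenerProbe {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Internal listener from the KafkaConfig dump: INTERNAL_SASL_PLAINTEXT://:9092
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                "onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092");
        // listener.security.protocol.map maps both listeners to SASL_PLAINTEXT,
        // and sasl.enabled.mechanisms = [PLAIN]
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put("sasl.mechanism", "PLAIN");
        // Hypothetical credentials -- placeholders only.
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                        + "username=\"admin\" password=\"CHANGE_ME\";");
        try (AdminClient admin = AdminClient.create(props)) {
            // A cheap liveness probe: fetch the cluster id logged above (P3Yzo-7gQzaXtwUehK5FWw).
            System.out.println("cluster id: " + admin.describeCluster().clusterId().get());
        }
    }
}
```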
[2021-10-15 11:01:33,188] INFO [SocketServer brokerId=2] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9091,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT) (kafka.network.SocketServer)
[2021-10-15 11:01:33,189] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
[2021-10-15 11:01:33,285] INFO [SocketServer brokerId=2] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT) (kafka.network.SocketServer)
[2021-10-15 11:01:33,287] INFO [SocketServer brokerId=2] Started 2 acceptor threads for data-plane (kafka.network.SocketServer)
[2021-10-15 11:01:33,431] INFO [ExpirationReaper-2-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-10-15 11:01:33,434] INFO [ExpirationReaper-2-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-10-15 11:01:33,455] INFO [ExpirationReaper-2-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-10-15 11:01:33,456] INFO [ExpirationReaper-2-ElectPreferredLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-10-15 11:01:33,670] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2021-10-15 11:01:33,834] INFO Creating /brokers/ids/2 (is it secure? true) (kafka.zk.KafkaZkClient)
[2021-10-15 11:01:33,941] INFO Stat of the created znode at /brokers/ids/2 is: 12884901948,12884901948,1634295693881,1634295693881,1,0,0,72058242194276353,366,0,12884901948 (kafka.zk.KafkaZkClient)
[2021-10-15 11:01:33,942] INFO Registered broker 2 at path /brokers/ids/2 with addresses: ArrayBuffer(EndPoint(10.253.0.233,30492,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT), EndPoint(onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT)), czxid (broker epoch): 12884901948 (kafka.zk.KafkaZkClient)
[2021-10-15 11:01:33,944] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2021-10-15 11:01:34,147] INFO [ControllerEventThread controllerId=2] Starting (kafka.controller.ControllerEventManager$ControllerEventThread)
[2021-10-15 11:01:34,256] INFO [ExpirationReaper-2-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-10-15 11:01:34,257] INFO [ExpirationReaper-2-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-10-15 11:01:34,258] INFO [ExpirationReaper-2-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-10-15 11:01:34,268] INFO [GroupCoordinator 2]: Starting up. (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:01:34,397] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 9 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:01:34,447] INFO [GroupCoordinator 2]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:01:34,485] DEBUG [Controller id=2] Broker 0 has been elected as the controller, so stopping the election process. (kafka.controller.KafkaController)
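The broker has now registered itself as the ephemeral znode /brokers/ids/2 and found that broker 0 already won the controller election. A hedged sketch of reading that registration back is below; the JSON layout named in the comment is the standard Kafka broker-registration format, not data copied from this cluster, and since zookeeper.set.acl = true the read may need the same 'Client' JAAS login the broker used.

```java
import org.apache.zookeeper.ZooKeeper;

public class BrokerRegistrationPeek {
    public static void main(String[] args) throws Exception {
        // Same SASL'd ZooKeeper ensemble the broker used; with zookeeper.set.acl = true,
        // protected znodes require the 'Client' JAAS login context to read.
        System.setProperty("java.security.auth.login.config",
                "/etc/kafka/secrets/jaas/kafka_server_jaas.conf");
        ZooKeeper zk = new ZooKeeper(
                "onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181",
                6000, event -> { });
        try {
            // Ephemeral znode written during startup; it vanishes if broker 2 dies.
            byte[] data = zk.getData("/brokers/ids/2", false, null);
            // Typically JSON like {"listener_security_protocol_map":...,"endpoints":[...],...}
            System.out.println(new String(data, java.nio.charset.StandardCharsets.UTF_8));
        } finally {
            zk.close();
        }
    }
}
```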
[2021-10-15 11:01:34,633] INFO [ProducerId Manager 2]: Acquired new producerId block (brokerId:2,blockStartProducerId:10000,blockEndProducerId:10999) by writing to Zk with path version 11 (kafka.coordinator.transaction.ProducerIdManager)
[2021-10-15 11:01:34,857] INFO [TransactionCoordinator id=2] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-10-15 11:01:34,956] INFO [TransactionCoordinator id=2] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-10-15 11:01:34,989] INFO [Transaction Marker Channel Manager 2]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2021-10-15 11:01:35,313] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2021-10-15 11:01:35,492] INFO [SocketServer brokerId=2] Started data-plane processors for 2 acceptors (kafka.network.SocketServer)
[2021-10-15 11:01:35,496] INFO Kafka version: 5.3.1-ccs (org.apache.kafka.common.utils.AppInfoParser)
[2021-10-15 11:01:35,496] INFO Kafka commitId: 03799faf9878a999 (org.apache.kafka.common.utils.AppInfoParser)
[2021-10-15 11:01:35,497] INFO Kafka startTimeMs: 1634295695493 (org.apache.kafka.common.utils.AppInfoParser)
[2021-10-15 11:01:35,500] INFO [KafkaServer id=2] started (kafka.server.KafkaServer)
2021-10-15T11:03:04.744+0000 INIT [cadi] Loading CADI Properties from /opt/app/osaaf/local/org.onap.dmaap.mr.location.props
2021-10-15T11:03:04.746+0000 INIT [cadi] Loading CADI Properties from /opt/app/osaaf/local/org.onap.dmaap.mr.cred.props
2021-10-15T11:03:04.757+0000 INIT [cadi] cadi_keyfile points to /opt/app/osaaf/local/org.onap.dmaap.mr.keyfile
2021-10-15T11:03:05.272+0000 INIT [cadi] cadi_protocols is set to TLSv1.1,TLSv1.2
2021-10-15T11:03:05.749+0000 INIT [cadi] AAFLocator for https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1 could not be created. java.net.URISyntaxException: Malformed escape pair at index 36: https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1
2021-10-15T11:03:05.754+0000 ERROR [cadi] Null Locator passed [Ljava.lang.Object;@268e9f7f
[2021-10-15 11:03:05,756] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:03:05,756] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
org.onap.aaf.cadi.LocatorException: Null Locator passed
	at org.onap.aaf.cadi.http.HMangr.<init>(HMangr.java:53)
	at org.onap.aaf.cadi.aaf.v2_0.AAFConHttp.<init>(AAFConHttp.java:54)
	at org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider.setup(Cadi3AAFProvider.java:141)
	at org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider.<init>(Cadi3AAFProvider.java:111)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at java.lang.Class.newInstance(Class.java:442)
	at java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:380)
	at java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
	at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
	at org.onap.dmaap.commonauth.kafka.base.authorization.AuthorizationProviderFactory.<init>(AuthorizationProviderFactory.java:34)
	at org.onap.dmaap.commonauth.kafka.base.authorization.AuthorizationProviderFactory.<clinit>(AuthorizationProviderFactory.java:29)
	at org.onap.dmaap.kafkaAuthorize.PlainSaslServer1.evaluateResponse(PlainSaslServer1.java:106)
	at org.apache.kafka.common.security.authenticator.SaslServerAuthenticator.handleSaslToken(SaslServerAuthenticator.java:451)
	at org.apache.kafka.common.security.authenticator.SaslServerAuthenticator.authenticate(SaslServerAuthenticator.java:291)
	at org.apache.kafka.common.network.KafkaChannel.prepare(KafkaChannel.java:173)
	at org.apache.kafka.common.network.Selector.pollSelectionKeys(Selector.java:547)
	at org.apache.kafka.common.network.Selector.poll(Selector.java:483)
	at kafka.network.Processor.poll(SocketServer.scala:863)
	at kafka.network.Processor.run(SocketServer.scala:762)
	at java.lang.Thread.run(Thread.java:748)
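The root cause of the LocatorException above is visible in the INIT lines: the locator template still contains the unexpanded %CNS and %AAF_NS placeholders, and java.net.URI rejects a bare % that is not followed by two hex digits. A tiny reproduction (index 36 is exactly the % of %CNS):

```java
import java.net.URI;
import java.net.URISyntaxException;

public class LocatorUriRepro {
    public static void main(String[] args) {
        // Placeholder-laden URL copied from the CADI log line above.
        String raw = "https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1";
        try {
            new URI(raw);
        } catch (URISyntaxException e) {
            // Prints: Malformed escape pair at index 36: https://aaf-locate.onap:8095/...
            // '%' must introduce a two-hex-digit escape, and "%CN" is not one.
            System.out.println(e.getMessage());
        }
    }
}
```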
[2021-10-15 11:11:34,277] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:14:33,539] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:14:33,540] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:16:20,456] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:16:20,464] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:16:20,483] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 0 epoch 8 for partition AAI-EVENT-1 (state.change.logger)
[2021-10-15 11:16:20,483] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 0 epoch 8 for partition AAI-EVENT-0 (state.change.logger)
[2021-10-15 11:16:20,483] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 0 epoch 8 for partition AAI-EVENT-2 (state.change.logger)
[2021-10-15 11:16:20,502] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 0 epoch 8 starting the become-leader transition for partition AAI-EVENT-1 (state.change.logger)
[2021-10-15 11:16:20,519] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(AAI-EVENT-1) (kafka.server.ReplicaFetcherManager)
[2021-10-15 11:16:20,881] INFO [Log partition=AAI-EVENT-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:20,916] INFO [Log partition=AAI-EVENT-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 321 ms (kafka.log.Log)
[2021-10-15 11:16:20,921] INFO Created log for partition AAI-EVENT-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:20,953] INFO [Partition AAI-EVENT-1 broker=2] No checkpointed highwatermark is found for partition AAI-EVENT-1 (kafka.cluster.Partition)
[2021-10-15 11:16:20,957] INFO Replica loaded for partition AAI-EVENT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:20,959] INFO Replica loaded for partition AAI-EVENT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:20,959] INFO Replica loaded for partition AAI-EVENT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:20,962] INFO [Partition AAI-EVENT-1 broker=2] AAI-EVENT-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-10-15 11:16:21,026] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 4 for partition AAI-EVENT-1 (last update controller epoch 8) (state.change.logger)
[2021-10-15 11:16:21,028] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 0 epoch 8 for the become-leader transition for partition AAI-EVENT-1 (state.change.logger)
[2021-10-15 11:16:21,029] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 0 epoch 8 starting the become-follower transition for partition AAI-EVENT-2 with leader 0 (state.change.logger)
[2021-10-15 11:16:21,029] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 0 epoch 8 starting the become-follower transition for partition AAI-EVENT-0 with leader 1 (state.change.logger)
[2021-10-15 11:16:21,036] INFO Replica loaded for partition AAI-EVENT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:21,036] INFO Replica loaded for partition AAI-EVENT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:21,147] INFO [Log partition=AAI-EVENT-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:21,275] INFO [Log partition=AAI-EVENT-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 232 ms (kafka.log.Log)
[2021-10-15 11:16:21,276] INFO Created log for partition AAI-EVENT-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 
9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:21,277] INFO [Partition AAI-EVENT-2 broker=2] No checkpointed highwatermark is found for partition AAI-EVENT-2 (kafka.cluster.Partition) [2021-10-15 11:16:21,277] INFO Replica loaded for partition AAI-EVENT-2 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:21,279] INFO Replica loaded for partition AAI-EVENT-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:21,475] INFO [Log partition=AAI-EVENT-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:21,544] INFO [Log partition=AAI-EVENT-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 239 ms (kafka.log.Log) [2021-10-15 11:16:21,548] INFO Created log for partition AAI-EVENT-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:21,549] INFO [Partition AAI-EVENT-0 broker=2] No checkpointed highwatermark is found for partition AAI-EVENT-0 (kafka.cluster.Partition) [2021-10-15 11:16:21,549] INFO Replica loaded for partition AAI-EVENT-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:21,549] INFO Replica loaded for partition AAI-EVENT-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:21,552] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(AAI-EVENT-2, AAI-EVENT-0) (kafka.server.ReplicaFetcherManager) [2021-10-15 11:16:21,558] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 4 for partition AAI-EVENT-0 with leader 1 (state.change.logger) [2021-10-15 11:16:21,573] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 4 for partition AAI-EVENT-2 with leader 0 (state.change.logger) [2021-10-15 11:16:21,588] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition AAI-EVENT-0 as part of become-follower request with correlation id 4 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:16:21,606] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition AAI-EVENT-2 as part of become-follower request with correlation id 4 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:16:21,844] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:21,860] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, 
host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(AAI-EVENT-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-10-15 11:16:21,888] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition AAI-EVENT-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-10-15 11:16:21,898] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(AAI-EVENT-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-10-15 11:16:21,900] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 4 for partition AAI-EVENT-0 with leader 1 (state.change.logger)
[2021-10-15 11:16:21,900] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 4 for partition AAI-EVENT-2 with leader 0 (state.change.logger)
[2021-10-15 11:16:21,901] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 0 epoch 8 for the become-follower transition for partition AAI-EVENT-2 with leader 0 (state.change.logger)
[2021-10-15 11:16:21,902] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 0 epoch 8 for the become-follower transition for partition AAI-EVENT-0 with leader 1 (state.change.logger)
[2021-10-15 11:16:21,893] INFO [Log partition=AAI-EVENT-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-10-15 11:16:21,941] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread)
[2021-10-15 11:16:21,970] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition AAI-EVENT-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-10-15 11:16:21,972] INFO [Log partition=AAI-EVENT-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-10-15 11:16:21,986] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:16:21,986] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:16:22,257] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:16:22,305] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
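At this point the AAI-EVENT topic exists with three partitions at replication factor 3, with broker 2 leading partition 1 and following partitions 0 and 2. An in-cluster client could now consume from it; a minimal sketch, again with hypothetical credentials and consumer group name:

```java
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class AaiEventTail {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers",
                "onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092");
        props.put("group.id", "aai-event-tail"); // hypothetical group name
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        // Same SASL settings the broker's listeners require.
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put("sasl.mechanism", "PLAIN");
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                        + "username=\"admin\" password=\"CHANGE_ME\";");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("AAI-EVENT"));
            // With the default auto.offset.reset=latest this tails new events only.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<String, String> r : records) {
                System.out.printf("p%d@%d: %s%n", r.partition(), r.offset(), r.value());
            }
        }
    }
}
```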
zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition AAI-EVENT-2 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 5 (state.change.logger) [2021-10-15 11:16:28,481] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-10-15 11:16:28,481] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-10-15 11:16:28,818] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-13 (state.change.logger) [2021-10-15 11:16:28,818] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-46 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-9 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-42 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-21 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-17 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-30 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-26 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-5 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-38 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation
id 6 from controller 0 epoch 8 for partition __consumer_offsets-1 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-34 (state.change.logger) [2021-10-15 11:16:28,819] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-16 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-45 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-12 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-41 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-24 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-20 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-49 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-0 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-29 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-25 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-8 (state.change.logger) [2021-10-15 
11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-37 (state.change.logger) [2021-10-15 11:16:28,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-4 (state.change.logger) [2021-10-15 11:16:28,821] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-33 (state.change.logger) [2021-10-15 11:16:28,821] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-15 (state.change.logger) [2021-10-15 11:16:28,821] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-48 (state.change.logger) [2021-10-15 11:16:28,821] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-11 (state.change.logger) [2021-10-15 11:16:28,821] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-44 (state.change.logger) [2021-10-15 11:16:28,821] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-23 (state.change.logger) [2021-10-15 11:16:28,822] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-19 (state.change.logger) [2021-10-15 11:16:28,822] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-32 (state.change.logger) [2021-10-15 11:16:28,822] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-28 (state.change.logger) [2021-10-15 11:16:28,822] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-7 (state.change.logger) [2021-10-15 11:16:28,822] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, 
leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-40 (state.change.logger) [2021-10-15 11:16:28,822] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-3 (state.change.logger) [2021-10-15 11:16:28,822] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-36 (state.change.logger) [2021-10-15 11:16:28,822] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-47 (state.change.logger) [2021-10-15 11:16:28,823] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-14 (state.change.logger) [2021-10-15 11:16:28,823] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-43 (state.change.logger) [2021-10-15 11:16:28,823] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-10 (state.change.logger) [2021-10-15 11:16:28,823] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-22 (state.change.logger) [2021-10-15 11:16:28,823] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-18 (state.change.logger) [2021-10-15 11:16:28,824] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-31 (state.change.logger) [2021-10-15 11:16:28,824] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-27 (state.change.logger) [2021-10-15 11:16:28,824] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-39 (state.change.logger) [2021-10-15 11:16:28,824] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 6 from 
controller 0 epoch 8 for partition __consumer_offsets-6 (state.change.logger) [2021-10-15 11:16:28,824] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-35 (state.change.logger) [2021-10-15 11:16:28,825] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 6 from controller 0 epoch 8 for partition __consumer_offsets-2 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-0 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-48 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-45 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-42 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-39 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-36 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-33 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-30 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-27 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-24 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-21 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-18 (state.change.logger) [2021-10-15 11:16:28,900] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-15 (state.change.logger) [2021-10-15 11:16:28,902] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 
starting the become-leader transition for partition __consumer_offsets-12 (state.change.logger) [2021-10-15 11:16:28,902] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-9 (state.change.logger) [2021-10-15 11:16:28,902] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-6 (state.change.logger) [2021-10-15 11:16:28,902] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-leader transition for partition __consumer_offsets-3 (state.change.logger) [2021-10-15 11:16:28,904] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(__consumer_offsets-30, __consumer_offsets-21, __consumer_offsets-27, __consumer_offsets-9, __consumer_offsets-33, __consumer_offsets-36, __consumer_offsets-42, __consumer_offsets-3, __consumer_offsets-18, __consumer_offsets-15, __consumer_offsets-24, __consumer_offsets-48, __consumer_offsets-6, __consumer_offsets-0, __consumer_offsets-39, __consumer_offsets-12, __consumer_offsets-45) (kafka.server.ReplicaFetcherManager) [2021-10-15 11:16:28,990] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:28,996] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 38 ms (kafka.log.Log) [2021-10-15 11:16:28,997] INFO Created log for partition __consumer_offsets-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,010] INFO [Partition __consumer_offsets-0 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-0 (kafka.cluster.Partition) [2021-10-15 11:16:29,011] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,011] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,011] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,012] INFO [Partition __consumer_offsets-0 broker=2] __consumer_offsets-0 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,016] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-0 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,043] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,053] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 32 ms (kafka.log.Log) [2021-10-15 11:16:29,056] INFO Created log for partition __consumer_offsets-48 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,067] INFO [Partition __consumer_offsets-48 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-48 (kafka.cluster.Partition) [2021-10-15 11:16:29,067] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,067] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,067] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,068] INFO [Partition __consumer_offsets-48 broker=2] __consumer_offsets-48 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,079] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-48 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,129] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,138] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 54 ms (kafka.log.Log) [2021-10-15 11:16:29,139] INFO Created log for partition __consumer_offsets-45 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,140] INFO [Partition __consumer_offsets-45 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-45 (kafka.cluster.Partition) [2021-10-15 11:16:29,140] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,140] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,140] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,141] INFO [Partition __consumer_offsets-45 broker=2] __consumer_offsets-45 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,145] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-45 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,182] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,186] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 38 ms (kafka.log.Log) [2021-10-15 11:16:29,188] INFO Created log for partition __consumer_offsets-42 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,188] INFO [Partition __consumer_offsets-42 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-42 (kafka.cluster.Partition) [2021-10-15 11:16:29,189] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,189] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,189] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,189] INFO [Partition __consumer_offsets-42 broker=2] __consumer_offsets-42 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,197] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-42 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,249] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,280] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 76 ms (kafka.log.Log) [2021-10-15 11:16:29,281] INFO Created log for partition __consumer_offsets-39 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,282] INFO [Partition __consumer_offsets-39 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-39 (kafka.cluster.Partition) [2021-10-15 11:16:29,282] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,283] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,283] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,283] INFO [Partition __consumer_offsets-39 broker=2] __consumer_offsets-39 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,288] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-39 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,338] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,344] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 30 ms (kafka.log.Log) [2021-10-15 11:16:29,345] INFO Created log for partition __consumer_offsets-36 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,346] INFO [Partition __consumer_offsets-36 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-36 (kafka.cluster.Partition) [2021-10-15 11:16:29,346] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,346] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,346] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,346] INFO [Partition __consumer_offsets-36 broker=2] __consumer_offsets-36 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,352] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-36 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,423] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,431] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 64 ms (kafka.log.Log) [2021-10-15 11:16:29,434] INFO Created log for partition __consumer_offsets-33 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,435] INFO [Partition __consumer_offsets-33 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-33 (kafka.cluster.Partition) [2021-10-15 11:16:29,435] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,435] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,435] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,436] INFO [Partition __consumer_offsets-33 broker=2] __consumer_offsets-33 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,440] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-33 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,502] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,506] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 47 ms (kafka.log.Log) [2021-10-15 11:16:29,507] INFO Created log for partition __consumer_offsets-30 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,508] INFO [Partition __consumer_offsets-30 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-30 (kafka.cluster.Partition) [2021-10-15 11:16:29,509] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,509] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,509] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,509] INFO [Partition __consumer_offsets-30 broker=2] __consumer_offsets-30 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,523] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-30 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,563] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,570] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 42 ms (kafka.log.Log) [2021-10-15 11:16:29,571] INFO Created log for partition __consumer_offsets-27 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,572] INFO [Partition __consumer_offsets-27 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-27 (kafka.cluster.Partition) [2021-10-15 11:16:29,572] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,572] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,572] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,572] INFO [Partition __consumer_offsets-27 broker=2] __consumer_offsets-27 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,582] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-27 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,738] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,747] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 139 ms (kafka.log.Log) [2021-10-15 11:16:29,749] INFO Created log for partition __consumer_offsets-24 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,749] INFO [Partition __consumer_offsets-24 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-24 (kafka.cluster.Partition) [2021-10-15 11:16:29,749] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,749] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,749] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,750] INFO [Partition __consumer_offsets-24 broker=2] __consumer_offsets-24 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,764] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-24 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,838] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,850] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 77 ms (kafka.log.Log) [2021-10-15 11:16:29,855] INFO Created log for partition __consumer_offsets-21 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,856] INFO [Partition __consumer_offsets-21 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-21 (kafka.cluster.Partition) [2021-10-15 11:16:29,856] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,856] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,856] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,856] INFO [Partition __consumer_offsets-21 broker=2] __consumer_offsets-21 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:29,875] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-21 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:29,967] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:29,986] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 103 ms (kafka.log.Log) [2021-10-15 11:16:29,987] INFO Created log for partition __consumer_offsets-18 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:29,993] INFO [Partition __consumer_offsets-18 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-18 (kafka.cluster.Partition) [2021-10-15 11:16:29,993] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,993] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,994] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:29,994] INFO [Partition __consumer_offsets-18 broker=2] __consumer_offsets-18 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:30,010] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-18 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:30,042] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:30,056] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 41 ms (kafka.log.Log) [2021-10-15 11:16:30,057] INFO Created log for partition __consumer_offsets-15 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:30,063] INFO [Partition __consumer_offsets-15 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-15 (kafka.cluster.Partition) [2021-10-15 11:16:30,063] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,063] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,063] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,063] INFO [Partition __consumer_offsets-15 broker=2] __consumer_offsets-15 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:30,076] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-15 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:30,163] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:30,193] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 112 ms (kafka.log.Log) [2021-10-15 11:16:30,195] INFO Created log for partition __consumer_offsets-12 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:30,196] INFO [Partition __consumer_offsets-12 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-12 (kafka.cluster.Partition) [2021-10-15 11:16:30,196] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,196] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,196] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,197] INFO [Partition __consumer_offsets-12 broker=2] __consumer_offsets-12 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:30,209] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-12 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:30,256] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:30,260] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 47 ms (kafka.log.Log) [2021-10-15 11:16:30,262] INFO Created log for partition __consumer_offsets-9 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:30,263] INFO [Partition __consumer_offsets-9 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-9 (kafka.cluster.Partition) [2021-10-15 11:16:30,263] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,263] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,263] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,263] INFO [Partition __consumer_offsets-9 broker=2] __consumer_offsets-9 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:30,267] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-9 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:30,449] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:30,458] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 181 ms (kafka.log.Log) [2021-10-15 11:16:30,460] INFO Created log for partition __consumer_offsets-6 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:30,463] INFO [Partition __consumer_offsets-6 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-6 (kafka.cluster.Partition) [2021-10-15 11:16:30,463] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,464] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,464] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,464] INFO [Partition __consumer_offsets-6 broker=2] __consumer_offsets-6 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:30,478] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-6 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:30,543] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:30,585] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 100 ms (kafka.log.Log) [2021-10-15 11:16:30,587] INFO Created log for partition __consumer_offsets-3 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:30,590] INFO [Partition __consumer_offsets-3 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-3 (kafka.cluster.Partition) [2021-10-15 11:16:30,591] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,591] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,591] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,591] INFO [Partition __consumer_offsets-3 broker=2] __consumer_offsets-3 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:30,609] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-3 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:30,609] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-0 (state.change.logger) [2021-10-15 11:16:30,609] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-48 (state.change.logger) [2021-10-15 11:16:30,609] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-45 (state.change.logger) [2021-10-15 11:16:30,609] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-42 (state.change.logger) [2021-10-15 11:16:30,609] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-39 (state.change.logger) [2021-10-15 11:16:30,609] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-36 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-33 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-30 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-27 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-24 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-21 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-18 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-15 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-12 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-9 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 
8 for the become-leader transition for partition __consumer_offsets-6 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-leader transition for partition __consumer_offsets-3 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-29 with leader 1 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-10 with leader 0 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-26 with leader 1 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-7 with leader 0 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-4 with leader 0 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-23 with leader 1 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-1 with leader 0 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-20 with leader 1 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-17 with leader 1 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-14 with leader 1 (state.change.logger) [2021-10-15 11:16:30,610] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-49 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-11 with leader 1 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-46 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-8 with leader 1 (state.change.logger) [2021-10-15 11:16:30,611] TRACE 
[Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-43 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-5 with leader 1 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-2 with leader 1 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-40 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-37 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-34 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-31 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-47 with leader 1 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-19 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-28 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-38 with leader 1 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-35 with leader 1 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-44 with leader 1 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-25 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-16 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the 
become-follower transition for partition __consumer_offsets-22 with leader 0 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-41 with leader 1 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-32 with leader 1 (state.change.logger) [2021-10-15 11:16:30,611] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-13 with leader 0 (state.change.logger) [2021-10-15 11:16:30,612] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,699] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:30,712] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 86 ms (kafka.log.Log) [2021-10-15 11:16:30,714] INFO Created log for partition __consumer_offsets-29 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:30,714] INFO [Partition __consumer_offsets-29 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-29 (kafka.cluster.Partition) [2021-10-15 11:16:30,715] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,715] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,715] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,715] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,774] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:30,782] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 61 ms (kafka.log.Log) [2021-10-15 11:16:30,783] INFO Created log for partition __consumer_offsets-10 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:30,784] INFO [Partition __consumer_offsets-10 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-10 (kafka.cluster.Partition) [2021-10-15 11:16:30,784] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,784] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,784] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,877] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:30,902] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 107 ms (kafka.log.Log) [2021-10-15 11:16:30,903] INFO Created log for partition __consumer_offsets-26 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:30,904] INFO [Partition __consumer_offsets-26 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-26 (kafka.cluster.Partition) [2021-10-15 11:16:30,904] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,904] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:30,967] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:31,007] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 96 ms (kafka.log.Log) [2021-10-15 11:16:31,009] INFO Created log for partition __consumer_offsets-7 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:31,010] INFO [Partition __consumer_offsets-7 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-7 (kafka.cluster.Partition) [2021-10-15 11:16:31,010] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,010] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,010] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,010] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,083] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:31,133] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 110 ms (kafka.log.Log) [2021-10-15 11:16:31,134] INFO Created log for partition __consumer_offsets-4 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, 
unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:31,135] INFO [Partition __consumer_offsets-4 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-4 (kafka.cluster.Partition) [2021-10-15 11:16:31,135] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,136] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,475] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:31,514] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 334 ms (kafka.log.Log) [2021-10-15 11:16:31,515] INFO Created log for partition __consumer_offsets-23 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:31,516] INFO [Partition __consumer_offsets-23 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-23 (kafka.cluster.Partition) [2021-10-15 11:16:31,516] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,516] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,516] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,553] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:31,562] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 31 ms (kafka.log.Log) [2021-10-15 11:16:31,564] INFO Created log for partition __consumer_offsets-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:31,565] INFO [Partition __consumer_offsets-1 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-1 (kafka.cluster.Partition) [2021-10-15 11:16:31,565] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,565] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,565] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,565] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,625] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:31,628] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 48 ms (kafka.log.Log) [2021-10-15 11:16:31,629] INFO Created log for partition __consumer_offsets-20 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:31,630] INFO [Partition __consumer_offsets-20 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-20 (kafka.cluster.Partition) [2021-10-15 11:16:31,630] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,631] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,824] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:31,843] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 200 ms (kafka.log.Log) [2021-10-15 11:16:31,844] INFO Created log for partition __consumer_offsets-17 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:31,845] INFO [Partition __consumer_offsets-17 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-17 (kafka.cluster.Partition) [2021-10-15 11:16:31,845] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,845] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,845] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:31,845] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,207] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:32,237] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 346 ms (kafka.log.Log) [2021-10-15 11:16:32,238] INFO Created log for partition __consumer_offsets-14 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, 
unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:32,242] INFO [Partition __consumer_offsets-14 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-14 (kafka.cluster.Partition) [2021-10-15 11:16:32,242] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,243] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,323] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:32,442] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 191 ms (kafka.log.Log) [2021-10-15 11:16:32,443] INFO Created log for partition __consumer_offsets-49 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:32,455] INFO [Partition __consumer_offsets-49 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-49 (kafka.cluster.Partition) [2021-10-15 11:16:32,455] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,456] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,456] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,541] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:32,609] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 147 ms (kafka.log.Log) [2021-10-15 11:16:32,610] INFO Created log for partition __consumer_offsets-11 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:32,620] INFO [Partition __consumer_offsets-11 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-11 (kafka.cluster.Partition) [2021-10-15 11:16:32,620] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,620] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,621] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,621] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,731] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:32,777] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 142 ms (kafka.log.Log) [2021-10-15 11:16:32,779] INFO Created log for partition __consumer_offsets-46 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:32,781] INFO [Partition __consumer_offsets-46 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-46 (kafka.cluster.Partition) [2021-10-15 11:16:32,781] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,781] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,781] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,889] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:32,899] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 112 ms (kafka.log.Log) [2021-10-15 11:16:32,900] INFO Created log for partition __consumer_offsets-8 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:32,901] INFO [Partition __consumer_offsets-8 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-8 (kafka.cluster.Partition) [2021-10-15 11:16:32,901] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:32,902] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,011] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:33,019] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 108 ms (kafka.log.Log) [2021-10-15 11:16:33,021] INFO Created log for partition __consumer_offsets-43 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:33,026] INFO [Partition __consumer_offsets-43 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-43 (kafka.cluster.Partition) [2021-10-15 11:16:33,026] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,026] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,027] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,077] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:33,082] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 36 ms (kafka.log.Log) [2021-10-15 11:16:33,084] INFO Created log for partition __consumer_offsets-5 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, 
message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:33,085] INFO [Partition __consumer_offsets-5 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-5 (kafka.cluster.Partition) [2021-10-15 11:16:33,085] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,085] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,086] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,086] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,145] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:33,167] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 76 ms (kafka.log.Log) [2021-10-15 11:16:33,168] INFO Created log for partition __consumer_offsets-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:33,169] INFO [Partition __consumer_offsets-2 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-2 (kafka.cluster.Partition) [2021-10-15 11:16:33,174] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,179] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,179] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,241] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:33,248] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 41 ms (kafka.log.Log) [2021-10-15 11:16:33,249] INFO Created log for partition __consumer_offsets-40 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:33,250] INFO [Partition __consumer_offsets-40 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-40 (kafka.cluster.Partition) [2021-10-15 11:16:33,250] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,251] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,277] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:33,285] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 30 ms (kafka.log.Log) [2021-10-15 11:16:33,286] INFO Created log for partition __consumer_offsets-37 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:33,287] INFO [Partition __consumer_offsets-37 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-37 (kafka.cluster.Partition) [2021-10-15 11:16:33,287] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,288] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,288] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,288] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,327] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:33,333] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 42 ms (kafka.log.Log) [2021-10-15 11:16:33,334] INFO Created log for partition __consumer_offsets-34 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, 
unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:33,335] INFO [Partition __consumer_offsets-34 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-34 (kafka.cluster.Partition) [2021-10-15 11:16:33,335] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,336] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,428] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:33,436] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 81 ms (kafka.log.Log) [2021-10-15 11:16:33,437] INFO Created log for partition __consumer_offsets-31 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:33,438] INFO [Partition __consumer_offsets-31 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-31 (kafka.cluster.Partition) [2021-10-15 11:16:33,438] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,438] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,439] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,510] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:33,548] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 105 ms (kafka.log.Log) [2021-10-15 11:16:33,549] INFO Created log for partition __consumer_offsets-47 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:33,551] INFO [Partition __consumer_offsets-47 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-47 (kafka.cluster.Partition) [2021-10-15 11:16:33,551] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,551] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,551] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:33,637] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:33,646] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 75 ms (kafka.log.Log) [2021-10-15 11:16:33,647] INFO Created log for partition __consumer_offsets-19 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager)
[2021-10-15 11:16:33,648] INFO [Partition __consumer_offsets-19 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-19 (kafka.cluster.Partition)
[2021-10-15 11:16:33,648] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,648] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,648] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,648] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,697] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:33,704] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 49 ms (kafka.log.Log)
[2021-10-15 11:16:33,705] INFO Created log for partition __consumer_offsets-28 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:33,706] INFO [Partition __consumer_offsets-28 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-28 (kafka.cluster.Partition)
[2021-10-15 11:16:33,706] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,707] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,707] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,748] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:33,798] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 82 ms (kafka.log.Log)
[2021-10-15 11:16:33,799] INFO Created log for partition __consumer_offsets-38 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:33,800] INFO [Partition __consumer_offsets-38 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-38 (kafka.cluster.Partition)
[2021-10-15 11:16:33,800] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,801] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,852] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:33,859] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 52 ms (kafka.log.Log)
[2021-10-15 11:16:33,860] INFO Created log for partition __consumer_offsets-35 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:33,861] INFO [Partition __consumer_offsets-35 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-35 (kafka.cluster.Partition)
[2021-10-15 11:16:33,861] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,861] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,861] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,861] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,918] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:33,939] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 70 ms (kafka.log.Log)
[2021-10-15 11:16:33,940] INFO Created log for partition __consumer_offsets-44 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:33,942] INFO [Partition __consumer_offsets-44 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-44 (kafka.cluster.Partition)
[2021-10-15 11:16:33,942] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,943] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:33,991] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:34,004] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 58 ms (kafka.log.Log)
[2021-10-15 11:16:34,005] INFO Created log for partition __consumer_offsets-25 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:34,006] INFO [Partition __consumer_offsets-25 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-25 (kafka.cluster.Partition)
[2021-10-15 11:16:34,006] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,006] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,007] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,007] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,041] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:34,047] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 35 ms (kafka.log.Log)
[2021-10-15 11:16:34,048] INFO Created log for partition __consumer_offsets-16 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:34,054] INFO [Partition __consumer_offsets-16 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-16 (kafka.cluster.Partition)
[2021-10-15 11:16:34,054] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,054] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,054] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,193] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:34,232] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 163 ms (kafka.log.Log)
[2021-10-15 11:16:34,233] INFO Created log for partition __consumer_offsets-22 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:34,234] INFO [Partition __consumer_offsets-22 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-22 (kafka.cluster.Partition)
[2021-10-15 11:16:34,235] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,235] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,399] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:34,418] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 166 ms (kafka.log.Log)
[2021-10-15 11:16:34,419] INFO Created log for partition __consumer_offsets-41 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:34,420] INFO [Partition __consumer_offsets-41 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-41 (kafka.cluster.Partition)
[2021-10-15 11:16:34,420] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,420] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,420] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,426] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,476] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:34,480] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 48 ms (kafka.log.Log)
[2021-10-15 11:16:34,481] INFO Created log for partition __consumer_offsets-32 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:34,482] INFO [Partition __consumer_offsets-32 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-32 (kafka.cluster.Partition)
[2021-10-15 11:16:34,482] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,482] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,509] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:16:34,513] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 26 ms (kafka.log.Log)
[2021-10-15 11:16:34,515] INFO Created log for partition __consumer_offsets-13 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:16:34,515] INFO [Partition __consumer_offsets-13 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-13 (kafka.cluster.Partition)
[2021-10-15 11:16:34,515] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,515] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:16:34,516] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(__consumer_offsets-28, __consumer_offsets-10, __consumer_offsets-32, __consumer_offsets-14, __consumer_offsets-40, __consumer_offsets-37, __consumer_offsets-22, __consumer_offsets-41, __consumer_offsets-4, __consumer_offsets-23, __consumer_offsets-26, __consumer_offsets-8, __consumer_offsets-49, __consumer_offsets-31, __consumer_offsets-13, __consumer_offsets-35, __consumer_offsets-17, __consumer_offsets-43, __consumer_offsets-25, __consumer_offsets-44, __consumer_offsets-47, __consumer_offsets-7, __consumer_offsets-29, __consumer_offsets-11, __consumer_offsets-34, __consumer_offsets-19, __consumer_offsets-16, __consumer_offsets-38, __consumer_offsets-1, __consumer_offsets-20, __consumer_offsets-5, __consumer_offsets-46, __consumer_offsets-2) (kafka.server.ReplicaFetcherManager)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-22 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-25 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-28 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-31 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-34 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-37 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-40 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-43 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-46 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-49 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,516] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-1 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-4 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-7 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-10 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-13 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-16 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-19 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-22 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-25 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-28 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-31 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-34 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-37 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-40 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-43 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,517] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-46 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-49 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-41 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-44 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-47 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-1 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-4 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-7 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-10 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-13 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-16 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-19 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-2 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-5 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-8 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-11 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-14 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-17 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-20 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-23 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-26 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-29 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-32 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-35 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,518] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-38 as part of become-follower request with correlation id 6 from controller 0 epoch 8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,536] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-8 -> (offset=0, leaderEpoch=0), __consumer_offsets-35 -> (offset=0, leaderEpoch=0), __consumer_offsets-41 -> (offset=0, leaderEpoch=0), __consumer_offsets-23 -> (offset=0, leaderEpoch=0), __consumer_offsets-47 -> (offset=0, leaderEpoch=0), __consumer_offsets-38 -> (offset=0, leaderEpoch=0), __consumer_offsets-17 -> (offset=0, leaderEpoch=0), __consumer_offsets-11 -> (offset=0, leaderEpoch=0), __consumer_offsets-2 -> (offset=0, leaderEpoch=0), __consumer_offsets-14 -> (offset=0, leaderEpoch=0), __consumer_offsets-20 -> (offset=0, leaderEpoch=0), __consumer_offsets-44 -> (offset=0, leaderEpoch=0), __consumer_offsets-5 -> (offset=0, leaderEpoch=0), __consumer_offsets-26 -> (offset=0, leaderEpoch=0), __consumer_offsets-29 -> (offset=0, leaderEpoch=0), __consumer_offsets-32 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-10-15 11:16:34,536] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-22 -> (offset=0, leaderEpoch=0), __consumer_offsets-4 -> (offset=0, leaderEpoch=0), __consumer_offsets-7 -> (offset=0, leaderEpoch=0), __consumer_offsets-46 -> (offset=0, leaderEpoch=0), __consumer_offsets-25 -> (offset=0, leaderEpoch=0), __consumer_offsets-49 -> (offset=0, leaderEpoch=0), __consumer_offsets-16 -> (offset=0, leaderEpoch=0), __consumer_offsets-28 -> (offset=0, leaderEpoch=0), __consumer_offsets-31 -> (offset=0, leaderEpoch=0), __consumer_offsets-37 -> (offset=0, leaderEpoch=0), __consumer_offsets-19 -> (offset=0, leaderEpoch=0), __consumer_offsets-13 -> (offset=0, leaderEpoch=0), __consumer_offsets-43 -> (offset=0, leaderEpoch=0), __consumer_offsets-1 -> (offset=0, leaderEpoch=0), __consumer_offsets-34 -> (offset=0, leaderEpoch=0), __consumer_offsets-10 -> (offset=0, leaderEpoch=0), __consumer_offsets-40 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-10-15 11:16:34,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-22 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-25 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-28 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-31 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-34 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-37 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-40 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-43 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-46 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-49 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-1 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-4 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-7 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-10 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-13 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-16 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-19 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 6 for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-10 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,538] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-7 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,538] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-4 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,538] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-1 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-49 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-46 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-43 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-40 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-37 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-34 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-31 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-19 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-28 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,542] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,543] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,543] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,543] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-25 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,543] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-16 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,543] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-22 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,543] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,543] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-10-15 11:16:34,543] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-13 with leader 0 (state.change.logger)
[2021-10-15 11:16:34,546] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,553] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,553] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,553] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,553] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,554] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,554] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,554] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,554] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,554] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,554] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,554] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,554] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,554] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,554] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,555] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,555] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,556] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,557] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:16:34,563] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-13 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger)
[2021-10-15 11:16:34,564] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger)
[2021-10-15 11:16:34,564] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger)
[2021-10-15 11:16:34,564] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger)
[2021-10-15 11:16:34,564] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger)
[2021-10-15 11:16:34,564] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-17 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger)
[2021-10-15 11:16:34,564] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger)
[2021-10-15 11:16:34,564] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1,
0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,564] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-5 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,564] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-38 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-34 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-20 in response to UpdateMetadata request sent by controller 0 epoch 8 
with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-49 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-25 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-8 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-4 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,565] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-15 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, 
isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-11 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-44 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-23 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-19 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-32 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-28 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-40 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-36 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-47 in response to UpdateMetadata request sent by controller 0 
epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,566] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-14 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,567] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-43 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,567] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-0 in 14 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,567] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-10 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,567] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-22 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,567] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,567] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-31 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,567] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,567] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,567] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,567] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-35 in response to 
UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,567] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-2 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 7 (state.change.logger) [2021-10-15 11:16:34,568] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-3 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,568] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-6 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,568] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-9 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,568] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-12 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,568] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-15 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,568] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-18 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,568] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-21 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,569] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-24 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,569] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-27 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,569] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-30 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,579] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-33 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,579] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-36 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,579] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-39 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,580] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-42 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,580] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-45 in 0 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,580] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-48 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-22. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-25. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-28. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-31. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-34. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-37. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-40. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-43. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-46. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-49. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-41. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-44. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-47. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-1. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-4. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-7. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-10. 
Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-13. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,585] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-16. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-19. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-2. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-5. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-8. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-11. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-14. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-17. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-20. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-23. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-26. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-29. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-32. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-35. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,586] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-38. Removed 0 cached offsets and 0 cached groups. 
(kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:16:34,675] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-10-15 11:16:34,675] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-10-15 11:16:34,721] INFO [GroupCoordinator 2]: Preparing to rebalance group NBICG1--AAI-EVENT in state PreparingRebalance with old generation 0 (__consumer_offsets-6) (reason: Adding new member NBIC1-5c719e38-5597-4b4f-bc76-2ea688b51b4d with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-10-15 11:16:34,822] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-28 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,822] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,822] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-43 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,822] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,822] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-10 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,822] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,822] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-25 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,822] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-7 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,823] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-37 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,823] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-40 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,823] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-22 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,823] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the 
largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-4 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,823] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-34 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,823] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-19 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,823] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-49 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,823] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-16 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,823] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,823] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,823] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-31 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,824] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,824] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-46 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,824] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:34,824] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-13 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:34,824] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,019] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-17 to local high watermark 0 (kafka.server.ReplicaFetcherThread) 
[2021-10-15 11:16:35,019] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,019] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-32 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,019] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,019] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-47 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,019] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,019] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-14 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-44 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-29 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-11 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-41 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-26 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-23 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher 
replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-8 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-38 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-20 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-5 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-35 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,020] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:35,020] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-10-15 11:16:35,021] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-10-15 11:16:37,735] INFO [GroupCoordinator 2]: Stabilized group NBICG1--AAI-EVENT generation 1 (__consumer_offsets-6) (kafka.coordinator.group.GroupCoordinator) [2021-10-15 11:16:37,752] INFO [GroupCoordinator 2]: Assignment received from leader for group NBICG1--AAI-EVENT for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-10-15 11:16:50,742] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 8 from controller 0 epoch 8 for partition POLICY-PDP-PAP-1 (state.change.logger) [2021-10-15 11:16:50,743] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 8 from controller 0 epoch 8 for partition POLICY-PDP-PAP-2 (state.change.logger) [2021-10-15 11:16:50,743] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 8 from controller 0 epoch 8 for partition POLICY-PDP-PAP-0 (state.change.logger) [2021-10-15 11:16:50,745] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 8 from controller 0 epoch 8 starting the become-leader transition for partition POLICY-PDP-PAP-0 
(state.change.logger) [2021-10-15 11:16:50,745] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(POLICY-PDP-PAP-0) (kafka.server.ReplicaFetcherManager) [2021-10-15 11:16:50,812] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:50,837] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 87 ms (kafka.log.Log) [2021-10-15 11:16:50,839] INFO Created log for partition POLICY-PDP-PAP-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-10-15 11:16:50,860] INFO [Partition POLICY-PDP-PAP-0 broker=2] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-0 (kafka.cluster.Partition) [2021-10-15 11:16:50,861] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:50,861] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:50,861] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:50,861] INFO [Partition POLICY-PDP-PAP-0 broker=2] POLICY-PDP-PAP-0 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-10-15 11:16:50,909] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 8 for partition POLICY-PDP-PAP-0 (last update controller epoch 8) (state.change.logger) [2021-10-15 11:16:50,909] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 8 from controller 0 epoch 8 for the become-leader transition for partition POLICY-PDP-PAP-0 (state.change.logger) [2021-10-15 11:16:50,909] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 8 from controller 0 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger) [2021-10-15 11:16:50,910] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 8 from controller 0 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger) [2021-10-15 11:16:50,910] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:50,982] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:51,039] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 109 ms (kafka.log.Log) [2021-10-15 11:16:51,041] INFO Created log for partition POLICY-PDP-PAP-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:51,044] INFO [Partition POLICY-PDP-PAP-1 broker=2] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-1 (kafka.cluster.Partition) [2021-10-15 11:16:51,044] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:51,044] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:51,045] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:51,045] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:51,069] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-10-15 11:16:51,075] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 25 ms (kafka.log.Log) [2021-10-15 11:16:51,077] INFO Created log for partition POLICY-PDP-PAP-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-10-15 11:16:51,078] INFO [Partition POLICY-PDP-PAP-2 broker=2] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-2 (kafka.cluster.Partition) [2021-10-15 11:16:51,078] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica) [2021-10-15 11:16:51,079] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(POLICY-PDP-PAP-2, POLICY-PDP-PAP-1) (kafka.server.ReplicaFetcherManager) [2021-10-15 11:16:51,079] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 8 for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger) [2021-10-15 11:16:51,079] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 8 for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger) [2021-10-15 11:16:51,079] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-2 as part of become-follower request with correlation id 8 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:16:51,080] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-1 as part of become-follower request with correlation id 8 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:16:51,080] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-PDP-PAP-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager) [2021-10-15 11:16:51,080] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-PDP-PAP-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager) [2021-10-15 11:16:51,080] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 8 for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger) [2021-10-15 11:16:51,081] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 8 for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger) [2021-10-15 11:16:51,081] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 8 from controller 0 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger) [2021-10-15 11:16:51,081] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 8 from controller 0 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger) [2021-10-15 11:16:51,098] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 9 (state.change.logger) [2021-10-15 11:16:51,099] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition POLICY-PDP-PAP-2 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 9 
(state.change.logger)
[2021-10-15 11:16:51,099] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 9 (state.change.logger)
[2021-10-15 11:16:51,147] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition POLICY-PDP-PAP-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-10-15 11:16:51,148] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-10-15 11:16:51,167] ERROR [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Error for partition POLICY-PDP-PAP-1 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-10-15 11:16:51,357] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition POLICY-PDP-PAP-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-10-15 11:16:51,357] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-10-15 11:16:52,627] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:16:52,627] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:16:55,954] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:16:55,955] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:18:52,775] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 10 (state.change.logger)
[2021-10-15 11:18:52,775] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 10 (state.change.logger)
[2021-10-15 11:18:56,334] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 11 (state.change.logger)
[2021-10-15 11:18:56,334] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 11 (state.change.logger)
[2021-10-15 11:18:59,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.mirrormakeragent-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 12 (state.change.logger)
[2021-10-15 11:19:56,064] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:19:56,065] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:21:34,281] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 3 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:22:25,031] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:22:25,031] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:29:33,988] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:29:33,988] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:29:54,266] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:29:54,267] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:29:54,278] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2, zkVersion=0, replicas=2, isNew=true) correlation id 13 from controller 0 epoch 8 for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-10-15 11:29:54,279] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 13 from controller 0 epoch 8 starting the become-leader transition for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-10-15 11:29:54,280] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(SDC-DISTR-NOTIF-TOPIC-AUTO-0) (kafka.server.ReplicaFetcherManager)
[2021-10-15 11:29:54,347] INFO [Log partition=SDC-DISTR-NOTIF-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:29:54,362] INFO [Log partition=SDC-DISTR-NOTIF-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 73 ms (kafka.log.Log)
[2021-10-15 11:29:54,363] INFO Created log for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:29:54,367] INFO [Partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 broker=2] No checkpointed highwatermark is found for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (kafka.cluster.Partition)
[2021-10-15 11:29:54,367] INFO Replica loaded for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:29:54,367] INFO [Partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 broker=2] SDC-DISTR-NOTIF-TOPIC-AUTO-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-10-15 11:29:54,384] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 13 for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (last update controller epoch 8) (state.change.logger)
[2021-10-15 11:29:54,384] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 13 from controller 0 epoch 8 for the become-leader transition for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-10-15 11:29:54,388] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 14 (state.change.logger)
[2021-10-15 11:29:56,389] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2, zkVersion=0, replicas=2, isNew=true) correlation id 15 from controller 0 epoch 8 for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2021-10-15 11:29:56,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 15 from controller 0 epoch 8 starting the become-leader transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2021-10-15 11:29:56,391] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(SDC-DISTR-STATUS-TOPIC-AUTO-0) (kafka.server.ReplicaFetcherManager)
[2021-10-15 11:29:56,423] INFO [Log partition=SDC-DISTR-STATUS-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-10-15 11:29:56,427] INFO [Log partition=SDC-DISTR-STATUS-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 28 ms (kafka.log.Log)
[2021-10-15 11:29:56,428] INFO Created log for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-10-15 11:29:56,429] INFO [Partition SDC-DISTR-STATUS-TOPIC-AUTO-0 broker=2] No checkpointed highwatermark is found for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (kafka.cluster.Partition)
[2021-10-15 11:29:56,430] INFO Replica loaded for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-10-15 11:29:56,430] INFO [Partition SDC-DISTR-STATUS-TOPIC-AUTO-0 broker=2] SDC-DISTR-STATUS-TOPIC-AUTO-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-10-15 11:29:56,436] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 0 epoch 8 with correlation id 15 for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (last update controller epoch 8) (state.change.logger)
[2021-10-15 11:29:56,436] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 15 from controller 0 epoch 8 for the become-leader transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2021-10-15 11:29:56,438] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 16 (state.change.logger)
[2021-10-15 11:29:57,110] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:29:57,110] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:30:00,297] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:30:00,297] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:08,730] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:08,730] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:12,044] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:12,044] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:17,302] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:17,302] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:17,313] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:17,313] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:17,318] INFO [GroupCoordinator 2]: Preparing to rebalance group aai-ml-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-27) (reason: Adding new member aai-ml-ed66ee21-9e94-479b-b38f-908ff0885126 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:31:17,578] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:17,578] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:20,319] INFO [GroupCoordinator 2]: Stabilized group aai-ml-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-27) (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:31:20,322] INFO [GroupCoordinator 2]: Assignment received from leader for group aai-ml-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:31:20,465] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:20,465] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:26,659] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:26,659] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:29,835] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:29,835] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:31:34,277] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:32:14,332] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:32:14,332] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:32:17,496] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:32:17,496] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:32:32,834] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:32:32,834] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:32:32,838] INFO [GroupCoordinator 2]: Preparing to rebalance group cds--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-18) (reason: Adding new member cds-5fee6651-ae69-438a-ba24-c21ee267756d with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:32:35,839] INFO [GroupCoordinator 2]: Stabilized group cds--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-18) (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:32:35,842] INFO [GroupCoordinator 2]: Assignment received from leader for group cds--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:32:35,990] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:32:35,990] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:33:43,114] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:33:43,114] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:33:43,119] INFO [GroupCoordinator 2]: Preparing to rebalance group dcae--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-3) (reason: Adding new member dcae-sch-c829a2f8-d88a-472e-8afe-23dab29007fb with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:33:43,537] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:33:43,537] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:33:44,092] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:33:44,092] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:33:46,120] INFO [GroupCoordinator 2]: Stabilized group dcae--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-3) (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:33:46,122] INFO [GroupCoordinator 2]: Assignment received from leader for group dcae--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-10-15 11:33:46,244] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:33:46,244] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:39:34,116] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:39:34,116] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:41:34,277] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:51:34,277] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-10-15 11:54:34,533] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:54:34,534] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:08,652] INFO [Partition __consumer_offsets-18 broker=2] Shrinking ISR from 2,1,0 to 2. Leader: (highWatermark: 98, endOffset: 99). Out of sync replicas: (brokerId: 1, endOffset: 98) (brokerId: 0, endOffset: 98). (kafka.cluster.Partition)
[2021-10-15 11:57:10,421] INFO [Partition __consumer_offsets-27 broker=2] Shrinking ISR from 2,0,1 to 2. Leader: (highWatermark: 52, endOffset: 53). Out of sync replicas: (brokerId: 0, endOffset: 52) (brokerId: 1, endOffset: 52). (kafka.cluster.Partition)
[2021-10-15 11:57:11,852] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:11,853] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:11,870] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:11,871] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:12,694] INFO [Partition __consumer_offsets-6 broker=2] Shrinking ISR from 2,1,0 to 2. Leader: (highWatermark: 616, endOffset: 619). Out of sync replicas: (brokerId: 1, endOffset: 616) (brokerId: 0, endOffset: 616). (kafka.cluster.Partition)
[2021-10-15 11:57:13,089] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:13,089] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:13,556] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:13,557] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:13,564] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 17 (state.change.logger)
[2021-10-15 11:57:14,561] INFO [Partition __consumer_offsets-18 broker=2] Expanding ISR from 2 to 2,0 (kafka.cluster.Partition)
[2021-10-15 11:57:15,435] INFO [Partition __consumer_offsets-27 broker=2] Expanding ISR from 2 to 2,1 (kafka.cluster.Partition)
[2021-10-15 11:57:15,494] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=1, replicas=[0, 2, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 18 (state.change.logger)
[2021-10-15 11:57:15,494] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=1, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 18 (state.change.logger)
[2021-10-15 11:57:15,494] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 18 (state.change.logger)
[2021-10-15 11:57:15,495] INFO [Partition __consumer_offsets-18 broker=2] Expanding ISR from 2,0 to 2,0,1 (kafka.cluster.Partition)
[2021-10-15 11:57:15,514] INFO [Partition __consumer_offsets-6 broker=2] Expanding ISR from 2 to 2,0 (kafka.cluster.Partition)
[2021-10-15 11:57:15,546] INFO [Partition __consumer_offsets-27 broker=2] Expanding ISR from 2,1 to 2,1,0 (kafka.cluster.Partition)
[2021-10-15 11:57:15,632] INFO [Partition __consumer_offsets-6 broker=2] Expanding ISR from 2,0 to 2,0,1 (kafka.cluster.Partition)
[2021-10-15 11:57:35,527] INFO Unable to read additional data from server sessionid 0x1000096e9210001, likely server has closed socket, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:35,602] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 19 (state.change.logger)
[2021-10-15 11:57:35,602] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 19 (state.change.logger)
[2021-10-15 11:57:35,602] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 19 (state.change.logger)
[2021-10-15 11:57:36,038] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:57:36,038] INFO Opening socket connection to server onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.159:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:36,043] INFO Socket connection established to onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.159:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:36,065] WARN Unable to reconnect to ZooKeeper service, session 0x1000096e9210001 has expired (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:36,072] INFO Unable to reconnect to ZooKeeper service, session 0x1000096e9210001 has expired, closing socket connection (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:36,067] INFO [ZooKeeperClient Kafka server] Session expired. (kafka.zookeeper.ZooKeeperClient)
[2021-10-15 11:57:36,065] INFO EventThread shut down for session: 0x1000096e9210001 (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:36,078] DEBUG [Controller id=2] Resigning (kafka.controller.KafkaController)
[2021-10-15 11:57:36,079] DEBUG [Controller id=2] Unregister BrokerModifications handler for Set() (kafka.controller.KafkaController)
[2021-10-15 11:57:36,080] INFO [ZooKeeperClient Kafka server] Initializing a new session to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181. (kafka.zookeeper.ZooKeeperClient)
[2021-10-15 11:57:36,080] INFO Initiating client connection, connectString=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=6000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@28701274 (org.apache.zookeeper.ZooKeeper)
[2021-10-15 11:57:36,081] INFO [PartitionStateMachine controllerId=2] Stopped partition state machine (kafka.controller.ZkPartitionStateMachine)
[2021-10-15 11:57:36,082] INFO [ReplicaStateMachine controllerId=2] Stopped replica state machine (kafka.controller.ZkReplicaStateMachine)
[2021-10-15 11:57:36,083] INFO [Controller id=2] Resigned (kafka.controller.KafkaController)
[2021-10-15 11:57:36,139] INFO Creating /brokers/ids/2 (is it secure? true) (kafka.zk.KafkaZkClient)
[2021-10-15 11:57:36,148] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:57:36,148] INFO Opening socket connection to server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:36,162] INFO Socket connection established to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:38,165] WARN Client session timed out, have not heard from server in 2002ms for sessionid 0x0 (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:38,165] INFO Client session timed out, have not heard from server in 2002ms for sessionid 0x0, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:38,270] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2021-10-15 11:57:38,619] INFO [Partition __consumer_offsets-3 broker=2] Shrinking ISR from 2,0,1 to 2,1. Leader: (highWatermark: 72, endOffset: 73). Out of sync replicas: (brokerId: 0, endOffset: 72). (kafka.cluster.Partition)
[2021-10-15 11:57:38,966] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:57:38,966] INFO Opening socket connection to server onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.159:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:38,974] INFO Socket connection established to onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.159:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:40,977] WARN Client session timed out, have not heard from server in 2002ms for sessionid 0x0 (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:40,977] INFO Client session timed out, have not heard from server in 2002ms for sessionid 0x0, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:41,078] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2021-10-15 11:57:41,659] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2021-10-15 11:57:41,801] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:57:41,808] INFO Opening socket connection to server onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.67.179:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:41,810] INFO Socket connection established to onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.67.179:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:43,813] WARN Client session timed out, have not heard from server in 2002ms for sessionid 0x0 (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:43,813] INFO Client session timed out, have not heard from server in 2002ms for sessionid 0x0, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:45,592] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:57:45,592] INFO Opening socket connection to server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:45,594] INFO Socket connection established to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:46,256] INFO Unable to read additional data from server sessionid 0x0, likely server has closed socket, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:47,041] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:57:47,042] INFO Opening socket connection to server onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.159:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:47,058] INFO Socket connection established to onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.159:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:47,086] INFO Unable to read additional data from server sessionid 0x0, likely server has closed socket, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:47,489] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:57:47,490] INFO Opening socket connection to server onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.67.179:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:47,522] INFO Socket connection established to onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.67.179:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:48,254] INFO Unable to read additional data from server sessionid 0x0, likely server has closed socket, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:49,894] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:57:49,895] INFO Opening socket connection to server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:49,897] INFO Socket connection established to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.68.136:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:50,014] INFO Unable to read additional data from server sessionid 0x0, likely server has closed socket, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:50,565] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:57:50,567] INFO Opening socket connection to server onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.159:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:50,625] INFO Socket connection established to onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.159:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:50,646] INFO Unable to read additional data from server sessionid 0x0, likely server has closed socket, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:51,710] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-10-15 11:57:51,711] INFO Opening socket connection to server onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.67.179:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:51,715] INFO Socket connection established to onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.67.179:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:51,777] INFO Session establishment complete on server onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.67.179:2181, sessionid = 0x30000e069d50000, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)
[2021-10-15 11:57:51,779] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2021-10-15 11:57:51,781] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2021-10-15 11:57:51,781] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2021-10-15 11:57:51,909] INFO Stat of the created znode at /brokers/ids/2 is: 17179869187,17179869187,1634299071789,1634299071789,1,0,0,216173745962024960,366,0,17179869187 (kafka.zk.KafkaZkClient)
[2021-10-15 11:57:51,910] INFO Registered broker 2 at path /brokers/ids/2 with addresses: ArrayBuffer(EndPoint(10.253.0.233,30492,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT), EndPoint(onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT)), czxid (broker epoch): 17179869187 (kafka.zk.KafkaZkClient)
[2021-10-15 11:57:51,932] INFO [Partition __consumer_offsets-3 broker=2] Cached zkVersion [0] not equal to that in zookeeper, skip updating ISR (kafka.cluster.Partition)
[2021-10-15 11:57:51,933] INFO [Partition __consumer_offsets-18 broker=2] Shrinking ISR from 2,0,1 to 2. Leader: (highWatermark: 100, endOffset: 101). Out of sync replicas: (brokerId: 0, endOffset: 100) (brokerId: 1, endOffset: 100). (kafka.cluster.Partition)
[2021-10-15 11:57:51,954] DEBUG [Controller id=2] Broker 0 has been elected as the controller, so stopping the election process. (kafka.controller.KafkaController)
[2021-10-15 11:57:51,983] INFO [Partition __consumer_offsets-18 broker=2] Cached zkVersion [3] not equal to that in zookeeper, skip updating ISR (kafka.cluster.Partition)
[2021-10-15 11:57:51,983] INFO [Partition __consumer_offsets-27 broker=2] Shrinking ISR from 2,1,0 to 2. Leader: (highWatermark: 54, endOffset: 55). Out of sync replicas: (brokerId: 1, endOffset: 54) (brokerId: 0, endOffset: 54). (kafka.cluster.Partition)
[2021-10-15 11:57:52,001] INFO [Partition __consumer_offsets-27 broker=2] Cached zkVersion [3] not equal to that in zookeeper, skip updating ISR (kafka.cluster.Partition)
[2021-10-15 11:57:52,007] INFO [Partition __consumer_offsets-6 broker=2] Shrinking ISR from 2,0,1 to 2. Leader: (highWatermark: 622, endOffset: 625). Out of sync replicas: (brokerId: 0, endOffset: 622) (brokerId: 1, endOffset: 622). (kafka.cluster.Partition)
[2021-10-15 11:57:52,063] INFO [Partition __consumer_offsets-6 broker=2] Cached zkVersion [3] not equal to that in zookeeper, skip updating ISR (kafka.cluster.Partition)
[2021-10-15 11:57:52,064] INFO [Partition __consumer_offsets-3 broker=2] Shrinking ISR from 2,0,1 to 2. Leader: (highWatermark: 72, endOffset: 73). Out of sync replicas: (brokerId: 0, endOffset: 72) (brokerId: 1, endOffset: 72). (kafka.cluster.Partition)
[2021-10-15 11:57:52,221] INFO [Partition __consumer_offsets-3 broker=2] Cached zkVersion [0] not equal to that in zookeeper, skip updating ISR (kafka.cluster.Partition)
[2021-10-15 11:57:52,221] INFO [Partition __consumer_offsets-18 broker=2] Shrinking ISR from 2,0,1 to 2. Leader: (highWatermark: 100, endOffset: 101). Out of sync replicas: (brokerId: 0, endOffset: 100) (brokerId: 1, endOffset: 100). (kafka.cluster.Partition)
[2021-10-15 11:57:52,309] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:52,322] INFO [Partition __consumer_offsets-18 broker=2] Cached zkVersion [3] not equal to that in zookeeper, skip updating ISR (kafka.cluster.Partition)
[2021-10-15 11:57:52,337] INFO [Partition __consumer_offsets-27 broker=2] Shrinking ISR from 2,1,0 to 2. Leader: (highWatermark: 54, endOffset: 55). Out of sync replicas: (brokerId: 1, endOffset: 54) (brokerId: 0, endOffset: 54). (kafka.cluster.Partition)
[2021-10-15 11:57:52,338] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-10-15 11:57:52,356] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-13 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,361] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,362] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=4, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,362] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,362] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,362] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,362] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-17 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,362] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=4, replicas=[0, 2, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,362] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,366] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-5 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-38 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=4, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-34 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 1, 2], offlineReplicas=[]) for partition AAI-EVENT-2 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-20 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-49 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 0, 2], offlineReplicas=[]) for partition POLICY-PDP-PAP-2 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=4, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-25 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-8 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,367] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-4 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-15 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[2, 0, 1], offlineReplicas=[]) for partition AAI-EVENT-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-11 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=-1, leaderEpoch=1, isr=[2], zkVersion=1, replicas=[2], offlineReplicas=[]) for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-44 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-23 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=4, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-19 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-32 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-28 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-40 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-36 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.mirrormakeragent-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-47 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-14 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-43 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,368] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 2, 0], offlineReplicas=[]) for partition AAI-EVENT-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=-1, leaderEpoch=1, isr=[2], zkVersion=1, replicas=[2], offlineReplicas=[]) for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-10 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-22 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[0, 1], zkVersion=4, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=4, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-31 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[1, 0], zkVersion=4, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[0, 1], zkVersion=4, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-35 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,369] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-2 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 0 (state.change.logger)
[2021-10-15 11:57:52,379] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,2,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-13 (state.change.logger)
[2021-10-15 11:57:52,379] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=4, replicas=0,1,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-46 (state.change.logger)
[2021-10-15 11:57:52,379] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=2,0,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-9 (state.change.logger)
[2021-10-15 11:57:52,379] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=2,1,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-42 (state.change.logger)
[2021-10-15 11:57:52,379] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=2,0,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-21 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,2,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-17 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=4, replicas=0,2,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition POLICY-PDP-PAP-1 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=2,1,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-30 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,0,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-26 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,2,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-5 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,0,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-38 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=4, replicas=0,2,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-1 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,1,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-34 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,1,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-16 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=2,0,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-45 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=2,1,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-12 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,2,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-41 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,1,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition AAI-EVENT-2 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=2,1,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-24 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,0,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-20 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,2,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-49 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,0,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition POLICY-PDP-PAP-2 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=2,1,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-0 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,2,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-29 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=4, replicas=0,2,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-25 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,0,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-8 (state.change.logger)
[2021-10-15 11:57:52,380] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,2,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-37 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,1,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-4 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=2,0,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-33 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=2,0,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-15 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=2,1,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-48 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=2,0,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition AAI-EVENT-1 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,2,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-11 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=-1, leaderEpoch=1, isr=2, zkVersion=1, replicas=2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,0,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-44 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,2,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-23 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=4, replicas=0,2,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-19 (state.change.logger)
[2021-10-15 11:57:52,381] TRACE [Broker id=2] Received
LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,0,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-32 (state.change.logger) [2021-10-15 11:57:52,382] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,1,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-28 (state.change.logger) [2021-10-15 11:57:52,382] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,2,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-7 (state.change.logger) [2021-10-15 11:57:52,382] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,1,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-40 (state.change.logger) [2021-10-15 11:57:52,382] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=2,0,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-3 (state.change.logger) [2021-10-15 11:57:52,383] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=2,1,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-36 (state.change.logger) [2021-10-15 11:57:52,383] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,2,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-47 (state.change.logger) [2021-10-15 11:57:52,383] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,0,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-14 (state.change.logger) [2021-10-15 11:57:52,383] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,2,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-43 (state.change.logger) [2021-10-15 11:57:52,383] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,2,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition AAI-EVENT-0 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=-1, leaderEpoch=1, isr=2, zkVersion=1, replicas=2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,1,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-10 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=0,1,2, 
isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-22 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=0,1, zkVersion=4, replicas=2,1,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-18 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=4, replicas=0,2,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-31 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=1,0, zkVersion=4, replicas=2,0,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-27 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=2,1,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition POLICY-PDP-PAP-0 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=0,1, zkVersion=1, replicas=2,0,1, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-39 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=0,1, zkVersion=4, replicas=2,1,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-6 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,2,0, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-35 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=1,0, zkVersion=1, replicas=1,0,2, isNew=false) correlation id 1 from controller 0 epoch 8 for partition __consumer_offsets-2 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition AAI-EVENT-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-29 with leader 1 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-48 with leader 1 (state.change.logger) [2021-10-15 11:57:52,384] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-10 with leader 0 (state.change.logger) [2021-10-15 
11:57:52,384] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-45 with leader 0 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-26 with leader 1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-7 with leader 0 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-42 with leader 1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-4 with leader 0 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-23 with leader 1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 with leader -1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-20 with leader 1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-39 with leader 0 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-17 with leader 1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-36 with leader 1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-14 with leader 1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-33 with leader 0 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 
8 starting the become-follower transition for partition __consumer_offsets-49 with leader 0 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition AAI-EVENT-2 with leader 0 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-11 with leader 1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-30 with leader 1 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-46 with leader 0 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,385] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-27 with leader 0 (state.change.logger) [2021-10-15 11:57:52,392] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,392] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-24 with leader 1 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-43 with leader 0 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-5 with leader 1 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-21 with leader 0 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-2 with leader 1 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-40 with leader 0 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-37 with leader 0 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-18 with leader 1 (state.change.logger) 
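Each "starting the become-follower transition ... with leader N" record above commits broker 2 to fetching the named partition from leader N; a leader of -1 means the controller could not assign a live leader, which is what the ERROR entries below report for the SDC-DISTR topics. A small sketch, under the same one-record-per-line assumption and with illustrative names, that tallies the transitions per target leader and flags the leaderless ones:

import re
import sys
from collections import defaultdict

# Assumes one log record per line on stdin (this capture runs records together).
PATTERN = re.compile(
    r"starting the become-follower transition for partition (\S+) "
    r"with leader (-?\d+)"
)

followers = defaultdict(list)  # leader id -> partitions now following it
for line in sys.stdin:
    m = PATTERN.search(line)
    if m:
        followers[int(m.group(2))].append(m.group(1))

for leader, partitions in sorted(followers.items()):
    note = "  <- no live leader" if leader == -1 else ""
    print(f"leader {leader}: {len(partitions)} partition(s){note}")

On this segment the leader -1 bucket contains exactly SDC-DISTR-NOTIF-TOPIC-AUTO-0 and SDC-DISTR-STATUS-TOPIC-AUTO-0.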
[2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-34 with leader 0 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-15 with leader 0 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-12 with leader 1 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-31 with leader 0 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-9 with leader 0 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-47 with leader 1 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-19 with leader 0 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-28 with leader 0 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-38 with leader 1 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition AAI-EVENT-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-35 with leader 1 (state.change.logger) [2021-10-15 11:57:52,393] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-44 with leader 1 (state.change.logger) [2021-10-15 11:57:52,398] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-6 with leader 1 (state.change.logger) [2021-10-15 11:57:52,398] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 with leader -1 (state.change.logger) [2021-10-15 11:57:52,398] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 
epoch 8 starting the become-follower transition for partition __consumer_offsets-25 with leader 0 (state.change.logger) [2021-10-15 11:57:52,398] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-16 with leader 0 (state.change.logger) [2021-10-15 11:57:52,398] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-22 with leader 0 (state.change.logger) [2021-10-15 11:57:52,398] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-41 with leader 1 (state.change.logger) [2021-10-15 11:57:52,398] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-32 with leader 1 (state.change.logger) [2021-10-15 11:57:52,398] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-3 with leader 0 (state.change.logger) [2021-10-15 11:57:52,398] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 0 epoch 8 starting the become-follower transition for partition __consumer_offsets-13 with leader 0 (state.change.logger) [2021-10-15 11:57:52,432] ERROR [Broker id=2] Received LeaderAndIsrRequest with correlation id 1 from controller 0 epoch 8 for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (last update controller epoch 8) but cannot become follower since the new leader -1 is unavailable. (state.change.logger) [2021-10-15 11:57:52,603] INFO [Partition __consumer_offsets-27 broker=2] Cached zkVersion [3] not equal to that in zookeeper, skip updating ISR (kafka.cluster.Partition) [2021-10-15 11:57:52,603] INFO [Partition __consumer_offsets-6 broker=2] Shrinking ISR from 2,0,1 to 2. Leader: (highWatermark: 622, endOffset: 625). Out of sync replicas: (brokerId: 0, endOffset: 622) (brokerId: 1, endOffset: 622). (kafka.cluster.Partition) [2021-10-15 11:57:52,680] INFO [Partition __consumer_offsets-6 broker=2] Cached zkVersion [3] not equal to that in zookeeper, skip updating ISR (kafka.cluster.Partition) [2021-10-15 11:57:52,682] INFO [Partition __consumer_offsets-3 broker=2] Shrinking ISR from 2,0,1 to 2. Leader: (highWatermark: 72, endOffset: 73). Out of sync replicas: (brokerId: 0, endOffset: 72) (brokerId: 1, endOffset: 72). (kafka.cluster.Partition) [2021-10-15 11:57:52,685] ERROR [Broker id=2] Received LeaderAndIsrRequest with correlation id 1 from controller 0 epoch 8 for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (last update controller epoch 8) but cannot become follower since the new leader -1 is unavailable. 
(state.change.logger) [2021-10-15 11:57:52,819] INFO [Partition __consumer_offsets-3 broker=2] Cached zkVersion [0] not equal to that in zookeeper, skip updating ISR (kafka.cluster.Partition) [2021-10-15 11:57:52,824] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(__consumer_offsets-28, __consumer_offsets-6, __consumer_offsets-10, __consumer_offsets-32, __consumer_offsets-14, __consumer_offsets-36, POLICY-PDP-PAP-1, __consumer_offsets-40, __consumer_offsets-37, __consumer_offsets-18, __consumer_offsets-22, __consumer_offsets-41, __consumer_offsets-0, __consumer_offsets-4, __consumer_offsets-23, __consumer_offsets-26, __consumer_offsets-45, __consumer_offsets-8, __consumer_offsets-49, __consumer_offsets-27, __consumer_offsets-12, __consumer_offsets-31, __consumer_offsets-9, __consumer_offsets-13, __consumer_offsets-35, POLICY-PDP-PAP-0, __consumer_offsets-17, __consumer_offsets-39, AAI-EVENT-1, __consumer_offsets-43, __consumer_offsets-21, AAI-EVENT-2, __consumer_offsets-25, __consumer_offsets-44, __consumer_offsets-47, __consumer_offsets-3, __consumer_offsets-7, __consumer_offsets-29, __consumer_offsets-48, __consumer_offsets-11, __consumer_offsets-30, __consumer_offsets-33, __consumer_offsets-15, __consumer_offsets-34, POLICY-PDP-PAP-2, __consumer_offsets-19, __consumer_offsets-16, __consumer_offsets-38, AAI-EVENT-0, __consumer_offsets-1, __consumer_offsets-20, __consumer_offsets-42, __consumer_offsets-5, __consumer_offsets-46, __consumer_offsets-2, __consumer_offsets-24) (kafka.server.ReplicaFetcherManager) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-22 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-25 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-28 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-31 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-34 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-37 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-40 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-43 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-46 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] 
TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-49 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition AAI-EVENT-2 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-41 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-44 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-47 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-4 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-7 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-10 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-13 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-16 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-19 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition AAI-EVENT-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-2 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-5 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 
8 with correlation id 1 for partition __consumer_offsets-8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-11 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-14 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-17 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-20 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-23 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-26 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-29 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-32 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-35 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-38 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-3 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-6 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-9 with leader 0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-12 with leader 1 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-15 with leader 
0 (state.change.logger) [2021-10-15 11:57:52,825] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-18 with leader 1 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-21 with leader 0 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-24 with leader 1 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-27 with leader 0 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-30 with leader 1 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-33 with leader 0 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-36 with leader 1 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-39 with leader 0 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-42 with leader 1 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-45 with leader 0 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-48 with leader 1 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,826] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition AAI-EVENT-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-22 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-25 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 
(state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-28 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-31 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-34 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-37 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-40 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-43 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-46 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-49 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-1 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition AAI-EVENT-2 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-41 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-44 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-47 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-1 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) 
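Two failure signatures stand out in this segment: the ERROR records above, where broker 2 cannot become follower for SDC-DISTR-NOTIF-TOPIC-AUTO-0 and SDC-DISTR-STATUS-TOPIC-AUTO-0 because the assigned leader is -1 (both are single-replica partitions with replicas=[2], so no other broker can take over), and the "Cached zkVersion ... not equal to that in zookeeper, skip updating ISR" INFOs, which show the broker skipping ISR shrinks because its cached partition state is stale relative to ZooKeeper. A hedged sketch that greps both signatures out of a broker log, with the pattern strings taken from the entries above (the script itself is illustrative, not an official tool):

import re
import sys

# Assumes one log record per line on stdin (this capture runs records together).
# (label, pattern) pairs for the two anomalies seen in this log segment.
CHECKS = [
    ("leader unavailable",
     re.compile(r"for partition (\S+) \(last update controller epoch \d+\) "
                r"but cannot become follower since the new leader -1 is unavailable")),
    ("stale zkVersion, ISR update skipped",
     re.compile(r"\[Partition (\S+) broker=\d+\] Cached zkVersion \[\d+\] "
                r"not equal to that in zookeeper")),
]

for line in sys.stdin:
    for label, pattern in CHECKS:
        m = pattern.search(line)
        if m:
            print(f"{label}: {m.group(1)}")

The stale-zkVersion condition normally clears once the controller sends fresher metadata; the two SDC-DISTR topics, having broker 2 as their only replica, stay unavailable until the controller elects broker 2 as their leader again.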
[2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-4 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-7 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-10 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-13 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-16 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-19 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition AAI-EVENT-0 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-2 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-5 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-8 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-11 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-14 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-17 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-20 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] 
TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-23 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-26 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-29 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-32 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-35 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-38 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-0 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-3 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-6 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-9 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-12 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,827] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-15 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-18 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-21 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker 
id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-24 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-27 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-30 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-33 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-36 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-39 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-42 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-45 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-48 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-2 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-0 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,828] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition AAI-EVENT-1 as part of become-follower request with correlation id 1 from controller 0 epoch 8 with leader 0 (state.change.logger) [2021-10-15 11:57:52,856] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-30 -> (offset=0, leaderEpoch=1), __consumer_offsets-8 -> (offset=0, leaderEpoch=1), __consumer_offsets-35 -> (offset=0, leaderEpoch=1), __consumer_offsets-41 -> (offset=0, leaderEpoch=1), __consumer_offsets-23 -> (offset=523, leaderEpoch=1), __consumer_offsets-47 -> (offset=81, leaderEpoch=1), __consumer_offsets-36 -> (offset=0, 
leaderEpoch=1), __consumer_offsets-42 -> (offset=0, leaderEpoch=1), __consumer_offsets-18 -> (offset=100, leaderEpoch=1), __consumer_offsets-24 -> (offset=0, leaderEpoch=1), __consumer_offsets-38 -> (offset=0, leaderEpoch=1), AAI-EVENT-0 -> (offset=0, leaderEpoch=1), __consumer_offsets-17 -> (offset=0, leaderEpoch=1), __consumer_offsets-48 -> (offset=0, leaderEpoch=1), __consumer_offsets-11 -> (offset=0, leaderEpoch=1), POLICY-PDP-PAP-0 -> (offset=0, leaderEpoch=1), __consumer_offsets-2 -> (offset=0, leaderEpoch=1), __consumer_offsets-6 -> (offset=622, leaderEpoch=1), __consumer_offsets-14 -> (offset=0, leaderEpoch=1), POLICY-PDP-PAP-2 -> (offset=17, leaderEpoch=1), __consumer_offsets-20 -> (offset=0, leaderEpoch=1), __consumer_offsets-0 -> (offset=0, leaderEpoch=1), __consumer_offsets-44 -> (offset=0, leaderEpoch=1), __consumer_offsets-12 -> (offset=0, leaderEpoch=1), __consumer_offsets-5 -> (offset=0, leaderEpoch=1), __consumer_offsets-26 -> (offset=0, leaderEpoch=1), __consumer_offsets-29 -> (offset=0, leaderEpoch=1), __consumer_offsets-32 -> (offset=29, leaderEpoch=1)) (kafka.server.ReplicaFetcherManager) [2021-10-15 11:57:52,856] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-22 -> (offset=0, leaderEpoch=1), __consumer_offsets-21 -> (offset=0, leaderEpoch=1), __consumer_offsets-4 -> (offset=0, leaderEpoch=1), __consumer_offsets-27 -> (offset=54, leaderEpoch=1), __consumer_offsets-7 -> (offset=0, leaderEpoch=1), __consumer_offsets-9 -> (offset=0, leaderEpoch=1), __consumer_offsets-46 -> (offset=81, leaderEpoch=1), __consumer_offsets-25 -> (offset=214, leaderEpoch=1), __consumer_offsets-33 -> (offset=0, leaderEpoch=1), __consumer_offsets-49 -> (offset=0, leaderEpoch=1), __consumer_offsets-16 -> (offset=0, leaderEpoch=1), __consumer_offsets-28 -> (offset=49, leaderEpoch=1), __consumer_offsets-31 -> (offset=1160, leaderEpoch=1), __consumer_offsets-3 -> (offset=72, leaderEpoch=1), AAI-EVENT-1 -> (offset=0, leaderEpoch=1), __consumer_offsets-37 -> (offset=0, leaderEpoch=1), __consumer_offsets-15 -> (offset=0, leaderEpoch=1), POLICY-PDP-PAP-1 -> (offset=68, leaderEpoch=1), __consumer_offsets-19 -> (offset=78, leaderEpoch=1), __consumer_offsets-13 -> (offset=0, leaderEpoch=1), __consumer_offsets-43 -> (offset=0, leaderEpoch=1), AAI-EVENT-2 -> (offset=205, leaderEpoch=1), __consumer_offsets-39 -> (offset=0, leaderEpoch=1), __consumer_offsets-45 -> (offset=0, leaderEpoch=1), __consumer_offsets-1 -> (offset=53, leaderEpoch=1), __consumer_offsets-34 -> (offset=0, leaderEpoch=1), __consumer_offsets-10 -> (offset=0, leaderEpoch=1), __consumer_offsets-40 -> (offset=0, leaderEpoch=1)) (kafka.server.ReplicaFetcherManager) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-22 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-25 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-28 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker 
id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-31 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-34 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-37 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-40 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-43 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-46 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-49 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition AAI-EVENT-2 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-41 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-44 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-47 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-4 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-7 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with 
correlation id 1 for partition __consumer_offsets-10 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-13 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-16 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-19 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition AAI-EVENT-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-2 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-5 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-11 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-14 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-17 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-20 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-23 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-26 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-29 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-32 with leader 1 (state.change.logger) [2021-10-15 
11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-35 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-38 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-3 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-6 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-9 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-12 with leader 1 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-15 with leader 0 (state.change.logger) [2021-10-15 11:57:52,857] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-18 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-21 with leader 0 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-24 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-27 with leader 0 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-30 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-33 with leader 0 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-36 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower 
request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-39 with leader 0 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-42 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-45 with leader 0 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition __consumer_offsets-48 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 0 epoch 8 with correlation id 1 for partition AAI-EVENT-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition AAI-EVENT-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-29 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-48 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-10 with leader 0 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-45 with leader 0 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-26 with leader 1 (state.change.logger) [2021-10-15 11:57:52,858] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-7 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker 
id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-42 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-4 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-23 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 with leader -1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-20 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-39 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-17 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-36 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-14 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-33 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-49 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition AAI-EVENT-2 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-11 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-30 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-46 with leader 0 
(state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-27 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-8 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-24 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-43 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-5 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-21 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-2 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-40 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-37 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-18 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-34 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-15 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-12 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition 
for partition __consumer_offsets-31 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-9 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-47 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-19 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-28 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-38 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition AAI-EVENT-0 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-35 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-44 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-6 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 with leader -1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-25 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-16 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-22 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-41 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-32 with leader 1 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 
from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-3 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 0 epoch 8 for the become-follower transition for partition __consumer_offsets-13 with leader 0 (state.change.logger) [2021-10-15 11:57:52,861] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-22. Removed 0 cached offsets and 0 cached groups. 
(kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,862] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,924] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,924] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,924] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of 
offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,925] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-25. Removed 0 cached offsets and 0 cached groups. 
(kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-28. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-31. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-34. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-37. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-40. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-43. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-46. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-49. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-41. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-44. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-47. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-1. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-4. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-7. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-10. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-13. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-16. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-19. 
Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-2. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-5. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-8. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-11. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-14. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-17. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-20. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-23. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,927] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-26. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,928] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-29. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,928] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-32. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,928] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-35. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,928] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-38. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,928] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-0. Removed 0 cached offsets and 0 cached groups. 
(kafka.coordinator.group.GroupMetadataManager) [2021-10-15 11:57:52,930] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-13 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 2 (state.change.logger) [2021-10-15 11:57:52,930] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=4, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 2 (state.change.logger) [2021-10-15 11:57:52,930] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 2 (state.change.logger) [2021-10-15 11:57:52,930] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 2 (state.change.logger) [2021-10-15 11:57:52,930] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=1, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 2 (state.change.logger) [2021-10-15 11:57:52,930] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-17 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 2 (state.change.logger) [2021-10-15 11:57:52,930] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=1, isr=[0, 1], zkVersion=4, replicas=[0, 2, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 2 (state.change.logger) [2021-10-15 11:57:52,930] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 2 (state.change.logger) [2021-10-15 11:57:52,930] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=1, isr=[1, 0], zkVersion=1, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller 0 epoch 8 with correlation id 2 (state.