Results

+ export KAFKA_BROKER_ID=1
+ KAFKA_BROKER_ID=1
+ cp /opt/app/osaaf/local/cadi.properties /etc/kafka/data/cadi.properties
+ export KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://10.253.0.184:30491,INTERNAL_SASL_PLAINTEXT://:9092
+ KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://10.253.0.184:30491,INTERNAL_SASL_PLAINTEXT://:9092
+ exec /etc/confluent/docker/run
===> ENV Variables ...
HOLMES_POSTGRES_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_REPLICA_SERVICE_PORT=5432
MULTICLOUD_PIKE_PORT_9007_TCP_PORT=9007
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_ADDR=10.233.14.135
AAF_CM_SERVICE_PORT=8150
AAF_SERVICE_SERVICE_PORT_API=8100
CHART_MUSEUM_SERVICE_PORT=80
AAI_SCHEMA_SERVICE_PORT_5005_TCP=tcp://10.233.46.36:5005
HOLMES_RULE_MGMT_PORT_9104_TCP_PROTO=tcp
POLICY_DISTRIBUTION_PORT_6969_TCP=tcp://10.233.44.100:6969
POLICY_APEX_PDP_PORT_6969_TCP_PROTO=tcp
CDS_PY_EXECUTOR_PORT_50052_TCP_ADDR=10.233.53.129
PORTAL_APP_PORT_8443_TCP_PORT=8443
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_ADDR=10.233.0.93
DMAAP_BC_SERVICE_HOST=10.233.50.121
MODELING_ETSICATALOG_SERVICE_PORT=8806
MSB_DISCOVERY_SERVICE_PORT_HTTP_MSB_DISCOVERY=10081
MESSAGE_ROUTER_PORT_3905_TCP_PROTO=tcp
NBI_MONGOHOST_READ_SERVICE_PORT=27017
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT_BLUEPRINTS_PROCESSOR_CLUSTER=5701
AAF_GUI_SERVICE_PORT=8200
CDS_PY_EXECUTOR_SERVICE_PORT_MANAGER_GRPC=50053
AAI_GRAPHADMIN_PORT_5005_TCP=tcp://10.233.2.190:5005
MULTICLOUD_K8S_MONGO_READ_PORT=tcp://10.233.33.206:27017
HOLMES_POSTGRES_PRIMARY_PORT=tcp://10.233.3.55:5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_PORT_5432_TCP=tcp://10.233.31.215:5432
MULTICLOUD_PIKE_PORT_9007_TCP=tcp://10.233.39.25:9007
TCP_PGSET_REPLICA_SERVICE_PORT=5432
MULTICLOUD_K8S_SERVICE_PORT=9015
SDC_ONBOARDING_BE_PORT_8081_TCP_PROTO=tcp
MULTICLOUD_K8S_PORT_9015_TCP_ADDR=10.233.7.173
MULTICLOUD_FCAPS_SERVICE_PORT=9011
MESSAGE_ROUTER_PORT=tcp://10.233.19.176:3905
AAF_CM_PORT_8150_TCP_PROTO=tcp
KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL_SASL_PLAINTEXT
CDS_PY_EXECUTOR_SERVICE_PORT=50052
POLICY_MARIADB_SERVICE_HOST=10.233.40.194
MULTICLOUD_PIKE_SERVICE_HOST=10.233.39.25
SDC_HELM_VALIDATOR_PORT=tcp://10.233.6.252:8080
DMAAP_DR_NODE_PORT_8080_TCP_PORT=8080
HOLMES_POSTGRES_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
SDC_HELM_VALIDATOR_PORT_8080_TCP_PORT=8080
PORTAL_DB_SERVICE_PORT_PORTAL_DB=3306
DBC_POSTGRES_PORT_5432_TCP_ADDR=10.233.34.43
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_ADDR=10.233.3.55
POLICY_PAP_PORT_6969_TCP_PROTO=tcp
CONSUL_SERVER_UI_SERVICE_PORT=8500
POLICY_CLAMP_CL_RUNTIME_SERVICE_HOST=10.233.22.170
SDC_HELM_VALIDATOR_PORT_8080_TCP_PROTO=tcp
OOF_OSDF_SERVICE_PORT=8698
OOM_CERT_SERVICE_PORT_8443_TCP_ADDR=10.233.4.61
HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_REST=9101
AAI_SPARKY_BE_PORT=tcp://10.233.25.166:8000
AAF_GUI_SERVICE_HOST=10.233.46.67
POLICY_CLAMP_CL_RUNTIME_PORT_6969_TCP_PROTO=tcp
HOLMES_ENGINE_MGMT_PORT_9102_TCP=tcp://10.233.27.207:9102
POLICY_PAP_SERVICE_PORT_HTTP_API=6969
POLICY_MARIADB_SERVICE_PORT_MYSQL=3306
AAF_CASS_PORT_9042_TCP_ADDR=10.233.38.219
HOLMES_POSTGRES_PORT=tcp://10.233.31.215:5432
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP=tcp://10.233.53.234:8443
AAF_OAUTH_PORT_8140_TCP=tcp://10.233.3.102:8140
ROBOT_PORT_443_TCP=tcp://10.233.23.175:443
PORTAL_WIDGET_PORT_8082_TCP=tcp://10.233.47.162:8082
MESSAGE_ROUTER_SERVICE_PORT=3905
AAI_GRAPHADMIN_PORT=tcp://10.233.2.190:8449
POLICY_APEX_PDP_SERVICE_PORT=6969
CDS_DB_PORT_3306_TCP=tcp://10.233.51.197:3306
PORTAL_DB_PORT_3306_TCP_PROTO=tcp
MSB_DISCOVERY_SERVICE_HOST=10.233.14.24
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_PORT=9104
CONSUL_SERVER_UI_SERVICE_HOST=10.233.8.125
AAF_LOCATE_SERVICE_PORT_API=8095
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_HOST=10.233.26.11
PORTAL_SDK_SERVICE_PORT=8443
AAF_FS_PORT=tcp://10.233.19.181:8096
AAF_SERVICE_PORT_8100_TCP=tcp://10.233.29.31:8100
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP=tcp://10.233.60.48:5432
OOF_HAS_API_SERVICE_HOST=10.233.35.91
AAI_RESOURCES_SERVICE_PORT=8447
AAF_CASS_PORT_7000_TCP_PROTO=tcp
NBI_SERVICE_PORT=8443
A1POLICYMANAGEMENT_SERVICE_PORT=8433
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT=tcp://10.233.10.54:8080
ONAP_CDS_DB_METRICS_SERVICE_PORT=9104
PORTAL_APP_PORT=tcp://10.233.10.179:8443
TCP_PGSET_PRIMARY_PORT_5432_TCP_PROTO=tcp
LANG=C.UTF-8
AAI_TRAVERSAL_SERVICE_HOST=10.233.20.193
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PORT=9091
CDS_SDC_LISTENER_SERVICE_PORT_CDS_SDC_LISTENER_HTTP=8080
MULTICLOUD_FCAPS_PORT_9011_TCP_ADDR=10.233.8.48
SDC_BE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443
SDC_BE_SERVICE_PORT_HTTP_API=8080
PGSET_PORT_5432_TCP=tcp://10.233.39.254:5432
MARIADB_GALERA_PORT_3306_TCP_ADDR=10.233.19.211
SDC_BE_EXTERNAL_SERVICE_HOST=10.233.8.133
CONSUL_SERVER_UI_PORT=tcp://10.233.8.125:8500
CDS_COMMAND_EXECUTOR_SERVICE_HOST=10.233.27.108
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT_BLUEPRINTS_PROCESSOR_HTTP=8080
CLI_PORT=tcp://10.233.31.117:443
POLICY_PAP_SERVICE_PORT=6969
MARIADB_GALERA_SERVICE_HOST=10.233.19.211
HOLMES_RULE_MGMT_PORT_9101_TCP_PORT=9101
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP=tcp://10.233.0.93:9111
POLICY_CLAMP_CL_RUNTIME_SERVICE_PORT=6969
PORTAL_DB_PORT_3306_TCP=tcp://10.233.20.183:3306
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PROTO=tcp
POLICY_DISTRIBUTION_SERVICE_PORT_POLICY_DISTRIBUTION=6969
SDC_WFD_FE_PORT_8443_TCP_PROTO=tcp
PORTAL_APP_PORT_8443_TCP_PROTO=tcp
POLICY_CLAMP_BE_SERVICE_HOST=10.233.53.29
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PORT=8443
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PROTO=tcp
AAF_CASS_PORT_7001_TCP_ADDR=10.233.38.219
AAI_RESOURCES_PORT_8447_TCP_PROTO=tcp
SDC_BE_SERVICE_PORT=8443
CDS_DB_SERVICE_PORT_MYSQL=3306
HOSTNAME=onap-message-router-kafka-1
A1POLICYMANAGEMENT_PORT_8081_TCP_PROTO=tcp
AAI_SCHEMA_SERVICE_PORT_5005_TCP_PROTO=tcp
SDC_WFD_FE_PORT_8443_TCP_PORT=8443
AAF_OAUTH_PORT_8140_TCP_PORT=8140
PORTAL_CASSANDRA_PORT_9042_TCP_PROTO=tcp
ROBOT_PORT_443_TCP_PORT=443
DMAAP_DR_PROV_PORT_443_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP=tcp://10.233.14.135:9091
AAF_SMS_DB_SERVICE_HOST=10.233.21.28
PORTAL_CASSANDRA_PORT_7001_TCP=tcp://10.233.59.84:7001
AAF_LOCATE_PORT_8095_TCP_PROTO=tcp
POLICY_DROOLS_PDP_PORT_9696_TCP_PROTO=tcp
AAI_BABEL_SERVICE_HOST=10.233.23.138
POLICY_DISTRIBUTION_PORT_6969_TCP_PORT=6969
MULTICLOUD_PIKE_SERVICE_PORT=9007
KAFKA_JMX_PORT=5555
POLICY_CLAMP_FE_SERVICE_PORT=2443
DBC_POSTGRES_PORT=tcp://10.233.34.43:5432
A1POLICYMANAGEMENT_EXTERNAL_PORT=tcp://10.233.40.11:8433
POLICY_CLAMP_CL_RUNTIME_PORT_6969_TCP_ADDR=10.233.22.170
SDC_FE_SERVICE_PORT=9443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_ADDR=10.233.53.234
DMAAP_DR_NODE_PORT=tcp://10.233.19.60:8443
KAFKA_NUM_PARTITIONS=3
POLICY_MARIADB_PORT_3306_TCP_ADDR=10.233.40.194
HOLMES_RULE_MGMT_SERVICE_PORT=9101
NBI_SERVICE_HOST=10.233.13.60
A1POLICYMANAGEMENT_SERVICE_HOST=10.233.2.242
AAI_SPARKY_BE_SERVICE_PORT=8000
PORTAL_DB_PORT_3306_TCP_PORT=3306
POLICY_DISTRIBUTION_PORT_6969_TCP_PROTO=tcp
NBI_PORT=tcp://10.233.13.60:8443
SDC_WFD_FE_SERVICE_HOST=10.233.44.244
A1POLICYMANAGEMENT_PORT=tcp://10.233.2.242:8433
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP=tcp://10.233.33.99:3905
POLICY_API_SERVICE_HOST=10.233.22.124
A1POLICYMANAGEMENT_SERVICE_PORT_HTTP_API=8081
HOST_IP=10.253.0.184
DMAAP_DR_NODE_EXTERNAL_SERVICE_HOST=10.233.53.234
POLICY_CLAMP_BE_SERVICE_PORT_POLICY_CLAMP_BE=8443
CDS_PY_EXECUTOR_PORT=tcp://10.233.53.129:50052
MODELING_ETSICATALOG_PORT=tcp://10.233.31.168:8806
AAF_FS_PORT_8096_TCP_ADDR=10.233.19.181
POLICY_DROOLS_PDP_SERVICE_PORT_POLICY_DROOLS_PDP_6969=6969
ONAP_CDS_DB_METRICS_PORT_9104_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_0_PORT=tcp://10.233.37.226:9091
AAF_CASS_PORT_7000_TCP_PORT=7000
AAI_MODELLOADER_SERVICE_PORT_HTTP=8080
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=6000
SDC_FE_SERVICE_HOST=10.233.34.114
SDC_WFD_FE_SERVICE_PORT_SDC_WFD_FE=8443
ONAP_MARIADB_GALERA_METRICS_SERVICE_PORT=9104
AAI_PORT_8443_TCP_PROTO=tcp
NBI_PORT_8443_TCP_ADDR=10.233.13.60
PORTAL_SDK_PORT=tcp://10.233.48.181:8443
AAI_MODELLOADER_SERVICE_PORT_HTTPS=8443
AAI_MODELLOADER_SERVICE_PORT=8080
MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT=9091
TCP_PGSET_PRIMARY_SERVICE_PORT=5432
POLICY_APEX_PDP_PORT_6969_TCP_PORT=6969
AAF_FS_SERVICE_PORT_API=8096
POLICY_MARIADB_PORT_3306_TCP=tcp://10.233.40.194:3306
KAFKA_DEFAULT_REPLICATION_FACTOR=3
HOLMES_POSTGRES_REPLICA_PORT=tcp://10.233.60.48:5432
AAI_RESOURCES_PORT_8447_TCP_PORT=8447
POLICY_CLAMP_BE_PORT_8443_TCP=tcp://10.233.53.29:8443
CDS_UI_SERVICE_PORT=3000
SDC_WFD_FE_PORT_8443_TCP=tcp://10.233.44.244:8443
PGSET_SERVICE_HOST=10.233.39.254
MARIADB_GALERA_PORT_3306_TCP=tcp://10.233.19.211:3306
AAI_GRAPHADMIN_SERVICE_PORT_HTTPS=8449
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT=tcp://10.233.26.11:5701
POLICY_CLAMP_CL_RUNTIME_PORT_6969_TCP_PORT=6969
AAF_CASS_SERVICE_PORT=7000
PORTAL_APP_PORT_8443_TCP_ADDR=10.233.10.179
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1
AAI_PORT_8443_TCP_ADDR=10.233.62.159
SDC_WFD_BE_PORT_8443_TCP_PORT=8443
CDS_SDC_LISTENER_PORT_8080_TCP_PORT=8080
CDS_PY_EXECUTOR_PORT_50053_TCP_ADDR=10.233.53.129
CONSUL_SERVER_UI_SERVICE_PORT_CONSUL_UI=8500
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_ADDR=10.233.33.99
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP=tcp://10.233.24.58:9091
OOF_HAS_API_PORT_8091_TCP_PROTO=tcp
PGSET_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_POSTGRES_SERVICE_PORT=5432
DMAAP_DR_NODE_PORT_8443_TCP_PORT=8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PROTO=tcp
HOLMES_RULE_MGMT_PORT_9104_TCP_PORT=9104
AAF_SERVICE_SERVICE_HOST=10.233.29.31
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP=tcp://10.233.37.145:9104
POLICY_XACML_PDP_PORT_6969_TCP=tcp://10.233.58.218:6969
SDC_ONBOARDING_BE_PORT_8445_TCP=tcp://10.233.63.31:8445
A1POLICYMANAGEMENT_PORT_8433_TCP_PORT=8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PROTO=tcp
MSB_CONSUL_PORT_8500_TCP_ADDR=10.233.50.184
MSB_CONSUL_PORT_8500_TCP_PROTO=tcp
HOLMES_ENGINE_MGMT_SERVICE_HOST=10.233.27.207
PORTAL_CASSANDRA_PORT_7001_TCP_ADDR=10.233.59.84
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP=tcp://10.233.33.206:27017
POLICY_CLAMP_CL_K8S_PPNT_PORT_8083_TCP_ADDR=10.233.26.113
POLICY_CLAMP_CL_K8S_PPNT_SERVICE_PORT=8083
POLICY_CLAMP_BE_PORT_8443_TCP_PORT=8443
POLICY_GUI_PORT=tcp://10.233.15.121:2443
TCP_PGSET_REPLICA_PORT_5432_TCP=tcp://10.233.17.3:5432
POLICY_CLAMP_FE_SERVICE_PORT_POLICY_CLAMP_FE=2443
AAF_CASS_SERVICE_PORT_TCP_CQL=9042
CLI_PORT_9090_TCP_PROTO=tcp
DMAAP_DR_PROV_SERVICE_PORT=443
MSB_IAG_PORT_443_TCP=tcp://10.233.42.68:443
POLICY_GUI_PORT_2443_TCP=tcp://10.233.15.121:2443
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT=9111
AAF_GUI_PORT=tcp://10.233.46.67:8200
DBC_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
OOF_HAS_API_PORT_8091_TCP_ADDR=10.233.35.91
AAF_OAUTH_SERVICE_PORT_API=8140
SDC_ONBOARDING_BE_SERVICE_HOST=10.233.63.31
DMAAP_DR_PROV_PORT_443_TCP_ADDR=10.233.13.189
OOF_OSDF_SERVICE_HOST=10.233.5.226
MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_1=9091
PGSET_PORT_5432_TCP_ADDR=10.233.39.254
AAI_MODELLOADER_PORT=tcp://10.233.58.11:8080
MESSAGE_ROUTER_KAFKA_1_PORT=tcp://10.233.24.58:9091
PORTAL_DB_PORT=tcp://10.233.20.183:3306
PORTAL_WIDGET_PORT=tcp://10.233.47.162:8082
MODELING_ETSICATALOG_PORT_8806_TCP_PORT=8806
MESSAGE_ROUTER_KAFKA_2_PORT=tcp://10.233.14.135:9091
MSB_IAG_SERVICE_PORT=443
CDS_UI_PORT_3000_TCP_ADDR=10.233.4.73
DMAAP_DR_PROV_SERVICE_HOST=10.233.13.189
SDC_WFD_FE_SERVICE_PORT=8443
SDC_BE_EXTERNAL_PORT_8443_TCP_PROTO=tcp
AAI_SCHEMA_SERVICE_PORT_8452_TCP_PORT=8452
POLICY_API_PORT_6969_TCP_ADDR=10.233.22.124
OOM_CERT_SERVICE_PORT_8443_TCP_PORT=8443
MSB_DISCOVERY_SERVICE_PORT=10081
CDS_PY_EXECUTOR_PORT_50052_TCP_PORT=50052
DBC_PG_PRIMARY_PORT=tcp://10.233.42.64:5432
A1POLICYMANAGEMENT_PORT_8433_TCP_PROTO=tcp
AAI_PORT=tcp://10.233.62.159:8443
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_PROTO=tcp
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_ADDR=10.233.37.145
MESSAGE_ROUTER_EXTERNAL_SERVICE_HOST=10.233.33.99
AAI_RESOURCES_PORT_5005_TCP_PORT=5005
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP=tcp://10.233.37.226:9091
SDC_BE_PORT_8080_TCP_PORT=8080
PORTAL_SDK_SERVICE_PORT_PORTAL_SDK=8443
POLICY_DROOLS_PDP_PORT_6969_TCP=tcp://10.233.55.134:6969
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_ADDR=10.233.33.206
CDS_DB_PORT_3306_TCP_PROTO=tcp
SDC_WFD_BE_SERVICE_PORT=8443
CLI_PORT_443_TCP=tcp://10.233.31.117:443
MULTICLOUD_PIKE_PORT=tcp://10.233.39.25:9007
KUBERNETES_PORT_443_TCP_PROTO=tcp
AAF_CASS_PORT_9042_TCP=tcp://10.233.38.219:9042
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP=tcp://10.233.3.55:5432
AAI_RESOURCES_SERVICE_PORT_TCP_5005=5005
AAI_SPARKY_BE_PORT_8000_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP_ADDR=10.233.0.1
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PORT=9001
OOM_CERT_SERVICE_PORT=tcp://10.233.4.61:8443
OOF_HAS_API_SERVICE_PORT_OOF_HAS_API=8091
PORTAL_WIDGET_SERVICE_PORT_PORTAL_WIDGET=8082
SDC_WFD_BE_PORT_8443_TCP_PROTO=tcp
container=oci
CLI_PORT_9090_TCP_ADDR=10.233.31.117
DMAAP_DR_NODE_EXTERNAL_PORT=tcp://10.233.53.234:8443
AAI_SERVICE_PORT_HTTPS=8443
AAF_CASS_PORT_9042_TCP_PORT=9042
ONAP_CDS_DB_METRICS_PORT_9104_TCP_ADDR=10.233.43.42
SDC_BE_EXTERNAL_PORT_8443_TCP_PORT=8443
AAF_GUI_PORT_8200_TCP_ADDR=10.233.46.67
AAI_RESOURCES_SERVICE_PORT_HTTPS=8447
PORTAL_WIDGET_PORT_8082_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_ADDR=10.233.37.226
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_PORT=9104
AAF_CASS_PORT_7001_TCP_PROTO=tcp
PORTAL_APP_SERVICE_HOST=10.233.10.179
TCP_PGSET_PRIMARY_PORT=tcp://10.233.5.218:5432
CDS_DB_PORT=tcp://10.233.51.197:3306
SDC_FE_PORT_9443_TCP_ADDR=10.233.34.114
POLICY_API_SERVICE_PORT_POLICY_API=6969
DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443
PORTAL_APP_SERVICE_PORT=8443
SDC_BE_PORT=tcp://10.233.2.145:8443
POLICY_CLAMP_FE_SERVICE_HOST=10.233.18.50
CONSUL_SERVER_UI_PORT_8500_TCP_ADDR=10.233.8.125
CDS_PY_EXECUTOR_PORT_50053_TCP=tcp://10.233.53.129:50053
MSB_CONSUL_PORT_8500_TCP_PORT=8500
POLICY_XACML_PDP_SERVICE_HOST=10.233.58.218
OOF_HAS_API_PORT_8091_TCP_PORT=8091
MESSAGE_ROUTER_PORT_3905_TCP_ADDR=10.233.19.176
DBC_PG_REPLICA_PORT_5432_TCP=tcp://10.233.10.9:5432
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_ADDR=10.233.40.11
POLICY_XACML_PDP_PORT=tcp://10.233.58.218:6969
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PROTO=tcp
SDC_HELM_VALIDATOR_SERVICE_PORT=8080
POLICY_CLAMP_FE_PORT_2443_TCP_PORT=2443
SDC_HELM_VALIDATOR_SERVICE_HOST=10.233.6.252
POLICY_CLAMP_FE_PORT_2443_TCP_PROTO=tcp
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT_HTTPS_API=8433
DMAAP_DR_NODE_PORT_8443_TCP_ADDR=10.233.19.60
CLI_PORT_443_TCP_PORT=443
CDS_UI_PORT=tcp://10.233.4.73:3000
MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT=9091
AAF_GUI_PORT_8200_TCP=tcp://10.233.46.67:8200
ONAP_CDS_DB_METRICS_SERVICE_PORT_METRICS=9104
DBC_PG_REPLICA_SERVICE_PORT=5432
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT=8080
CHART_MUSEUM_SERVICE_PORT_HTTP=80
CDS_PY_EXECUTOR_PORT_50052_TCP_PROTO=tcp
MSB_CONSUL_SERVICE_HOST=10.233.50.184
PORTAL_CASSANDRA_PORT_7001_TCP_PROTO=tcp
CHART_MUSEUM_PORT_80_TCP_ADDR=10.233.35.125
POLICY_DROOLS_PDP_SERVICE_PORT_POLICY_DROOLS_PDP_9696=9696
AAF_CASS_SERVICE_PORT_TCP_THRIFT=9160
SDC_FE_PORT=tcp://10.233.34.114:9443
CDS_UI_PORT_3000_TCP_PROTO=tcp
AAF_GUI_SERVICE_PORT_GUI=8200
KUBERNETES_PORT=tcp://10.233.0.1:443
NBI_PORT_8443_TCP_PROTO=tcp
AAF_CASS_PORT_7001_TCP_PORT=7001
DMAAP_DR_NODE_PORT_8443_TCP_PROTO=tcp
PORTAL_WIDGET_PORT_8082_TCP_PORT=8082
PORTAL_CASSANDRA_PORT=tcp://10.233.59.84:9160
AAI_PORT_8443_TCP=tcp://10.233.62.159:8443
POLICY_PAP_PORT=tcp://10.233.15.181:6969
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PROTO=tcp
AAF_SERVICE_PORT_8100_TCP_PROTO=tcp
AAF_FS_PORT_8096_TCP_PORT=8096
MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT_MONGO=27017
AAF_CASS_SERVICE_PORT_TCP_INTRA=7000
AAI_GRAPHADMIN_SERVICE_HOST=10.233.2.190
OOF_OSDF_PORT_8698_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_2=9091
AAI_PORT_8443_TCP_PORT=8443
DMAAP_DR_NODE_PORT_8443_TCP=tcp://10.233.19.60:8443
DBC_PG_PRIMARY_PORT_5432_TCP_PORT=5432
MULTICLOUD_FRAMEWORK_SERVICE_PORT_MULTICLOUD_FRAMEWORK=9001
POLICY_API_PORT_6969_TCP=tcp://10.233.22.124:6969
AAI_SCHEMA_SERVICE_SERVICE_PORT_TCP_5005=5005
DBC_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
HOLMES_ENGINE_MGMT_SERVICE_PORT=9102
AAF_CASS_PORT_9160_TCP_PROTO=tcp
ONAP_POLICY_MARIADB_METRICS_PORT=tcp://10.233.11.156:9104
PWD=/home/mrkafka
MARIADB_GALERA_PORT_3306_TCP_PROTO=tcp
POLICY_CLAMP_CL_RUNTIME_SERVICE_PORT_HTTP_API=6969
DBC_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.42.64:5432
SDC_ONBOARDING_BE_PORT_8445_TCP_ADDR=10.233.63.31
POLICY_XACML_PDP_PORT_6969_TCP_ADDR=10.233.58.218
SDC_BE_PORT_8080_TCP_ADDR=10.233.2.145
AAI_RESOURCES_SERVICE_HOST=10.233.23.142
ONAP_CDS_DB_METRICS_PORT_9104_TCP_PORT=9104
AAI_TRAVERSAL_PORT_5005_TCP=tcp://10.233.20.193:5005
PORTAL_DB_SERVICE_PORT=3306
PORTAL_CASSANDRA_PORT_7000_TCP=tcp://10.233.59.84:7000
HOME=/home/mrkafka
AAI_SPARKY_BE_SERVICE_PORT_HTTPS=8000
SDC_WFD_FE_PORT=tcp://10.233.44.244:8443
MULTICLOUD_K8S_MONGO_READ_SERVICE_HOST=10.233.33.206
AAI_BABEL_PORT_9516_TCP_PORT=9516
MSB_DISCOVERY_PORT_10081_TCP_PORT=10081
AAI_GRAPHADMIN_PORT_8449_TCP_ADDR=10.233.2.190
MULTICLOUD_FRAMEWORK_SERVICE_HOST=10.233.60.180
PORTAL_CASSANDRA_PORT_9160_TCP=tcp://10.233.59.84:9160
AAI_BABEL_SERVICE_PORT_HTTPS=9516
CDS_DB_PORT_3306_TCP_PORT=3306
MSB_CONSUL_PORT_8500_TCP=tcp://10.233.50.184:8500
DBC_PG_PRIMARY_SERVICE_HOST=10.233.42.64
HOLMES_POSTGRES_REPLICA_SERVICE_HOST=10.233.60.48
POLICY_CLAMP_CL_K8S_PPNT_SERVICE_HOST=10.233.26.113
POLICY_CLAMP_BE_PORT_8443_TCP_PROTO=tcp
CLI_PORT_443_TCP_PROTO=tcp
AAF_CASS_PORT_7000_TCP=tcp://10.233.38.219:7000
NBI_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.233.14.29
MULTICLOUD_PIKE_SERVICE_PORT_MULTICLOUD_PIKE=9007
MODELING_ETSICATALOG_PORT_8806_TCP=tcp://10.233.31.168:8806
MSB_EAG_PORT=tcp://10.233.22.215:443
SDC_BE_EXTERNAL_SERVICE_PORT=8443
CDS_COMMAND_EXECUTOR_SERVICE_PORT=50051
A1POLICYMANAGEMENT_PORT_8081_TCP_ADDR=10.233.2.242
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT=tcp://10.233.0.93:9111
AAI_MODELLOADER_PORT_8443_TCP_PROTO=tcp
SDC_ONBOARDING_BE_PORT_8081_TCP_ADDR=10.233.63.31
MESSAGE_ROUTER_PORT_3905_TCP_PORT=3905
SDC_BE_EXTERNAL_PORT=tcp://10.233.8.133:8443
CDS_COMMAND_EXECUTOR_PORT=tcp://10.233.27.108:50051
POLICY_MARIADB_PORT_3306_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_0=9091
AAI_RESOURCES_PORT_5005_TCP_PROTO=tcp
AAI_TRAVERSAL_SERVICE_PORT=8446
DBC_PG_REPLICA_PORT_5432_TCP_PORT=5432
DBC_PG_REPLICA_SERVICE_HOST=10.233.10.9
PORTAL_WIDGET_SERVICE_HOST=10.233.47.162
MULTICLOUD_FCAPS_PORT_9011_TCP_PORT=9011
DBC_PG_PRIMARY_SERVICE_PORT=5432
AAI_SCHEMA_SERVICE_SERVICE_HOST=10.233.46.36
DMAAP_DR_NODE_PORT_8080_TCP=tcp://10.233.19.60:8080
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PROTO=tcp
TCP_PGSET_REPLICA_PORT_5432_TCP_PORT=5432
POLICY_APEX_PDP_PORT_6969_TCP=tcp://10.233.1.84:6969
SDC_BE_PORT_8443_TCP=tcp://10.233.2.145:8443
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PROTO=tcp
CDS_PY_EXECUTOR_PORT_50052_TCP=tcp://10.233.53.129:50052
SDC_WFD_BE_SERVICE_PORT_SDC_WFD_BE=8443
POLICY_CLAMP_FE_PORT_2443_TCP=tcp://10.233.18.50:2443
CHART_MUSEUM_PORT=tcp://10.233.35.125:80
AAF_CM_PORT=tcp://10.233.35.113:8150
AAI_TRAVERSAL_PORT_8446_TCP=tcp://10.233.20.193:8446
AAI_RESOURCES_PORT_8447_TCP_ADDR=10.233.23.142
AAI_BABEL_SERVICE_PORT=9516
AAF_OAUTH_SERVICE_HOST=10.233.3.102
AAI_SCHEMA_SERVICE_SERVICE_PORT=8452
MULTICLOUD_K8S_PORT_9015_TCP=tcp://10.233.7.173:9015
ONAP_POLICY_MARIADB_METRICS_SERVICE_PORT=9104
MULTICLOUD_FCAPS_PORT_9011_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PORT=9091
TCP_PGSET_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
POLICY_DROOLS_PDP_SERVICE_PORT=6969
SDC_WFD_BE_PORT_8443_TCP_ADDR=10.233.45.72
AAI_TRAVERSAL_PORT_8446_TCP_PORT=8446
KUBERNETES_SERVICE_PORT_HTTPS=443
MULTICLOUD_FCAPS_SERVICE_PORT_MULTICLOUD_FCAPS=9011
DMAAP_BC_SERVICE_PORT_HTTPS_API=8443
SDC_WFD_BE_PORT_8443_TCP=tcp://10.233.45.72:8443
MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT=27017
ONAP_MARIADB_GALERA_METRICS_PORT=tcp://10.233.37.145:9104
AAI_RESOURCES_PORT_5005_TCP=tcp://10.233.23.142:5005
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PROTO=tcp
KAFKA_NUM_RECOVERY_THREADS_PER_DATA_DIR=5
AAI_TRAVERSAL_SERVICE_PORT_HTTPS=8446
AAF_GUI_PORT_8200_TCP_PROTO=tcp
A1POLICYMANAGEMENT_PORT_8433_TCP_ADDR=10.233.2.242
DMAAP_BC_PORT=tcp://10.233.50.121:8443
CDS_SDC_LISTENER_PORT_8080_TCP=tcp://10.233.22.253:8080
HOLMES_ENGINE_MGMT_PORT_9102_TCP_ADDR=10.233.27.207
ONAP_MARIADB_GALERA_METRICS_SERVICE_PORT_METRICS=9104
MSB_EAG_PORT_443_TCP_PROTO=tcp
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP=tcp://10.233.40.11:8433
MARIADB_GALERA_SERVICE_PORT_MYSQL=3306
KUBERNETES_PORT_443_TCP_PORT=443
DMAAP_DR_PROV_PORT_443_TCP=tcp://10.233.13.189:443
KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://10.253.0.184:30491,INTERNAL_SASL_PLAINTEXT://:9092
SDC_FE_PORT_9443_TCP_PORT=9443
AAF_CASS_SERVICE_HOST=10.233.38.219
PORTAL_CASSANDRA_PORT_9160_TCP_PORT=9160
AAI_SCHEMA_SERVICE_SERVICE_PORT_HTTPS=8452
SDC_ONBOARDING_BE_PORT_8081_TCP_PORT=8081
SDC_WFD_FE_PORT_8443_TCP_ADDR=10.233.44.244
PORTAL_SDK_PORT_8443_TCP_PROTO=tcp
KAFKA_SASL_ENABLED_MECHANISMS=PLAIN
MULTICLOUD_K8S_PORT_9015_TCP_PROTO=tcp
ONAP_MARIADB_GALERA_METRICS_SERVICE_HOST=10.233.37.145
AAI_SCHEMA_SERVICE_PORT_5005_TCP_PORT=5005
A1POLICYMANAGEMENT_SERVICE_PORT_HTTPS_API=8433
PORTAL_SDK_SERVICE_HOST=10.233.48.181
AAF_SMS_PORT=tcp://10.233.33.191:10443
OOM_CERT_SERVICE_SERVICE_HOST=10.233.4.61
SDC_FE_SERVICE_PORT_SDC_FE2=9443
MULTICLOUD_FRAMEWORK_PORT=tcp://10.233.60.180:9001
KAFKA_ZOOKEEPER_SET_ACL=true
CDS_UI_PORT_3000_TCP=tcp://10.233.4.73:3000
NBI_MONGOHOST_READ_PORT_27017_TCP_PORT=27017
CONSUL_SERVER_UI_PORT_8500_TCP=tcp://10.233.8.125:8500
MSB_IAG_PORT=tcp://10.233.42.68:443
CDS_SDC_LISTENER_PORT=tcp://10.233.22.253:8080
POLICY_CLAMP_BE_PORT_8443_TCP_ADDR=10.233.53.29
SDC_ONBOARDING_BE_PORT_8081_TCP=tcp://10.233.63.31:8081
AAF_LOCATE_SERVICE_PORT=8095
AAF_SMS_DB_SERVICE_PORT_AAF_SMS_DB=8200
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PORT=8080
SDC_ONBOARDING_BE_SERVICE_PORT_SDC_ONBOARDING_BE2=8081
MESSAGE_ROUTER_PORT_3904_TCP_PORT=3904
ONAP_POLICY_MARIADB_METRICS_SERVICE_PORT_METRICS=9104
DMAAP_DR_NODE_SERVICE_HOST=10.233.19.60
CDS_COMMAND_EXECUTOR_PORT_50051_TCP=tcp://10.233.27.108:50051
AAI_GRAPHADMIN_PORT_5005_TCP_PORT=5005
CDS_DB_SERVICE_PORT=3306
ROBOT_SERVICE_PORT=443
MSB_EAG_SERVICE_HOST=10.233.22.215
NBI_MONGOHOST_READ_PORT=tcp://10.233.14.29:27017
SDC_FE_PORT_9443_TCP_PROTO=tcp
POLICY_CLAMP_CL_K8S_PPNT_PORT=tcp://10.233.26.113:8083
POLICY_DROOLS_PDP_PORT_6969_TCP_ADDR=10.233.55.134
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP=tcp://10.233.26.11:5701
AAI_MODELLOADER_PORT_8080_TCP_PORT=8080
HOLMES_RULE_MGMT_PORT_9101_TCP_ADDR=10.233.54.71
POLICY_APEX_PDP_SERVICE_HOST=10.233.1.84
PORTAL_CASSANDRA_PORT_9042_TCP_PORT=9042
AAI_RESOURCES_PORT=tcp://10.233.23.142:8447
MULTICLOUD_FRAMEWORK_SERVICE_PORT=9001
AAI_GRAPHADMIN_PORT_8449_TCP_PROTO=tcp
AAI_MODELLOADER_PORT_8443_TCP_ADDR=10.233.58.11
PORTAL_CASSANDRA_PORT_7199_TCP=tcp://10.233.59.84:7199
POLICY_APEX_PDP_PORT_6969_TCP_ADDR=10.233.1.84
HOLMES_POSTGRES_PORT_5432_TCP_PROTO=tcp
MULTICLOUD_PIKE_PORT_9007_TCP_PROTO=tcp
DMAAP_DR_PROV_PORT=tcp://10.233.13.189:443
AAF_CASS_PORT_9160_TCP_ADDR=10.233.38.219
OOM_CERT_SERVICE_SERVICE_PORT_HTTPS_HTTP=8443
POLICY_DROOLS_PDP_PORT=tcp://10.233.55.134:6969
NBI_MONGOHOST_READ_PORT_27017_TCP=tcp://10.233.14.29:27017
POLICY_CLAMP_CL_K8S_PPNT_PORT_8083_TCP=tcp://10.233.26.113:8083
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT_BLUEPRINTS_PROCESSOR_GRPC=9111
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT=8433
DBC_POSTGRES_SERVICE_PORT=5432
AAF_SERVICE_SERVICE_PORT=8100
AAF_SMS_PORT_10443_TCP_PORT=10443
ROBOT_PORT_443_TCP_PROTO=tcp
OOF_OSDF_PORT_8698_TCP_ADDR=10.233.5.226
AAF_OAUTH_PORT_8140_TCP_PROTO=tcp
ONAP_CDS_DB_METRICS_SERVICE_HOST=10.233.43.42
PORTAL_CASSANDRA_SERVICE_HOST=10.233.59.84
PGSET_PORT_5432_TCP_PORT=5432
AAF_SMS_DB_PORT=tcp://10.233.21.28:8200
AAF_SMS_DB_PORT_8200_TCP=tcp://10.233.21.28:8200
CLI_SERVICE_PORT_CLI9090=9090
POLICY_DROOLS_PDP_PORT_9696_TCP_ADDR=10.233.55.134
AAF_LOCATE_PORT_8095_TCP_ADDR=10.233.35.19
POLICY_CLAMP_FE_PORT=tcp://10.233.18.50:2443
PORTAL_CASSANDRA_PORT_9042_TCP=tcp://10.233.59.84:9042
AAF_GUI_PORT_8200_TCP_PORT=8200
PGSET_PORT_5432_TCP_PROTO=tcp
POLICY_GUI_SERVICE_PORT=2443
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA5=9042
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA4=7199
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA3=7001
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA2=7000
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
AAF_SMS_SERVICE_PORT=10443
MSB_IAG_PORT_443_TCP_ADDR=10.233.42.68
POLICY_GUI_PORT_2443_TCP_ADDR=10.233.15.121
PORTAL_WIDGET_SERVICE_PORT=8082
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_ADDR=10.233.26.11
AAI_MODELLOADER_PORT_8443_TCP=tcp://10.233.58.11:8443
CLI_SERVICE_PORT_CLI443=443
AAI_TRAVERSAL_PORT=tcp://10.233.20.193:8446
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_ADDR=10.233.60.180
DBC_PG_REPLICA_PORT=tcp://10.233.10.9:5432
CDS_SDC_LISTENER_SERVICE_HOST=10.233.22.253
SDC_ONBOARDING_BE_SERVICE_PORT=8445
POLICY_API_PORT_6969_TCP_PORT=6969
CDS_COMMAND_EXECUTOR_SERVICE_PORT_COMMAND_EXECUTOR_GRPC=50051
DMAAP_DR_NODE_SERVICE_PORT=8443
SDC_BE_PORT_8443_TCP_PORT=8443
POLICY_CLAMP_CL_RUNTIME_PORT_6969_TCP=tcp://10.233.22.170:6969
PORTAL_SDK_PORT_8443_TCP_ADDR=10.233.48.181
OOM_CERT_SERVICE_PORT_8443_TCP=tcp://10.233.4.61:8443
PORTAL_CASSANDRA_PORT_9160_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP=tcp://10.233.0.1:443
AAI_TRAVERSAL_PORT_5005_TCP_PROTO=tcp
HOLMES_POSTGRES_PRIMARY_SERVICE_HOST=10.233.3.55
HOLMES_RULE_MGMT_PORT=tcp://10.233.54.71:9101
AAF_LOCATE_PORT_8095_TCP=tcp://10.233.35.19:8095
POLICY_DROOLS_PDP_PORT_9696_TCP=tcp://10.233.55.134:9696
HOLMES_RULE_MGMT_PORT_9104_TCP_ADDR=10.233.54.71
MSB_CONSUL_PORT=tcp://10.233.50.184:8500
TCP_PGSET_REPLICA_PORT_5432_TCP_PROTO=tcp
MULTICLOUD_PIKE_PORT_9007_TCP_ADDR=10.233.39.25
HOLMES_POSTGRES_PORT_5432_TCP_ADDR=10.233.31.215
TCP_PGSET_PRIMARY_SERVICE_HOST=10.233.5.218
MSB_DISCOVERY_PORT_10081_TCP=tcp://10.233.14.24:10081
AAI_BABEL_PORT_9516_TCP=tcp://10.233.23.138:9516
KAFKA_AUTHORIZER_CLASS_NAME=org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
MESSAGE_ROUTER_KAFKA_0_SERVICE_HOST=10.233.37.226
KAFKA_GROUP=onap
POLICY_CLAMP_BE_SERVICE_PORT=8443
POLICY_PAP_PORT_6969_TCP=tcp://10.233.15.181:6969
SDC_WFD_BE_PORT=tcp://10.233.45.72:8443
AAF_FS_SERVICE_PORT=8096
AAF_CASS_PORT_9160_TCP_PORT=9160
POLICY_CLAMP_CL_RUNTIME_PORT=tcp://10.233.22.170:6969
AAI_GRAPHADMIN_PORT_8449_TCP_PORT=8449
MESSAGE_ROUTER_SERVICE_PORT_HTTPS_API=3905
AAI_GRAPHADMIN_PORT_8449_TCP=tcp://10.233.2.190:8449
DBC_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.10.9
NBI_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp
ROBOT_PORT=tcp://10.233.23.175:443
ROBOT_SERVICE_PORT_HTTPD=443
AAI_MODELLOADER_PORT_8443_TCP_PORT=8443
KAFKA_LOG_RETENTION_HOURS=168
AAI_SCHEMA_SERVICE_PORT_8452_TCP=tcp://10.233.46.36:8452
AAI_TRAVERSAL_PORT_8446_TCP_PROTO=tcp
SDC_WFD_BE_SERVICE_HOST=10.233.45.72
SDC_BE_PORT_8080_TCP_PROTO=tcp
DMAAP_BC_PORT_8443_TCP_PROTO=tcp
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA=9160
POLICY_DROOLS_PDP_PORT_6969_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PORT=9111
POLICY_DISTRIBUTION_SERVICE_PORT=6969
MSB_IAG_SERVICE_PORT_HTTPS_MSB_IAG=443
PGSET_SERVICE_PORT=5432
PORTAL_SDK_PORT_8443_TCP_PORT=8443
COMPONENT=kafka
AAF_SMS_DB_PORT_8200_TCP_ADDR=10.233.21.28
OOF_OSDF_PORT_8698_TCP_PORT=8698
AAF_CM_PORT_8150_TCP_PORT=8150
KAFKA_BROKER_ID=1
HOLMES_POSTGRES_PRIMARY_SERVICE_PORT=5432
MODELING_ETSICATALOG_PORT_8806_TCP_ADDR=10.233.31.168
MSB_EAG_SERVICE_PORT=443
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_HOST=10.233.0.93
DBC_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
AAI_SCHEMA_SERVICE_PORT_5005_TCP_ADDR=10.233.46.36
DMAAP_BC_PORT_8443_TCP_PORT=8443
CDS_SDC_LISTENER_PORT_8080_TCP_ADDR=10.233.22.253
HOLMES_ENGINE_MGMT_PORT_9102_TCP_PORT=9102
AAI_SPARKY_BE_PORT_8000_TCP_ADDR=10.233.25.166
AAI_TRAVERSAL_PORT_5005_TCP_PORT=5005
HOLMES_RULE_MGMT_PORT_9101_TCP=tcp://10.233.54.71:9101
SDC_BE_PORT_8443_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_HOST=10.233.10.54
AAF_LOCATE_PORT_8095_TCP_PORT=8095
DBC_POSTGRES_PORT_5432_TCP=tcp://10.233.34.43:5432
POLICY_DROOLS_PDP_PORT_9696_TCP_PORT=9696
AAF_CASS_PORT_9042_TCP_PROTO=tcp
DMAAP_BC_PORT_8443_TCP_ADDR=10.233.50.121
MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT=9091
PORTAL_CASSANDRA_PORT_7199_TCP_PORT=7199
NBI_SERVICE_PORT_API_8443=8443
MULTICLOUD_FRAMEWORK_PORT_9001_TCP=tcp://10.233.60.180:9001
MESSAGE_ROUTER_KAFKA_1_SERVICE_HOST=10.233.24.58
AAI_MODELLOADER_SERVICE_HOST=10.233.58.11
POLICY_API_SERVICE_PORT=6969
POLICY_DISTRIBUTION_SERVICE_HOST=10.233.44.100
DMAAP_BC_SERVICE_PORT=8443
HOLMES_POSTGRES_SERVICE_HOST=10.233.31.215
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_9160_TCP_ADDR=10.233.59.84
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PORT=8433
MARIADB_GALERA_PORT=tcp://10.233.19.211:3306
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_ADDR=10.233.60.48
AAF_SERVICE_PORT_8100_TCP_ADDR=10.233.29.31
KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf
AAI_SCHEMA_SERVICE_PORT_8452_TCP_ADDR=10.233.46.36
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PORT=5701
DBC_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
MULTICLOUD_K8S_SERVICE_HOST=10.233.7.173
AAF_SMS_PORT_10443_TCP=tcp://10.233.33.191:10443
PORTAL_CASSANDRA_SERVICE_PORT=9160
AAI_BABEL_PORT_9516_TCP_ADDR=10.233.23.138
MSB_DISCOVERY_PORT_10081_TCP_ADDR=10.233.14.24
TCP_PGSET_REPLICA_PORT=tcp://10.233.17.3:5432
SDC_ONBOARDING_BE_PORT_8445_TCP_PROTO=tcp
POLICY_XACML_PDP_PORT_6969_TCP_PROTO=tcp
CDS_DB_PORT_3306_TCP_ADDR=10.233.51.197
A1POLICYMANAGEMENT_PORT_8433_TCP=tcp://10.233.2.242:8433
AAF_SERVICE_PORT=tcp://10.233.29.31:8100
POLICY_DISTRIBUTION_PORT=tcp://10.233.44.100:6969
MESSAGE_ROUTER_PORT_3905_TCP=tcp://10.233.19.176:3905
PORTAL_CASSANDRA_PORT_7199_TCP_ADDR=10.233.59.84
CDS_DB_SERVICE_HOST=10.233.51.197
POLICY_API_PORT=tcp://10.233.22.124:6969
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP=tcp://10.233.10.54:8080
DBC_POSTGRES_PORT_5432_TCP_PORT=5432
POLICY_APEX_PDP_SERVICE_PORT_POLICY_APEX_PDP=6969
A1POLICYMANAGEMENT_PORT_8081_TCP=tcp://10.233.2.242:8081
OOF_HAS_API_SERVICE_PORT=8091
CDS_UI_SERVICE_HOST=10.233.4.73
MSB_IAG_SERVICE_HOST=10.233.42.68
TCP_PGSET_PRIMARY_PORT_5432_TCP_PORT=5432
MULTICLOUD_FCAPS_PORT_9011_TCP=tcp://10.233.8.48:9011
SDC_BE_EXTERNAL_PORT_8443_TCP=tcp://10.233.8.133:8443
PORTAL_DB_PORT_3306_TCP_ADDR=10.233.20.183
POLICY_XACML_PDP_SERVICE_PORT=6969
POLICY_PAP_SERVICE_HOST=10.233.15.181
AAF_FS_PORT_8096_TCP=tcp://10.233.19.181:8096
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PORT=27017
SDC_BE_PORT_8443_TCP_ADDR=10.233.2.145
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP=tcp://10.233.11.156:9104
POLICY_MARIADB_PORT_3306_TCP_PORT=3306
SDC_FE_PORT_9443_TCP=tcp://10.233.34.114:9443
ONAP_CDS_DB_METRICS_PORT_9104_TCP=tcp://10.233.43.42:9104
DMAAP_DR_NODE_PORT_8080_TCP_PROTO=tcp
NBI_PORT_8443_TCP_PORT=8443
CLI_PORT_443_TCP_ADDR=10.233.31.117
SDC_HELM_VALIDATOR_SERVICE_PORT_HTTP=8080
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PORT=3905
TCP_PGSET_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
TCP_PGSET_PRIMARY_PORT_5432_TCP_ADDR=10.233.5.218
enableCadi=true
CDS_UI_PORT_3000_TCP_PORT=3000
AAI_TRAVERSAL_PORT_8446_TCP_ADDR=10.233.20.193
PORTAL_WIDGET_PORT_8082_TCP_ADDR=10.233.47.162
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PORT=50051
AAF_SMS_PORT_10443_TCP_PROTO=tcp
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1
MULTICLOUD_K8S_PORT=tcp://10.233.7.173:9015
AAF_SMS_PORT_10443_TCP_ADDR=10.233.33.191
CHART_MUSEUM_PORT_80_TCP_PORT=80
MSB_EAG_SERVICE_PORT_HTTPS_MSB_EAG=443
POLICY_XACML_PDP_PORT_6969_TCP_PORT=6969
SDC_ONBOARDING_BE_PORT_8445_TCP_PORT=8445
DMAAP_DR_PROV_SERVICE_PORT_DR_PROV_PORT2=443
ONAP_POLICY_MARIADB_METRICS_SERVICE_HOST=10.233.11.156
POLICY_CLAMP_CL_K8S_PPNT_PORT_8083_TCP_PROTO=tcp
AAI_SPARKY_BE_PORT_8000_TCP_PORT=8000
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_ADDR=10.233.27.108
AAF_CASS_PORT_7000_TCP_ADDR=10.233.38.219
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT=5701
SHLVL=1
DBC_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
TCP_PGSET_REPLICA_SERVICE_HOST=10.233.17.3
AAF_CASS_SERVICE_PORT_TLS=7001
OOF_HAS_API_PORT=tcp://10.233.35.91:8091
AAF_SMS_DB_SERVICE_PORT=8200
KAFKA_LISTENERS=EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
HOLMES_ENGINE_MGMT_SERVICE_PORT_HTTPS_REST=9102
MSB_DISCOVERY_PORT=tcp://10.233.14.24:10081
MARIADB_GALERA_PORT_3306_TCP_PORT=3306
OOF_OSDF_PORT_8698_TCP=tcp://10.233.5.226:8698
SDC_BE_EXTERNAL_PORT_8443_TCP_ADDR=10.233.8.133
AAF_CASS_PORT_7001_TCP=tcp://10.233.38.219:7001
HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_UI=9104
KAFKA_ZOOKEEPER_CONNECT=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE=false
AAI_SCHEMA_SERVICE_PORT=tcp://10.233.46.36:8452
AAI_TRAVERSAL_SERVICE_PORT_TCP_5005=5005
KUBERNETES_SERVICE_PORT=443
PORTAL_DB_SERVICE_HOST=10.233.20.183
AAF_CM_PORT_8150_TCP=tcp://10.233.35.113:8150
AAF_OAUTH_SERVICE_PORT=8140
NBI_MONGOHOST_READ_SERVICE_HOST=10.233.14.29
MULTICLOUD_K8S_PORT_9015_TCP_PORT=9015
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PORT=9091
POLICY_XACML_PDP_SERVICE_PORT_POLICY_XACML_PDP=6969
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN
MESSAGE_ROUTER_SERVICE_PORT_HTTP_API=3904
CDS_PY_EXECUTOR_SERVICE_PORT_EXECUTOR_GRPC=50052
ROBOT_SERVICE_HOST=10.233.23.175
AAF_LOCATE_SERVICE_HOST=10.233.35.19
PORTAL_CASSANDRA_PORT_7000_TCP_PROTO=tcp
POLICY_APEX_PDP_PORT=tcp://10.233.1.84:6969
DMAAP_DR_NODE_PORT_8080_TCP_ADDR=10.233.19.60
AAI_GRAPHADMIN_PORT_5005_TCP_PROTO=tcp
MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT=3905
CONSUL_SERVER_UI_PORT_8500_TCP_PROTO=tcp
DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT=8443
CDS_SDC_LISTENER_SERVICE_PORT=8080
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_ADDR=10.233.24.58
POLICY_MARIADB_PORT=tcp://10.233.40.194:3306
POLICY_MARIADB_SERVICE_PORT=3306
AAF_FS_SERVICE_HOST=10.233.19.181
MODELING_ETSICATALOG_SERVICE_PORT_MODELING_ETSICATALOG=8806
CDS_PY_EXECUTOR_PORT_50053_TCP_PORT=50053
MULTICLOUD_FCAPS_SERVICE_HOST=10.233.8.48
AAI_BABEL_PORT=tcp://10.233.23.138:9516
PORTAL_SDK_PORT_8443_TCP=tcp://10.233.48.181:8443
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_HOST=10.233.40.11
DBC_POSTGRES_SERVICE_HOST=10.233.34.43
HOLMES_POSTGRES_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
AAF_SMS_DB_PORT_8200_TCP_PORT=8200
POLICY_CLAMP_CL_K8S_PPNT_SERVICE_PORT_HTTP_API=8083
CLI_PORT_9090_TCP_PORT=9090
CDS_PY_EXECUTOR_PORT_50053_TCP_PROTO=tcp
AAI_GRAPHADMIN_PORT_5005_TCP_ADDR=10.233.2.190
CLI_SERVICE_PORT=443
CHART_MUSEUM_SERVICE_HOST=10.233.35.125
AAF_CM_SERVICE_HOST=10.233.35.113
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PROTO=tcp
SDC_BE_PORT_8080_TCP=tcp://10.233.2.145:8080
ROBOT_PORT_443_TCP_ADDR=10.233.23.175
MSB_CONSUL_SERVICE_PORT_HTTP_MSB_CONSUL=8500
AAF_OAUTH_PORT_8140_TCP_ADDR=10.233.3.102
AAI_BABEL_PORT_9516_TCP_PROTO=tcp
MSB_DISCOVERY_PORT_10081_TCP_PROTO=tcp
AAI_RESOURCES_PORT_5005_TCP_ADDR=10.233.23.142
AAI_SERVICE_HOST=10.233.62.159
MESSAGE_ROUTER_PORT_3904_TCP_PROTO=tcp
HOLMES_ENGINE_MGMT_PORT=tcp://10.233.27.207:9102
DMAAP_DR_PROV_PORT_443_TCP_PORT=443
CDS_SDC_LISTENER_PORT_8080_TCP_PROTO=tcp
MODELING_ETSICATALOG_PORT_8806_TCP_PROTO=tcp
OOF_HAS_API_PORT_8091_TCP=tcp://10.233.35.91:8091
SDC_ONBOARDING_BE_PORT=tcp://10.233.63.31:8445
SDC_ONBOARDING_BE_SERVICE_PORT_SDC_ONBOARDING_BE=8445
AAI_SPARKY_BE_PORT_8000_TCP=tcp://10.233.25.166:8000
POLICY_DROOLS_PDP_SERVICE_HOST=10.233.55.134
DBC_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.42.64
CDS_UI_SERVICE_PORT_CDS_UI_3000=3000
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_ADDR=10.233.10.54
DMAAP_DR_NODE_SERVICE_PORT_HTTPS_API=8443
aaf_locate_url=https://aaf-locate.onap:8095
CONSUL_SERVER_UI_PORT_8500_TCP_PORT=8500
MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT_HTTPS_API=3905
OOM_CERT_SERVICE_PORT_8443_TCP_PROTO=tcp
SDC_HELM_VALIDATOR_PORT_8080_TCP=tcp://10.233.6.252:8080
HOLMES_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
AAI_MODELLOADER_PORT_8080_TCP=tcp://10.233.58.11:8080
AAF_SMS_DB_PORT_8200_TCP_PROTO=tcp
MSB_EAG_PORT_443_TCP_PORT=443
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
SDC_BE_SERVICE_PORT_HTTPS_API=8443
PORTAL_APP_SERVICE_PORT_PORTAL_APP4=8443
AAF_FS_PORT_8096_TCP_PROTO=tcp
AAF_SMS_SERVICE_HOST=10.233.33.191
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PROTO=tcp
AAI_MODELLOADER_PORT_8080_TCP_ADDR=10.233.58.11
MODELING_ETSICATALOG_SERVICE_HOST=10.233.31.168
DMAAP_DR_NODE_SERVICE_PORT_HTTP_API=8080
AAF_SERVICE_PORT_8100_TCP_PORT=8100
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PORT=5432
AAI_MODELLOADER_PORT_8080_TCP_PROTO=tcp
KAFKA_LOG_DIRS=/var/lib/kafka/data
POLICY_CLAMP_BE_PORT=tcp://10.233.53.29:8443
HOLMES_RULE_MGMT_PORT_9104_TCP=tcp://10.233.54.71:9104
AAI_RESOURCES_PORT_8447_TCP=tcp://10.233.23.142:8447
POLICY_GUI_SERVICE_HOST=10.233.15.121
MSB_EAG_PORT_443_TCP_ADDR=10.233.22.215
AAF_CASS_PORT_9160_TCP=tcp://10.233.38.219:9160
POLICY_DROOLS_PDP_PORT_6969_TCP_PORT=6969
OOM_CERT_SERVICE_SERVICE_PORT=8443
POLICY_DISTRIBUTION_PORT_6969_TCP_ADDR=10.233.44.100
NBI_MONGOHOST_READ_SERVICE_PORT_MONGO=27017
POLICY_GUI_SERVICE_PORT_POLICY_GUI=2443
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=3
AAF_CM_SERVICE_PORT_API=8150
MSB_EAG_PORT_443_TCP=tcp://10.233.22.215:443
MARIADB_GALERA_SERVICE_PORT=3306
POLICY_API_PORT_6969_TCP_PROTO=tcp
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_ADDR=10.233.11.156
PORTAL_CASSANDRA_PORT_7001_TCP_PORT=7001
CUB_CLASSPATH="/usr/share/java/cp-base-new/*"
DMAAP_BC_PORT_8443_TCP=tcp://10.233.50.121:8443
MESSAGE_ROUTER_SERVICE_HOST=10.233.19.176
PORTAL_APP_PORT_8443_TCP=tcp://10.233.10.179:8443
MULTICLOUD_FCAPS_PORT=tcp://10.233.8.48:9011
PORTAL_CASSANDRA_PORT_7000_TCP_ADDR=10.233.59.84
POLICY_CLAMP_CL_K8S_PPNT_PORT_8083_TCP_PORT=8083
MESSAGE_ROUTER_EXTERNAL_PORT=tcp://10.233.33.99:3905
AAI_SERVICE_PORT=8443
ONAP_CDS_DB_METRICS_PORT=tcp://10.233.43.42:9104
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PROTO=tcp
DBC_POSTGRES_PORT_5432_TCP_PROTO=tcp
TCP_PGSET_PRIMARY_PORT_5432_TCP=tcp://10.233.5.218:5432
AAI_GRAPHADMIN_SERVICE_PORT_TCP_5005=5005
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PROTO=tcp
MSB_CONSUL_SERVICE_PORT=8500
NBI_PORT_8443_TCP=tcp://10.233.13.60:8443
TCP_PGSET_REPLICA_PORT_5432_TCP_ADDR=10.233.17.3
CLI_PORT_9090_TCP=tcp://10.233.31.117:9090
PORTAL_CASSANDRA_PORT_9042_TCP_ADDR=10.233.59.84
KUBERNETES_SERVICE_HOST=10.233.0.1
CDS_PY_EXECUTOR_SERVICE_HOST=10.233.53.129
MESSAGE_ROUTER_PORT_3904_TCP_ADDR=10.233.19.176
KAFKA_USER=mrkafka
AAF_LOCATE_PORT=tcp://10.233.35.19:8095
CHART_MUSEUM_PORT_80_TCP_PROTO=tcp
CLI_SERVICE_HOST=10.233.31.117
OOF_OSDF_PORT=tcp://10.233.5.226:8698
MSB_IAG_PORT_443_TCP_PORT=443
POLICY_GUI_PORT_2443_TCP_PORT=2443
AAI_SCHEMA_SERVICE_PORT_8452_TCP_PROTO=tcp
POLICY_PAP_PORT_6969_TCP_ADDR=10.233.15.181
A1POLICYMANAGEMENT_PORT_8081_TCP_PORT=8081
PORTAL_CASSANDRA_PORT_7199_TCP_PROTO=tcp
SDC_HELM_VALIDATOR_PORT_8080_TCP_ADDR=10.233.6.252
AAI_TRAVERSAL_PORT_5005_TCP_ADDR=10.233.20.193
POLICY_PAP_PORT_6969_TCP_PORT=6969
MESSAGE_ROUTER_PORT_3904_TCP=tcp://10.233.19.176:3904
MSB_IAG_PORT_443_TCP_PROTO=tcp
POLICY_GUI_PORT_2443_TCP_PROTO=tcp
HOLMES_RULE_MGMT_SERVICE_HOST=10.233.54.71
AAI_GRAPHADMIN_SERVICE_PORT=8449
AAI_SPARKY_BE_SERVICE_HOST=10.233.25.166
AAF_OAUTH_PORT=tcp://10.233.3.102:8140
AAF_CM_PORT_8150_TCP_ADDR=10.233.35.113
HOLMES_ENGINE_MGMT_PORT_9102_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_7000_TCP_PORT=7000
MESSAGE_ROUTER_KAFKA_2_SERVICE_HOST=10.233.14.135
SDC_BE_SERVICE_HOST=10.233.2.145
PGSET_PORT=tcp://10.233.39.254:5432
CHART_MUSEUM_PORT_80_TCP=tcp://10.233.35.125:80
AAF_CASS_PORT=tcp://10.233.38.219:7000
HOLMES_RULE_MGMT_PORT_9101_TCP_PROTO=tcp
POLICY_CLAMP_FE_PORT_2443_TCP_ADDR=10.233.18.50
_=/usr/bin/env
===> User
uid=1000(mrkafka) gid=1001(mrkafka) groups=1001(mrkafka),1000(onap)
===> Configuring ...
SASL is enabled.
===> Running preflight checks ... 
===> Check if /var/lib/kafka/data is writable ...
===> Check if Zookeeper is healthy ...
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/share/java/cp-base-new/slf4j-log4j12-1.7.30.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/share/java/cp-base-new/slf4j-simple-1.7.30.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
log4j:WARN No appenders could be found for logger (io.confluent.admin.utils.cli.ZookeeperReadyCommand).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
===> Launching ... 
===> Launching kafka ... 
[2022-03-05 03:03:54,040] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[2022-03-05 03:03:54,866] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util)
[2022-03-05 03:03:55,138] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler)
[2022-03-05 03:03:55,146] INFO starting (kafka.server.KafkaServer)
[2022-03-05 03:03:55,156] INFO Connecting to zookeeper on onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 (kafka.server.KafkaServer)
[2022-03-05 03:03:55,189] INFO [ZooKeeperClient Kafka server] Initializing a new session to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181. (kafka.zookeeper.ZooKeeperClient)
[2022-03-05 03:03:55,199] INFO Client environment:zookeeper.version=3.5.9-83df9301aa5c2a5d284a9940177808c01bc35cef, built on 01/06/2021 20:03 GMT (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:host.name=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:java.version=11.0.11 (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:java.home=/usr/lib/jvm/zulu11-ca (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:java.class.path=/usr/bin/../share/java/kafka/activation-1.1.1.jar:/usr/bin/../share/java/kafka/aopalliance-repackaged-2.6.1.jar:/usr/bin/../share/java/kafka/argparse4j-0.7.0.jar:/usr/bin/../share/java/kafka/audience-annotations-0.5.0.jar:/usr/bin/../share/java/kafka/commons-cli-1.4.jar:/usr/bin/../share/java/kafka/commons-lang3-3.8.1.jar:/usr/bin/../share/java/kafka/confluent-log4j-1.2.17-cp2.jar:/usr/bin/../share/java/kafka/connect-api-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/connect-basic-auth-extension-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/connect-file-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/connect-json-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/connect-mirror-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/connect-mirror-client-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/connect-runtime-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/connect-transforms-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/hk2-api-2.6.1.jar:/usr/bin/../share/java/kafka/hk2-locator-2.6.1.jar:/usr/bin/../share/java/kafka/hk2-utils-2.6.1.jar:/usr/bin/../share/java/kafka/jackson-annotations-2.10.5.jar:/usr/bin/../share/java/kafka/jackson-core-2.10.5.jar:/usr/bin/../share/java/kafka/jackson-databind-2.10.5.1.jar:/usr/bin/../share/java/kafka/jackson-dataformat-csv-2.10.5.jar:/usr/bin/../share/java/kafka/jackson-datatype-jdk8-2.10.5.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-base-2.10.5.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-json-provider-2.10.5.jar:/usr/bin/../share/java/kafka/jackson-module-jaxb-annotations-2.10.5.jar:/usr/bin/../share/java/kafka/jackson-module-paranamer-2.10.5.jar:/usr/bin/../share/java/kafka/jackson-module-scala_2.13-2.10.5.jar:/usr/bin/../share/java/kafka/jakarta.activation-api-1.2.1.jar:/usr/bin/../share/java/kafka/jakarta.annotation-api-1.3.5.jar:/usr/bin/../share/java/kafka/jakarta.inject-2.6.1.jar:/usr/bin/../share/java/kafka/jakarta.validation-api-2.0.2.jar:/usr/bin/../share/java/kafka/jakarta.ws.rs-api-2.1.6.jar:/usr/bin/../share/java/kafka/jakarta.xml.bind-api-2.3.2.jar:/usr/bin/../share/java/kafka/javassist-3.27.0-GA.jar:/usr/bin/../share/java/kafka/javax.servlet-api-3.1.0.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.1.1.jar:/usr/bin/../share/java/kafka/jaxb-api-2.3.0.jar:/usr/bin/../share/java/kafka/jersey-client-2.34.jar:/usr/bin/../share/java/kafka/jersey-common-2.34.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-2.34.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-core-2.34.jar:/usr/bin/../share/java/kafka/jersey-hk2-2.34.jar:/usr/bin/../share/java/kafka/jersey-server-2.34.jar:/usr/bin/../share/java/kafka/jetty-client-9.4.40.v20210413.jar:/usr/bin/../share/java/kafka/jetty-continuation-9.4.40.v20210413.jar:/usr/bin/../share/java/kafka/jetty-http-9.4.40.v20210413.jar:/usr/bin/../share/java/kafka/jetty-io-9.4.40.v20210413.jar:/usr/bin/../share/java/kafka/jetty-security-9.4.40.v20210413.jar:/usr/bin/../share/java/kafka/jetty-server-9.4.40.v20210413.jar:/usr/bin/../share/java/kafka/jetty-servlet-9.4.40.v20210413.jar:/usr/bin/../share/java/kafka/jetty-servlets-9.4.40.v20210413.jar:/usr/bin/../share/java/kafka/jetty-util-9.4.40.v20210413.jar:/usr/bin/../share/java/kafka/jetty-util-ajax-9.4.40.v20210413.jar:/usr/bin/../share/java/kafka/jline-3.12.1.jar:/usr/bin/../share/java/kafka/jopt-simple-5.0.4.jar:/usr/bin/../share/java/kafka/kafka-clients-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-log4j-appender-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-metadata-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/k
afka-raft-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-shell-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-examples-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-scala_2.13-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-test-utils-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/kafka-tools-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/kafka.jar:/usr/bin/../share/java/kafka/kafka_2.13-6.2.0-ccs-javadoc.jar:/usr/bin/../share/java/kafka/kafka_2.13-6.2.0-ccs-sources.jar:/usr/bin/../share/java/kafka/kafka_2.13-6.2.0-ccs-test-sources.jar:/usr/bin/../share/java/kafka/kafka_2.13-6.2.0-ccs-test.jar:/usr/bin/../share/java/kafka/kafka_2.13-6.2.0-ccs.jar:/usr/bin/../share/java/kafka/lz4-java-1.7.1.jar:/usr/bin/../share/java/kafka/maven-artifact-3.8.1.jar:/usr/bin/../share/java/kafka/metrics-core-2.2.0.jar:/usr/bin/../share/java/kafka/netty-buffer-4.1.62.Final.jar:/usr/bin/../share/java/kafka/netty-codec-4.1.62.Final.jar:/usr/bin/../share/java/kafka/netty-common-4.1.62.Final.jar:/usr/bin/../share/java/kafka/netty-handler-4.1.62.Final.jar:/usr/bin/../share/java/kafka/netty-resolver-4.1.62.Final.jar:/usr/bin/../share/java/kafka/netty-transport-4.1.62.Final.jar:/usr/bin/../share/java/kafka/netty-transport-native-epoll-4.1.62.Final.jar:/usr/bin/../share/java/kafka/netty-transport-native-unix-common-4.1.62.Final.jar:/usr/bin/../share/java/kafka/osgi-resource-locator-1.0.3.jar:/usr/bin/../share/java/kafka/paranamer-2.8.jar:/usr/bin/../share/java/kafka/plexus-utils-3.2.1.jar:/usr/bin/../share/java/kafka/reflections-0.9.12.jar:/usr/bin/../share/java/kafka/rocksdbjni-5.18.4.jar:/usr/bin/../share/java/kafka/scala-collection-compat_2.13-2.3.0.jar:/usr/bin/../share/java/kafka/scala-library-2.13.5.jar:/usr/bin/../share/java/kafka/scala-java8-compat_2.13-0.9.1.jar:/usr/bin/../share/java/kafka/scala-logging_2.13-3.9.2.jar:/usr/bin/../share/java/kafka/scala-reflect-2.13.5.jar:/usr/bin/../share/java/kafka/slf4j-api-1.7.30.jar:/usr/bin/../share/java/kafka/slf4j-log4j12-1.7.30.jar:/usr/bin/../share/java/kafka/snappy-java-1.1.8.1.jar:/usr/bin/../share/java/kafka/zookeeper-3.5.9.jar:/usr/bin/../share/java/kafka/zookeeper-jute-3.5.9.jar:/usr/bin/../share/java/kafka/zstd-jni-1.4.9-1.jar:/usr/bin/../share/java/kafka/kafka11aaf.jar:/usr/bin/../share/java/confluent-telemetry/* (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:os.version=4.19.0-17-cloud-amd64 (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:user.name=mrkafka (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:user.home=/home/mrkafka (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:user.dir=/home/mrkafka (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:os.memory.free=1007MB (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,199] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,200] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,204] INFO Initiating client connection, connectString=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@17a1e4ca (org.apache.zookeeper.ZooKeeper)
[2022-03-05 03:03:55,247] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket)
[2022-03-05 03:03:55,256] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn)
[2022-03-05 03:03:55,264] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2022-03-05 03:03:55,349] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[2022-03-05 03:03:55,359] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2022-03-05 03:03:55,424] INFO Opening socket connection to server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.69.205:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2022-03-05 03:03:55,438] INFO Socket connection established, initiating session, client: /10.233.71.31:38718, server: onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.69.205:2181 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 03:03:55,461] INFO Session establishment complete on server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.69.205:2181, sessionid = 0x10000745eb40001, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 03:03:55,467] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2022-03-05 03:03:56,654] INFO [feature-zk-node-event-process-thread]: Starting (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread)
[2022-03-05 03:03:56,942] INFO Updated cache from existing  to latest FinalizedFeaturesAndEpoch(features=Features{}, epoch=0). (kafka.server.FinalizedFeatureCache)
[2022-03-05 03:03:56,951] INFO Cluster ID = 2Q2HydR3SQ6TDYcY3QXpow (kafka.server.KafkaServer)
[2022-03-05 03:03:56,956] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2022-03-05 03:03:57,049] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://10.253.0.184:30491,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.heartbeat.interval.ms = 2000
	broker.id = 1
	broker.id.generation.enable = true
	broker.rack = null
	broker.session.timeout.ms = 9000
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.listener.names = null
	controller.quorum.append.linger.ms = 25
	controller.quorum.election.backoff.max.ms = 1000
	controller.quorum.election.timeout.ms = 1000
	controller.quorum.fetch.timeout.ms = 2000
	controller.quorum.request.timeout.ms = 2000
	controller.quorum.retry.backoff.ms = 20
	controller.quorum.voters = []
	controller.quota.window.num = 11
	controller.quota.window.size.seconds = 1
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delegation.token.secret.key = null
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.max.bytes = 57671680
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	initial.broker.registration.timeout.ms = 60000
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.8-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.8-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connection.creation.rate = 2147483647
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1048588
	metadata.log.dir = null
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	node.id = -1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	process.roles = []
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 30000
	replica.selector.class = null
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.controller.protocol = GSSAPI
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	security.providers = null
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = DEFAULT
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.clientCnxnSocket = null
	zookeeper.connect = onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 18000
	zookeeper.set.acl = true
	zookeeper.ssl.cipher.suites = null
	zookeeper.ssl.client.enable = false
	zookeeper.ssl.crl.enable = false
	zookeeper.ssl.enabled.protocols = null
	zookeeper.ssl.endpoint.identification.algorithm = HTTPS
	zookeeper.ssl.keystore.location = null
	zookeeper.ssl.keystore.password = null
	zookeeper.ssl.keystore.type = null
	zookeeper.ssl.ocsp.enable = false
	zookeeper.ssl.protocol = TLSv1.2
	zookeeper.ssl.truststore.location = null
	zookeeper.ssl.truststore.password = null
	zookeeper.ssl.truststore.type = null
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2022-03-05 03:03:57,064] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://10.253.0.184:30491,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.heartbeat.interval.ms = 2000
	broker.id = 1
	broker.id.generation.enable = true
	broker.rack = null
	broker.session.timeout.ms = 9000
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.listener.names = null
	controller.quorum.append.linger.ms = 25
	controller.quorum.election.backoff.max.ms = 1000
	controller.quorum.election.timeout.ms = 1000
	controller.quorum.fetch.timeout.ms = 2000
	controller.quorum.request.timeout.ms = 2000
	controller.quorum.retry.backoff.ms = 20
	controller.quorum.voters = []
	controller.quota.window.num = 11
	controller.quota.window.size.seconds = 1
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delegation.token.secret.key = null
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.max.bytes = 57671680
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	initial.broker.registration.timeout.ms = 60000
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.8-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.8-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connection.creation.rate = 2147483647
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1048588
	metadata.log.dir = null
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	node.id = -1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	process.roles = []
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 30000
	replica.selector.class = null
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.controller.protocol = GSSAPI
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	security.providers = null
	socket.connection.setup.timeout.max.ms = 30000
	socket.connection.setup.timeout.ms = 10000
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
	ssl.endpoint.identification.algorithm = https
	ssl.engine.factory.class = null
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.certificate.chain = null
	ssl.keystore.key = null
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = DEFAULT
	ssl.protocol = TLSv1.3
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.certificates = null
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.clientCnxnSocket = null
	zookeeper.connect = onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 18000
	zookeeper.set.acl = true
	zookeeper.ssl.cipher.suites = null
	zookeeper.ssl.client.enable = false
	zookeeper.ssl.crl.enable = false
	zookeeper.ssl.enabled.protocols = null
	zookeeper.ssl.endpoint.identification.algorithm = HTTPS
	zookeeper.ssl.keystore.location = null
	zookeeper.ssl.keystore.password = null
	zookeeper.ssl.keystore.type = null
	zookeeper.ssl.ocsp.enable = false
	zookeeper.ssl.protocol = TLSv1.2
	zookeeper.ssl.truststore.location = null
	zookeeper.ssl.truststore.password = null
	zookeeper.ssl.truststore.type = null
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
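The configuration dump above shows both listeners mapped to SASL_PLAINTEXT with only the PLAIN mechanism enabled, and inter-broker traffic pinned to INTERNAL_SASL_PLAINTEXT. As a rough, illustrative sketch (not part of the captured log), a client reaching the internal listener would need matching SASL/PLAIN settings; the broker hostname below is the one registered later in this log, while the username and password are placeholders, not values from this deployment.

```java
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;

// Minimal sketch of client properties matching the SASL_PLAINTEXT / PLAIN
// listener configuration dumped above. Credentials are placeholders.
public class SaslPlainClientSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers",
                "onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092");
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put("sasl.mechanism", "PLAIN");
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                + "username=\"<user>\" password=\"<password>\";");
        try (AdminClient admin = AdminClient.create(props)) {
            // List topic names as a basic connectivity check.
            admin.listTopics().names().get().forEach(System.out::println);
        }
    }
}
```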
[2022-03-05 03:03:57,149] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2022-03-05 03:03:57,150] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2022-03-05 03:03:57,152] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2022-03-05 03:03:57,154] INFO [ThrottledChannelReaper-ControllerMutation]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2022-03-05 03:03:57,213] INFO Loading logs from log dirs ArraySeq(/var/lib/kafka/data) (kafka.log.LogManager)
[2022-03-05 03:03:57,218] INFO Attempting recovery for all logs in /var/lib/kafka/data since no clean shutdown file was found (kafka.log.LogManager)
[2022-03-05 03:03:57,230] INFO Loaded 0 logs in 17ms. (kafka.log.LogManager)
[2022-03-05 03:03:57,231] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
[2022-03-05 03:03:57,234] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[2022-03-05 03:03:57,255] INFO Starting the log cleaner (kafka.log.LogCleaner)
[2022-03-05 03:03:57,433] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner)
[2022-03-05 03:03:58,211] INFO Updated connection-accept-rate max connection creation rate to 2147483647 (kafka.network.ConnectionQuotas)
[2022-03-05 03:03:58,216] INFO Awaiting socket connections on 0.0.0.0:9091. (kafka.network.Acceptor)
[2022-03-05 03:03:58,257] INFO Successfully logged in. (org.apache.kafka.common.security.authenticator.AbstractLogin)
[2022-03-05 03:03:58,300] INFO [SocketServer listenerType=ZK_BROKER, nodeId=1] Created data-plane acceptor and processors for endpoint : ListenerName(EXTERNAL_SASL_PLAINTEXT) (kafka.network.SocketServer)
[2022-03-05 03:03:58,302] INFO Updated connection-accept-rate max connection creation rate to 2147483647 (kafka.network.ConnectionQuotas)
[2022-03-05 03:03:58,302] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
[2022-03-05 03:03:58,319] INFO [SocketServer listenerType=ZK_BROKER, nodeId=1] Created data-plane acceptor and processors for endpoint : ListenerName(INTERNAL_SASL_PLAINTEXT) (kafka.network.SocketServer)
[2022-03-05 03:03:58,370] INFO [broker-1-to-controller-send-thread]: Starting (kafka.server.BrokerToControllerRequestThread)
[2022-03-05 03:03:58,417] INFO [ExpirationReaper-1-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2022-03-05 03:03:58,417] INFO [ExpirationReaper-1-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2022-03-05 03:03:58,419] INFO [ExpirationReaper-1-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2022-03-05 03:03:58,420] INFO [ExpirationReaper-1-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2022-03-05 03:03:58,444] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2022-03-05 03:03:58,600] INFO Creating /brokers/ids/1 (is it secure? true) (kafka.zk.KafkaZkClient)
[2022-03-05 03:03:58,642] INFO Stat of the created znode at /brokers/ids/1 is: 4294967346,4294967346,1646449438621,1646449438621,1,0,0,72058093842989057,380,0,4294967346
 (kafka.zk.KafkaZkClient)
[2022-03-05 03:03:58,651] INFO Registered broker 1 at path /brokers/ids/1 with addresses: EXTERNAL_SASL_PLAINTEXT://10.253.0.184:30491,INTERNAL_SASL_PLAINTEXT://onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092, czxid (broker epoch): 4294967346 (kafka.zk.KafkaZkClient)
[2022-03-05 03:03:58,807] INFO [GroupCoordinator 1]: Starting up. (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:03:58,823] INFO [ExpirationReaper-1-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2022-03-05 03:03:58,823] INFO [ExpirationReaper-1-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2022-03-05 03:03:58,823] INFO [ControllerEventThread controllerId=1] Starting (kafka.controller.ControllerEventManager$ControllerEventThread)
[2022-03-05 03:03:58,824] INFO [ExpirationReaper-1-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2022-03-05 03:03:58,835] INFO [GroupCoordinator 1]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:03:58,862] DEBUG [Controller id=1] Broker 0 has been elected as the controller, so stopping the election process. (kafka.controller.KafkaController)
[2022-03-05 03:03:58,898] INFO [ProducerId Manager 1]: Acquired new producerId block (brokerId:1,blockStartProducerId:1000,blockEndProducerId:1999) by writing to Zk with path version 2 (kafka.coordinator.transaction.ProducerIdManager)
[2022-03-05 03:03:58,900] INFO [TransactionCoordinator id=1] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
[2022-03-05 03:03:58,931] INFO [TransactionCoordinator id=1] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
[2022-03-05 03:03:58,933] INFO [Transaction Marker Channel Manager 1]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2022-03-05 03:03:58,981] INFO [ExpirationReaper-1-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2022-03-05 03:03:59,028] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2022-03-05 03:03:59,058] INFO [SocketServer listenerType=ZK_BROKER, nodeId=1] Starting socket server acceptors and processors (kafka.network.SocketServer)
[2022-03-05 03:03:59,066] INFO [SocketServer listenerType=ZK_BROKER, nodeId=1] Started data-plane acceptor and processor(s) for endpoint : ListenerName(INTERNAL_SASL_PLAINTEXT) (kafka.network.SocketServer)
[2022-03-05 03:03:59,068] INFO [SocketServer listenerType=ZK_BROKER, nodeId=1] Started data-plane acceptor and processor(s) for endpoint : ListenerName(EXTERNAL_SASL_PLAINTEXT) (kafka.network.SocketServer)
[2022-03-05 03:03:59,069] INFO [SocketServer listenerType=ZK_BROKER, nodeId=1] Started socket server acceptors and processors (kafka.network.SocketServer)
[2022-03-05 03:03:59,075] INFO Kafka version: 6.2.0-ccs (org.apache.kafka.common.utils.AppInfoParser)
[2022-03-05 03:03:59,075] INFO Kafka commitId: 1a5755cf9401c84f (org.apache.kafka.common.utils.AppInfoParser)
[2022-03-05 03:03:59,075] INFO Kafka startTimeMs: 1646449439069 (org.apache.kafka.common.utils.AppInfoParser)
[2022-03-05 03:03:59,077] INFO [KafkaServer id=1] started (kafka.server.KafkaServer)
2022-03-05T03:05:29.619+0000 INIT [cadi] https.protocols set by cadi_protocols in CADI Properties
2022-03-05T03:05:29.620+0000 INIT [cadi] jdk.tls.client.protocols set from Default Protocols
2022-03-05T03:05:30.033+0000 INIT [cadi] X509 Chain
  0)
    Subject: C=US, O=ONAP, OU=OSAAF, OU=dmaapmr@mr.dmaap.onap.org:DEV, CN=dmaap-mr
    Issuer : CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
    Expires: Sun Mar 05 03:00:59 GMT 2023
  1)
    Subject: CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
    Issuer : C=US, O=ONAP, OU=OSAAF
    Expires: Thu Aug 17 18:51:37 GMT 2023

2022-03-05T03:05:30.037+0000 INIT [cadi] https.protocols loaded from System Properties
2022-03-05T03:05:30.037+0000 INIT [cadi] jdk.tls.client.protocols loaded from System Properties
2022-03-05T03:05:30.121+0000 INIT [cadi] RegistrationProperty: default_container='oom'
2022-03-05T03:05:30.121+0000 INIT [cadi] RegistrationProperty: public_port='null'
2022-03-05T03:05:30.122+0000 INIT [cadi] RegistrationProperty: hostname='onap-message-router-kafka-1'
2022-03-05T03:05:30.122+0000 INIT [cadi] RegistrationProperty: public_fqdn='mr.dmaap.onap.org'
2022-03-05T03:05:30.122+0000 INIT [cadi] RegistrationProperty: default_name='%NS.%N'
2022-03-05T03:05:30.122+0000 INIT [cadi] RegistrationProperty: latitude='37.78187'
2022-03-05T03:05:30.123+0000 INIT [cadi] RegistrationProperty: longitude='-122.26147'
2022-03-05T03:05:30.123+0000 INIT [cadi] RegistrationProperty: public_hostname(overloaded by )='mr.dmaap.onap.org'
2022-03-05T03:05:30.123+0000 INIT [cadi] RegistrationProperty: default_fqdn='dmaap-mr'
2022-03-05T03:05:30.123+0000 INIT [cadi] RegistrationProperty: default_container_ns='onap'
2022-03-05T03:05:30.662+0000 INIT [cadi] Cleaning Thread initialized with interval of 60000 ms and max objects of 1000
2022-03-05T03:05:30.667+0000 INIT [cadi] Both cadi-oauth jar and Property cadi_oauth2_url is required to initialize OAuth2
[2022-03-05 03:05:30,668] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:05:30,668] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:05:30,726] INFO [broker-1-to-controller-send-thread]: Recorded new controller, from now on will use broker onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread)
[2022-03-05 03:14:54,289] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:14:54,289] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:14:54,370] INFO Creating topic POLICY-CLRUNTIME-PARTICIPANT with configuration {} and initial partition assignment HashMap(0 -> ArrayBuffer(2, 0, 1), 1 -> ArrayBuffer(1, 2, 0), 2 -> ArrayBuffer(0, 1, 2)) (kafka.zk.AdminZkClient)
[2022-03-05 03:14:54,893] INFO Creating topic POLICY-PDP-PAP with configuration {} and initial partition assignment HashMap(0 -> ArrayBuffer(2, 1, 0), 1 -> ArrayBuffer(1, 0, 2), 2 -> ArrayBuffer(0, 2, 1)) (kafka.zk.AdminZkClient)
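The two topic-creation lines above reflect the broker settings from the configuration dump (num.partitions = 3, default.replication.factor = 3, auto.create.topics.enable = true): each topic gets three partitions with three replicas. As a hypothetical sketch only, an equivalent explicit creation via the Kafka AdminClient would look roughly like the following; connection properties (including the SASL settings from the previous sketch) are assumed, not shown in this log.

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;

// Hypothetical sketch: creating the same topics explicitly instead of relying
// on auto-creation. Partition and replication counts mirror the logged values.
public class CreatePolicyTopicsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers",
                "onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092");
        // SASL/PLAIN settings as in the earlier sketch would also be required here.
        try (AdminClient admin = AdminClient.create(props)) {
            admin.createTopics(List.of(
                    new NewTopic("POLICY-CLRUNTIME-PARTICIPANT", 3, (short) 3),
                    new NewTopic("POLICY-PDP-PAP", 3, (short) 3)
            )).all().get();
        }
    }
}
```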
[2022-03-05 03:14:55,164] INFO [Broker id=1] Handling LeaderAndIsr request correlationId 2 from controller 0 for 3 partitions (state.change.logger)
[2022-03-05 03:14:55,168] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=1, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 2 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:14:55,168] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 2 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:14:55,168] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=2, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 2 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:14:55,208] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 2 from controller 0 epoch 1 starting the become-leader transition for partition POLICY-CLRUNTIME-PARTICIPANT-1 (state.change.logger)
[2022-03-05 03:14:55,210] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(POLICY-CLRUNTIME-PARTICIPANT-1) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:14:55,217] INFO [Broker id=1] Stopped fetchers as part of LeaderAndIsr request correlationId 2 from controller 0 epoch 1 as part of the become-leader transition for 1 partitions (state.change.logger)
[2022-03-05 03:14:55,339] INFO [Log partition=POLICY-CLRUNTIME-PARTICIPANT-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:14:55,346] INFO Created log for partition POLICY-CLRUNTIME-PARTICIPANT-1 in /var/lib/kafka/data/POLICY-CLRUNTIME-PARTICIPANT-1 with properties {} (kafka.log.LogManager)
[2022-03-05 03:14:55,350] INFO [Partition POLICY-CLRUNTIME-PARTICIPANT-1 broker=1] No checkpointed highwatermark is found for partition POLICY-CLRUNTIME-PARTICIPANT-1 (kafka.cluster.Partition)
[2022-03-05 03:14:55,351] INFO [Partition POLICY-CLRUNTIME-PARTICIPANT-1 broker=1] Log loaded for partition POLICY-CLRUNTIME-PARTICIPANT-1 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:14:55,354] INFO [Broker id=1] Leader POLICY-CLRUNTIME-PARTICIPANT-1 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:14:55,376] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 2 from controller 0 epoch 1 for the become-leader transition for partition POLICY-CLRUNTIME-PARTICIPANT-1 (state.change.logger)
[2022-03-05 03:14:55,379] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 2 from controller 0 epoch 1 starting the become-follower transition for partition POLICY-CLRUNTIME-PARTICIPANT-2 with leader 0 (state.change.logger)
[2022-03-05 03:14:55,380] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 2 from controller 0 epoch 1 starting the become-follower transition for partition POLICY-CLRUNTIME-PARTICIPANT-0 with leader 2 (state.change.logger)
[2022-03-05 03:14:55,412] INFO [Log partition=POLICY-CLRUNTIME-PARTICIPANT-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:14:55,414] INFO Created log for partition POLICY-CLRUNTIME-PARTICIPANT-2 in /var/lib/kafka/data/POLICY-CLRUNTIME-PARTICIPANT-2 with properties {} (kafka.log.LogManager)
[2022-03-05 03:14:55,414] INFO [Partition POLICY-CLRUNTIME-PARTICIPANT-2 broker=1] No checkpointed highwatermark is found for partition POLICY-CLRUNTIME-PARTICIPANT-2 (kafka.cluster.Partition)
[2022-03-05 03:14:55,414] INFO [Partition POLICY-CLRUNTIME-PARTICIPANT-2 broker=1] Log loaded for partition POLICY-CLRUNTIME-PARTICIPANT-2 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:14:55,415] INFO [Broker id=1] Follower POLICY-CLRUNTIME-PARTICIPANT-2 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:14:55,440] INFO [Log partition=POLICY-CLRUNTIME-PARTICIPANT-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:14:55,442] INFO Created log for partition POLICY-CLRUNTIME-PARTICIPANT-0 in /var/lib/kafka/data/POLICY-CLRUNTIME-PARTICIPANT-0 with properties {} (kafka.log.LogManager)
[2022-03-05 03:14:55,442] INFO [Partition POLICY-CLRUNTIME-PARTICIPANT-0 broker=1] No checkpointed highwatermark is found for partition POLICY-CLRUNTIME-PARTICIPANT-0 (kafka.cluster.Partition)
[2022-03-05 03:14:55,442] INFO [Partition POLICY-CLRUNTIME-PARTICIPANT-0 broker=1] Log loaded for partition POLICY-CLRUNTIME-PARTICIPANT-0 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:14:55,443] INFO [Broker id=1] Follower POLICY-CLRUNTIME-PARTICIPANT-0 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:14:55,444] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions HashSet(POLICY-CLRUNTIME-PARTICIPANT-0, POLICY-CLRUNTIME-PARTICIPANT-2) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:14:55,444] INFO [Broker id=1] Stopped fetchers as part of become-follower request from controller 0 epoch 1 with correlation id 2 for 2 partitions (state.change.logger)
[2022-03-05 03:14:55,482] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:14:55,485] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker 2 for partitions Map(POLICY-CLRUNTIME-PARTICIPANT-0 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0)) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:14:55,488] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition POLICY-CLRUNTIME-PARTICIPANT-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:14:55,489] INFO [Log partition=POLICY-CLRUNTIME-PARTICIPANT-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:14:55,493] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker 0 for partitions Map(POLICY-CLRUNTIME-PARTICIPANT-2 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0)) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:14:55,493] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:14:55,495] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition POLICY-CLRUNTIME-PARTICIPANT-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:14:55,495] INFO [Log partition=POLICY-CLRUNTIME-PARTICIPANT-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:14:55,502] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 2 from controller 0 epoch 1 for the become-follower transition for partition POLICY-CLRUNTIME-PARTICIPANT-2 with leader 0 (state.change.logger)
[2022-03-05 03:14:55,503] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 2 from controller 0 epoch 1 for the become-follower transition for partition POLICY-CLRUNTIME-PARTICIPANT-0 with leader 2 (state.change.logger)
[2022-03-05 03:14:55,538] INFO [Broker id=1] Finished LeaderAndIsr request in 378ms correlationId 2 from controller 0 for 3 partitions (state.change.logger)
[2022-03-05 03:14:55,563] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=1, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-1 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 3 (state.change.logger)
[2022-03-05 03:14:55,563] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 3 (state.change.logger)
[2022-03-05 03:14:55,563] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=2, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-2 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 3 (state.change.logger)
[2022-03-05 03:14:55,564] INFO [Broker id=1] Add 3 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 3 (state.change.logger)
[2022-03-05 03:14:55,633] WARN [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Received UNKNOWN_TOPIC_OR_PARTITION from the leader for partition POLICY-CLRUNTIME-PARTICIPANT-2. This error may be returned transiently when the partition is being created or deleted, but it is not expected to persist. (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:14:55,636] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Received UNKNOWN_TOPIC_OR_PARTITION from the leader for partition POLICY-CLRUNTIME-PARTICIPANT-0. This error may be returned transiently when the partition is being created or deleted, but it is not expected to persist. (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:14:55,845] INFO [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 0 for 3 partitions (state.change.logger)
[2022-03-05 03:14:55,845] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=1, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 4 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:14:55,845] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=2, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 4 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:14:55,845] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 4 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:14:55,850] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 0 epoch 1 starting the become-leader transition for partition POLICY-PDP-PAP-1 (state.change.logger)
[2022-03-05 03:14:55,852] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(POLICY-PDP-PAP-1) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:14:55,852] INFO [Broker id=1] Stopped fetchers as part of LeaderAndIsr request correlationId 4 from controller 0 epoch 1 as part of the become-leader transition for 1 partitions (state.change.logger)
[2022-03-05 03:14:55,891] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:14:55,894] INFO Created log for partition POLICY-PDP-PAP-1 in /var/lib/kafka/data/POLICY-PDP-PAP-1 with properties {} (kafka.log.LogManager)
[2022-03-05 03:14:55,903] INFO [Partition POLICY-PDP-PAP-1 broker=1] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-1 (kafka.cluster.Partition)
[2022-03-05 03:14:55,903] INFO [Partition POLICY-PDP-PAP-1 broker=1] Log loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:14:55,903] INFO [Broker id=1] Leader POLICY-PDP-PAP-1 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,0,2] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:14:55,913] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 0 epoch 1 for the become-leader transition for partition POLICY-PDP-PAP-1 (state.change.logger)
[2022-03-05 03:14:55,913] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 0 epoch 1 starting the become-follower transition for partition POLICY-PDP-PAP-0 with leader 2 (state.change.logger)
[2022-03-05 03:14:55,913] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 0 epoch 1 starting the become-follower transition for partition POLICY-PDP-PAP-2 with leader 0 (state.change.logger)
[2022-03-05 03:14:55,947] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:14:55,952] INFO Created log for partition POLICY-PDP-PAP-0 in /var/lib/kafka/data/POLICY-PDP-PAP-0 with properties {} (kafka.log.LogManager)
[2022-03-05 03:14:55,952] INFO [Partition POLICY-PDP-PAP-0 broker=1] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-0 (kafka.cluster.Partition)
[2022-03-05 03:14:55,952] INFO [Partition POLICY-PDP-PAP-0 broker=1] Log loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:14:55,952] INFO [Broker id=1] Follower POLICY-PDP-PAP-0 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:14:56,018] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:14:56,020] INFO Created log for partition POLICY-PDP-PAP-2 in /var/lib/kafka/data/POLICY-PDP-PAP-2 with properties {} (kafka.log.LogManager)
[2022-03-05 03:14:56,020] INFO [Partition POLICY-PDP-PAP-2 broker=1] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-2 (kafka.cluster.Partition)
[2022-03-05 03:14:56,020] INFO [Partition POLICY-PDP-PAP-2 broker=1] Log loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:14:56,020] INFO [Broker id=1] Follower POLICY-PDP-PAP-2 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:14:56,021] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions HashSet(POLICY-PDP-PAP-2, POLICY-PDP-PAP-0) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:14:56,025] INFO [Broker id=1] Stopped fetchers as part of become-follower request from controller 0 epoch 1 with correlation id 4 for 2 partitions (state.change.logger)
[2022-03-05 03:14:56,027] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker 2 for partitions Map(POLICY-PDP-PAP-0 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0)) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:14:56,027] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition POLICY-PDP-PAP-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:14:56,033] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker 0 for partitions Map(POLICY-PDP-PAP-2 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0)) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:14:56,033] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition POLICY-PDP-PAP-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:14:56,033] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 0 epoch 1 for the become-follower transition for partition POLICY-PDP-PAP-0 with leader 2 (state.change.logger)
[2022-03-05 03:14:56,033] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:14:56,033] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:14:56,033] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 0 epoch 1 for the become-follower transition for partition POLICY-PDP-PAP-2 with leader 0 (state.change.logger)
[2022-03-05 03:14:56,035] INFO [Broker id=1] Finished LeaderAndIsr request in 191ms correlationId 4 from controller 0 for 3 partitions (state.change.logger)
[2022-03-05 03:14:56,038] WARN [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Received UNKNOWN_TOPIC_OR_PARTITION from the leader for partition POLICY-PDP-PAP-2. This error may be returned transiently when the partition is being created or deleted, but it is not expected to persist. (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:14:56,039] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=1, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 5 (state.change.logger)
[2022-03-05 03:14:56,039] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=2, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-2 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 5 (state.change.logger)
[2022-03-05 03:14:56,039] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 5 (state.change.logger)
[2022-03-05 03:14:56,039] INFO [Broker id=1] Add 3 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 5 (state.change.logger)
[2022-03-05 03:14:56,041] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:14:56,042] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:14:56,042] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Received UNKNOWN_TOPIC_OR_PARTITION from the leader for partition POLICY-PDP-PAP-0. This error may be returned transiently when the partition is being created or deleted, but it is not expected to persist. (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:14:56,086] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:14:56,086] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:00,431] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:00,431] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:02,210] INFO [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 for 50 partitions (state.change.logger)
[2022-03-05 03:15:02,211] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=13, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,211] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=46, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,211] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=9, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,211] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=42, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,211] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=21, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,211] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=17, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,211] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=30, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=26, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=5, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=38, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=1, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=34, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=16, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=45, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=12, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=41, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=24, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,212] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=20, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,220] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=49, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,220] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,220] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=29, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,220] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=25, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,221] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=8, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,221] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=37, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,221] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=4, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,222] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=33, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,222] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=15, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,222] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=48, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,222] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=11, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,222] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=44, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,222] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=23, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,222] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=19, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,222] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=32, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,222] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=28, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,222] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=7, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=40, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=3, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=36, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=47, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=14, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=43, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=10, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=22, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=18, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=31, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=27, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,223] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=39, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,224] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=6, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,224] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=35, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,224] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=2, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 6 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-37 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-7 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-22 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-10 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-31 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-46 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-1 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-16 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-19 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-34 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-4 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-25 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-40 (state.change.logger)
[2022-03-05 03:15:02,328] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-43 (state.change.logger)
[2022-03-05 03:15:02,329] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-13 (state.change.logger)
[2022-03-05 03:15:02,329] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-28 (state.change.logger)
[2022-03-05 03:15:02,329] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-leader transition for partition __consumer_offsets-49 (state.change.logger)
[2022-03-05 03:15:02,330] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions HashSet(__consumer_offsets-22, __consumer_offsets-4, __consumer_offsets-25, __consumer_offsets-49, __consumer_offsets-31, __consumer_offsets-37, __consumer_offsets-19, __consumer_offsets-13, __consumer_offsets-43, __consumer_offsets-1, __consumer_offsets-34, __consumer_offsets-7, __consumer_offsets-46, __consumer_offsets-16, __consumer_offsets-28, __consumer_offsets-10, __consumer_offsets-40) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:15:02,331] INFO [Broker id=1] Stopped fetchers as part of LeaderAndIsr request correlationId 6 from controller 0 epoch 1 as part of the become-leader transition for 17 partitions (state.change.logger)
[2022-03-05 03:15:02,360] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,362] INFO Created log for partition __consumer_offsets-37 in /var/lib/kafka/data/__consumer_offsets-37 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,365] INFO [Partition __consumer_offsets-37 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-37 (kafka.cluster.Partition)
[2022-03-05 03:15:02,366] INFO [Partition __consumer_offsets-37 broker=1] Log loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,366] INFO [Broker id=1] Leader __consumer_offsets-37 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:02,429] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,431] INFO Created log for partition __consumer_offsets-7 in /var/lib/kafka/data/__consumer_offsets-7 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,431] INFO [Partition __consumer_offsets-7 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-7 (kafka.cluster.Partition)
[2022-03-05 03:15:02,431] INFO [Partition __consumer_offsets-7 broker=1] Log loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,431] INFO [Broker id=1] Leader __consumer_offsets-7 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:02,500] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,503] INFO Created log for partition __consumer_offsets-22 in /var/lib/kafka/data/__consumer_offsets-22 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,503] INFO [Partition __consumer_offsets-22 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-22 (kafka.cluster.Partition)
[2022-03-05 03:15:02,503] INFO [Partition __consumer_offsets-22 broker=1] Log loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,503] INFO [Broker id=1] Leader __consumer_offsets-22 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,0,2] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:02,584] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,587] INFO Created log for partition __consumer_offsets-10 in /var/lib/kafka/data/__consumer_offsets-10 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,588] INFO [Partition __consumer_offsets-10 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-10 (kafka.cluster.Partition)
[2022-03-05 03:15:02,588] INFO [Partition __consumer_offsets-10 broker=1] Log loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,588] INFO [Broker id=1] Leader __consumer_offsets-10 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,0,2] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:02,699] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,710] INFO Created log for partition __consumer_offsets-31 in /var/lib/kafka/data/__consumer_offsets-31 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,710] INFO [Partition __consumer_offsets-31 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-31 (kafka.cluster.Partition)
[2022-03-05 03:15:02,710] INFO [Partition __consumer_offsets-31 broker=1] Log loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,712] INFO [Broker id=1] Leader __consumer_offsets-31 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:02,769] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,773] INFO Created log for partition __consumer_offsets-46 in /var/lib/kafka/data/__consumer_offsets-46 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,773] INFO [Partition __consumer_offsets-46 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-46 (kafka.cluster.Partition)
[2022-03-05 03:15:02,773] INFO [Partition __consumer_offsets-46 broker=1] Log loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,773] INFO [Broker id=1] Leader __consumer_offsets-46 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,0,2] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:02,791] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:02,792] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:02,842] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,844] INFO Created log for partition __consumer_offsets-1 in /var/lib/kafka/data/__consumer_offsets-1 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,844] INFO [Partition __consumer_offsets-1 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-1 (kafka.cluster.Partition)
[2022-03-05 03:15:02,845] INFO [Partition __consumer_offsets-1 broker=1] Log loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,845] INFO [Broker id=1] Leader __consumer_offsets-1 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:02,885] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,887] INFO Created log for partition __consumer_offsets-16 in /var/lib/kafka/data/__consumer_offsets-16 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,887] INFO [Partition __consumer_offsets-16 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-16 (kafka.cluster.Partition)
[2022-03-05 03:15:02,887] INFO [Partition __consumer_offsets-16 broker=1] Log loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,888] INFO [Broker id=1] Leader __consumer_offsets-16 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,0,2] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:02,925] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,930] INFO Created log for partition __consumer_offsets-19 in /var/lib/kafka/data/__consumer_offsets-19 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,930] INFO [Partition __consumer_offsets-19 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-19 (kafka.cluster.Partition)
[2022-03-05 03:15:02,931] INFO [Partition __consumer_offsets-19 broker=1] Log loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,931] INFO [Broker id=1] Leader __consumer_offsets-19 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:02,960] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,962] INFO Created log for partition __consumer_offsets-34 in /var/lib/kafka/data/__consumer_offsets-34 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,963] INFO [Partition __consumer_offsets-34 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-34 (kafka.cluster.Partition)
[2022-03-05 03:15:02,963] INFO [Partition __consumer_offsets-34 broker=1] Log loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,963] INFO [Broker id=1] Leader __consumer_offsets-34 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,0,2] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:02,994] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:02,999] INFO Created log for partition __consumer_offsets-4 in /var/lib/kafka/data/__consumer_offsets-4 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:02,999] INFO [Partition __consumer_offsets-4 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-4 (kafka.cluster.Partition)
[2022-03-05 03:15:02,999] INFO [Partition __consumer_offsets-4 broker=1] Log loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:02,999] INFO [Broker id=1] Leader __consumer_offsets-4 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,0,2] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,042] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,045] INFO Created log for partition __consumer_offsets-25 in /var/lib/kafka/data/__consumer_offsets-25 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,045] INFO [Partition __consumer_offsets-25 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-25 (kafka.cluster.Partition)
[2022-03-05 03:15:03,045] INFO [Partition __consumer_offsets-25 broker=1] Log loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,045] INFO [Broker id=1] Leader __consumer_offsets-25 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,071] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,073] INFO Created log for partition __consumer_offsets-40 in /var/lib/kafka/data/__consumer_offsets-40 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,073] INFO [Partition __consumer_offsets-40 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-40 (kafka.cluster.Partition)
[2022-03-05 03:15:03,074] INFO [Partition __consumer_offsets-40 broker=1] Log loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,074] INFO [Broker id=1] Leader __consumer_offsets-40 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,0,2] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,110] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,112] INFO Created log for partition __consumer_offsets-43 in /var/lib/kafka/data/__consumer_offsets-43 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,113] INFO [Partition __consumer_offsets-43 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-43 (kafka.cluster.Partition)
[2022-03-05 03:15:03,113] INFO [Partition __consumer_offsets-43 broker=1] Log loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,113] INFO [Broker id=1] Leader __consumer_offsets-43 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,146] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,150] INFO Created log for partition __consumer_offsets-13 in /var/lib/kafka/data/__consumer_offsets-13 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,150] INFO [Partition __consumer_offsets-13 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-13 (kafka.cluster.Partition)
[2022-03-05 03:15:03,150] INFO [Partition __consumer_offsets-13 broker=1] Log loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,150] INFO [Broker id=1] Leader __consumer_offsets-13 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,244] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,247] INFO Created log for partition __consumer_offsets-28 in /var/lib/kafka/data/__consumer_offsets-28 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,247] INFO [Partition __consumer_offsets-28 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-28 (kafka.cluster.Partition)
[2022-03-05 03:15:03,247] INFO [Partition __consumer_offsets-28 broker=1] Log loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,248] INFO [Broker id=1] Leader __consumer_offsets-28 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,0,2] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,322] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,324] INFO Created log for partition __consumer_offsets-49 in /var/lib/kafka/data/__consumer_offsets-49 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,324] INFO [Partition __consumer_offsets-49 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-49 (kafka.cluster.Partition)
[2022-03-05 03:15:03,325] INFO [Partition __consumer_offsets-49 broker=1] Log loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,325] INFO [Broker id=1] Leader __consumer_offsets-49 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,330] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-37 (state.change.logger)
[2022-03-05 03:15:03,330] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-7 (state.change.logger)
[2022-03-05 03:15:03,330] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-22 (state.change.logger)
[2022-03-05 03:15:03,330] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-10 (state.change.logger)
[2022-03-05 03:15:03,330] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-31 (state.change.logger)
[2022-03-05 03:15:03,330] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-46 (state.change.logger)
[2022-03-05 03:15:03,330] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-1 (state.change.logger)
[2022-03-05 03:15:03,330] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-16 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-19 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-34 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-4 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-25 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-40 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-43 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-13 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-28 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-leader transition for partition __consumer_offsets-49 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-3 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-18 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-41 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,331] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-29 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,332] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-44 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,332] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-14 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,332] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-33 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,332] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-48 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,332] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-23 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,332] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-38 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,332] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-8 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,333] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-11 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,333] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-26 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,333] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-45 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,333] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-15 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,333] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-30 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,333] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-0 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,333] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-35 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,333] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-5 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,334] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-20 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,334] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-39 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,334] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-9 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,334] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-24 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,334] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-27 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,334] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-42 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,334] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-12 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,334] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-2 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,335] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-21 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,335] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-36 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,335] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-6 with leader 2 (state.change.logger)
[2022-03-05 03:15:03,335] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-47 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,335] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-17 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,335] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 0 epoch 1 starting the become-follower transition for partition __consumer_offsets-32 with leader 0 (state.change.logger)
[2022-03-05 03:15:03,369] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,370] INFO Created log for partition __consumer_offsets-3 in /var/lib/kafka/data/__consumer_offsets-3 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,371] INFO [Partition __consumer_offsets-3 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-3 (kafka.cluster.Partition)
[2022-03-05 03:15:03,371] INFO [Partition __consumer_offsets-3 broker=1] Log loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,371] INFO [Broker id=1] Follower __consumer_offsets-3 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,415] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,417] INFO Created log for partition __consumer_offsets-18 in /var/lib/kafka/data/__consumer_offsets-18 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,417] INFO [Partition __consumer_offsets-18 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-18 (kafka.cluster.Partition)
[2022-03-05 03:15:03,417] INFO [Partition __consumer_offsets-18 broker=1] Log loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,417] INFO [Broker id=1] Follower __consumer_offsets-18 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,440] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,442] INFO Created log for partition __consumer_offsets-41 in /var/lib/kafka/data/__consumer_offsets-41 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,442] INFO [Partition __consumer_offsets-41 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-41 (kafka.cluster.Partition)
[2022-03-05 03:15:03,442] INFO [Partition __consumer_offsets-41 broker=1] Log loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,442] INFO [Broker id=1] Follower __consumer_offsets-41 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,479] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,481] INFO Created log for partition __consumer_offsets-29 in /var/lib/kafka/data/__consumer_offsets-29 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,481] INFO [Partition __consumer_offsets-29 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-29 (kafka.cluster.Partition)
[2022-03-05 03:15:03,481] INFO [Partition __consumer_offsets-29 broker=1] Log loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,481] INFO [Broker id=1] Follower __consumer_offsets-29 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,515] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,517] INFO Created log for partition __consumer_offsets-44 in /var/lib/kafka/data/__consumer_offsets-44 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,517] INFO [Partition __consumer_offsets-44 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-44 (kafka.cluster.Partition)
[2022-03-05 03:15:03,517] INFO [Partition __consumer_offsets-44 broker=1] Log loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,517] INFO [Broker id=1] Follower __consumer_offsets-44 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,547] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,550] INFO Created log for partition __consumer_offsets-14 in /var/lib/kafka/data/__consumer_offsets-14 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,550] INFO [Partition __consumer_offsets-14 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-14 (kafka.cluster.Partition)
[2022-03-05 03:15:03,550] INFO [Partition __consumer_offsets-14 broker=1] Log loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,550] INFO [Broker id=1] Follower __consumer_offsets-14 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,579] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,581] INFO Created log for partition __consumer_offsets-33 in /var/lib/kafka/data/__consumer_offsets-33 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,582] INFO [Partition __consumer_offsets-33 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-33 (kafka.cluster.Partition)
[2022-03-05 03:15:03,582] INFO [Partition __consumer_offsets-33 broker=1] Log loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,582] INFO [Broker id=1] Follower __consumer_offsets-33 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,619] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,622] INFO Created log for partition __consumer_offsets-48 in /var/lib/kafka/data/__consumer_offsets-48 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,622] INFO [Partition __consumer_offsets-48 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-48 (kafka.cluster.Partition)
[2022-03-05 03:15:03,622] INFO [Partition __consumer_offsets-48 broker=1] Log loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,622] INFO [Broker id=1] Follower __consumer_offsets-48 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,651] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,653] INFO Created log for partition __consumer_offsets-23 in /var/lib/kafka/data/__consumer_offsets-23 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,653] INFO [Partition __consumer_offsets-23 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-23 (kafka.cluster.Partition)
[2022-03-05 03:15:03,653] INFO [Partition __consumer_offsets-23 broker=1] Log loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,654] INFO [Broker id=1] Follower __consumer_offsets-23 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,688] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,690] INFO Created log for partition __consumer_offsets-38 in /var/lib/kafka/data/__consumer_offsets-38 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,690] INFO [Partition __consumer_offsets-38 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-38 (kafka.cluster.Partition)
[2022-03-05 03:15:03,690] INFO [Partition __consumer_offsets-38 broker=1] Log loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,690] INFO [Broker id=1] Follower __consumer_offsets-38 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,769] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,773] INFO Created log for partition __consumer_offsets-8 in /var/lib/kafka/data/__consumer_offsets-8 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,774] INFO [Partition __consumer_offsets-8 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-8 (kafka.cluster.Partition)
[2022-03-05 03:15:03,774] INFO [Partition __consumer_offsets-8 broker=1] Log loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,774] INFO [Broker id=1] Follower __consumer_offsets-8 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,833] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,835] INFO Created log for partition __consumer_offsets-11 in /var/lib/kafka/data/__consumer_offsets-11 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,835] INFO [Partition __consumer_offsets-11 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-11 (kafka.cluster.Partition)
[2022-03-05 03:15:03,835] INFO [Partition __consumer_offsets-11 broker=1] Log loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,835] INFO [Broker id=1] Follower __consumer_offsets-11 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,866] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,868] INFO Created log for partition __consumer_offsets-26 in /var/lib/kafka/data/__consumer_offsets-26 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,868] INFO [Partition __consumer_offsets-26 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-26 (kafka.cluster.Partition)
[2022-03-05 03:15:03,869] INFO [Partition __consumer_offsets-26 broker=1] Log loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,869] INFO [Broker id=1] Follower __consumer_offsets-26 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,899] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,902] INFO Created log for partition __consumer_offsets-45 in /var/lib/kafka/data/__consumer_offsets-45 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,902] INFO [Partition __consumer_offsets-45 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-45 (kafka.cluster.Partition)
[2022-03-05 03:15:03,902] INFO [Partition __consumer_offsets-45 broker=1] Log loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,902] INFO [Broker id=1] Follower __consumer_offsets-45 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,947] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:03,949] INFO Created log for partition __consumer_offsets-15 in /var/lib/kafka/data/__consumer_offsets-15 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:03,949] INFO [Partition __consumer_offsets-15 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-15 (kafka.cluster.Partition)
[2022-03-05 03:15:03,949] INFO [Partition __consumer_offsets-15 broker=1] Log loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:03,955] INFO [Broker id=1] Follower __consumer_offsets-15 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:03,999] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,003] INFO Created log for partition __consumer_offsets-30 in /var/lib/kafka/data/__consumer_offsets-30 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,003] INFO [Partition __consumer_offsets-30 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-30 (kafka.cluster.Partition)
[2022-03-05 03:15:04,004] INFO [Partition __consumer_offsets-30 broker=1] Log loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,004] INFO [Broker id=1] Follower __consumer_offsets-30 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,092] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,098] INFO Created log for partition __consumer_offsets-0 in /var/lib/kafka/data/__consumer_offsets-0 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,098] INFO [Partition __consumer_offsets-0 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,098] INFO [Partition __consumer_offsets-0 broker=1] Log loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,098] INFO [Broker id=1] Follower __consumer_offsets-0 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,152] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,154] INFO Created log for partition __consumer_offsets-35 in /var/lib/kafka/data/__consumer_offsets-35 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,154] INFO [Partition __consumer_offsets-35 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-35 (kafka.cluster.Partition)
[2022-03-05 03:15:04,154] INFO [Partition __consumer_offsets-35 broker=1] Log loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,155] INFO [Broker id=1] Follower __consumer_offsets-35 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,186] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,188] INFO Created log for partition __consumer_offsets-5 in /var/lib/kafka/data/__consumer_offsets-5 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,188] INFO [Partition __consumer_offsets-5 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-5 (kafka.cluster.Partition)
[2022-03-05 03:15:04,188] INFO [Partition __consumer_offsets-5 broker=1] Log loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,188] INFO [Broker id=1] Follower __consumer_offsets-5 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,219] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,222] INFO Created log for partition __consumer_offsets-20 in /var/lib/kafka/data/__consumer_offsets-20 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,222] INFO [Partition __consumer_offsets-20 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-20 (kafka.cluster.Partition)
[2022-03-05 03:15:04,222] INFO [Partition __consumer_offsets-20 broker=1] Log loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,222] INFO [Broker id=1] Follower __consumer_offsets-20 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,256] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,258] INFO Created log for partition __consumer_offsets-39 in /var/lib/kafka/data/__consumer_offsets-39 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,258] INFO [Partition __consumer_offsets-39 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-39 (kafka.cluster.Partition)
[2022-03-05 03:15:04,258] INFO [Partition __consumer_offsets-39 broker=1] Log loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,258] INFO [Broker id=1] Follower __consumer_offsets-39 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,298] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,302] INFO Created log for partition __consumer_offsets-9 in /var/lib/kafka/data/__consumer_offsets-9 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,303] INFO [Partition __consumer_offsets-9 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-9 (kafka.cluster.Partition)
[2022-03-05 03:15:04,303] INFO [Partition __consumer_offsets-9 broker=1] Log loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,303] INFO [Broker id=1] Follower __consumer_offsets-9 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,383] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,386] INFO Created log for partition __consumer_offsets-24 in /var/lib/kafka/data/__consumer_offsets-24 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,386] INFO [Partition __consumer_offsets-24 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-24 (kafka.cluster.Partition)
[2022-03-05 03:15:04,386] INFO [Partition __consumer_offsets-24 broker=1] Log loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,387] INFO [Broker id=1] Follower __consumer_offsets-24 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,422] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,496] INFO Created log for partition __consumer_offsets-27 in /var/lib/kafka/data/__consumer_offsets-27 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,496] INFO [Partition __consumer_offsets-27 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-27 (kafka.cluster.Partition)
[2022-03-05 03:15:04,496] INFO [Partition __consumer_offsets-27 broker=1] Log loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,497] INFO [Broker id=1] Follower __consumer_offsets-27 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,550] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,553] INFO Created log for partition __consumer_offsets-42 in /var/lib/kafka/data/__consumer_offsets-42 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,553] INFO [Partition __consumer_offsets-42 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-42 (kafka.cluster.Partition)
[2022-03-05 03:15:04,553] INFO [Partition __consumer_offsets-42 broker=1] Log loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,553] INFO [Broker id=1] Follower __consumer_offsets-42 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,609] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,611] INFO Created log for partition __consumer_offsets-12 in /var/lib/kafka/data/__consumer_offsets-12 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,611] INFO [Partition __consumer_offsets-12 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-12 (kafka.cluster.Partition)
[2022-03-05 03:15:04,611] INFO [Partition __consumer_offsets-12 broker=1] Log loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,611] INFO [Broker id=1] Follower __consumer_offsets-12 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,718] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,722] INFO Created log for partition __consumer_offsets-2 in /var/lib/kafka/data/__consumer_offsets-2 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,723] INFO [Partition __consumer_offsets-2 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-2 (kafka.cluster.Partition)
[2022-03-05 03:15:04,723] INFO [Partition __consumer_offsets-2 broker=1] Log loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,723] INFO [Broker id=1] Follower __consumer_offsets-2 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,756] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,758] INFO Created log for partition __consumer_offsets-21 in /var/lib/kafka/data/__consumer_offsets-21 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,758] INFO [Partition __consumer_offsets-21 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-21 (kafka.cluster.Partition)
[2022-03-05 03:15:04,758] INFO [Partition __consumer_offsets-21 broker=1] Log loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,758] INFO [Broker id=1] Follower __consumer_offsets-21 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,837] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,847] INFO Created log for partition __consumer_offsets-36 in /var/lib/kafka/data/__consumer_offsets-36 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,847] INFO [Partition __consumer_offsets-36 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-36 (kafka.cluster.Partition)
[2022-03-05 03:15:04,847] INFO [Partition __consumer_offsets-36 broker=1] Log loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,847] INFO [Broker id=1] Follower __consumer_offsets-36 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,870] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,872] INFO Created log for partition __consumer_offsets-6 in /var/lib/kafka/data/__consumer_offsets-6 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,872] INFO [Partition __consumer_offsets-6 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-6 (kafka.cluster.Partition)
[2022-03-05 03:15:04,872] INFO [Partition __consumer_offsets-6 broker=1] Log loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,872] INFO [Broker id=1] Follower __consumer_offsets-6 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,884] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:04,884] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:04,927] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,929] INFO Created log for partition __consumer_offsets-47 in /var/lib/kafka/data/__consumer_offsets-47 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,929] INFO [Partition __consumer_offsets-47 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-47 (kafka.cluster.Partition)
[2022-03-05 03:15:04,929] INFO [Partition __consumer_offsets-47 broker=1] Log loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,929] INFO [Broker id=1] Follower __consumer_offsets-47 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,957] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,959] INFO Created log for partition __consumer_offsets-17 in /var/lib/kafka/data/__consumer_offsets-17 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,959] INFO [Partition __consumer_offsets-17 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-17 (kafka.cluster.Partition)
[2022-03-05 03:15:04,959] INFO [Partition __consumer_offsets-17 broker=1] Log loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,959] INFO [Broker id=1] Follower __consumer_offsets-17 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,992] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:04,998] INFO Created log for partition __consumer_offsets-32 in /var/lib/kafka/data/__consumer_offsets-32 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager)
[2022-03-05 03:15:04,998] INFO [Partition __consumer_offsets-32 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-32 (kafka.cluster.Partition)
[2022-03-05 03:15:04,998] INFO [Partition __consumer_offsets-32 broker=1] Log loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:04,998] INFO [Broker id=1] Follower __consumer_offsets-32 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:04,999] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions HashSet(__consumer_offsets-15, __consumer_offsets-48, __consumer_offsets-11, __consumer_offsets-44, __consumer_offsets-9, __consumer_offsets-42, __consumer_offsets-23, __consumer_offsets-21, __consumer_offsets-17, __consumer_offsets-32, __consumer_offsets-30, __consumer_offsets-26, __consumer_offsets-5, __consumer_offsets-38, __consumer_offsets-3, __consumer_offsets-36, __consumer_offsets-47, __consumer_offsets-45, __consumer_offsets-14, __consumer_offsets-12, __consumer_offsets-41, __consumer_offsets-24, __consumer_offsets-20, __consumer_offsets-18, __consumer_offsets-0, __consumer_offsets-29, __consumer_offsets-27, __consumer_offsets-39, __consumer_offsets-8, __consumer_offsets-6, __consumer_offsets-35, __consumer_offsets-33, __consumer_offsets-2) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:15:04,999] INFO [Broker id=1] Stopped fetchers as part of become-follower request from controller 0 epoch 1 with correlation id 6 for 33 partitions (state.change.logger)
[2022-03-05 03:15:05,035] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker 2 for partitions HashMap(__consumer_offsets-30 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-21 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-33 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-36 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-48 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-6 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-0 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-45 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-27 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-9 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-42 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-3 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-18 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-15 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-24 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-39 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-12 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0)) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:15:05,037] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker 0 for partitions HashMap(__consumer_offsets-8 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-35 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-47 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-38 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-17 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-11 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-29 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-32 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-41 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-23 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-2 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-14 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-20 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-44 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-5 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0), __consumer_offsets-26 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0)) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:15:05,037] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-3 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,037] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-18 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,037] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-41 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,037] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-29 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,037] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-44 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,037] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-14 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,037] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-33 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,037] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-48 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,037] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-23 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,037] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-38 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-8 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-11 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-26 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-45 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-15 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-30 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-0 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-35 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-5 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-20 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-39 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-9 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-24 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,038] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-27 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,039] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-42 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,039] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-12 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,039] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-2 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,039] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-21 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,039] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-36 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,039] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-6 with leader 2 (state.change.logger)
[2022-03-05 03:15:05,039] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-47 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,039] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-17 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,039] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 0 epoch 1 for the become-follower transition for partition __consumer_offsets-32 with leader 0 (state.change.logger)
[2022-03-05 03:15:05,041] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 37 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,044] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,047] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 7 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,047] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,047] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 22 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,047] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,047] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 10 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,047] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,047] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 31 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,047] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,047] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 46 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,047] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,047] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 1 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,048] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,048] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 16 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,048] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,048] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 19 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,048] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,048] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 34 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,048] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,048] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 4 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,048] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,048] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 25 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,048] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,049] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 40 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,049] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,049] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 43 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,049] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,049] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 13 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,049] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,049] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 28 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,049] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,049] INFO [GroupCoordinator 1]: Elected as the group coordinator for partition 49 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,049] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,049] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 3 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,050] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,052] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 18 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,052] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,052] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 41 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,052] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,052] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 29 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,052] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,053] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 44 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,053] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,053] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 14 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,053] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,053] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 33 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,053] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,053] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 48 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,053] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,053] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 23 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,053] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,053] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 38 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,053] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,053] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 8 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,053] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,054] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 11 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,054] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,054] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 26 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,054] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,054] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 45 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,054] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,054] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 15 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,054] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,054] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 30 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,054] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,054] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 0 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,054] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,054] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 35 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,054] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,054] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 5 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,054] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,054] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 20 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,054] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,055] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 39 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,055] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,055] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 9 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,055] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,055] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 24 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,055] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,055] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 27 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,055] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,055] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-37 in 10 milliseconds, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,056] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 42 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,056] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,056] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 12 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,056] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,056] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 2 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,056] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,056] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-7 in 9 milliseconds, of which 9 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,056] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 21 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,056] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,056] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 36 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,056] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,057] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 6 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,057] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,057] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 47 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,057] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,057] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 17 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,057] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,057] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 32 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,057] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,058] INFO [Broker id=1] Finished LeaderAndIsr request in 2847ms correlationId 6 from controller 0 for 50 partitions (state.change.logger)
[2022-03-05 03:15:05,056] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-22 in 9 milliseconds, of which 9 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,059] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-10 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,059] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-31 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,060] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-46 in 13 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,060] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-1 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,060] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-16 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,060] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-19 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,060] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-34 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,061] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-4 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,061] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-25 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,061] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-40 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,062] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-43 in 12 milliseconds, of which 12 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,062] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-13 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,062] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-28 in 13 milliseconds, of which 13 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,063] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=13, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-13 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=46, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=9, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=42, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=21, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-49 in 21 milliseconds, of which 20 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=17, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-17 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=30, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=26, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=5, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-5 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=38, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-38 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=1, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=34, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-34 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=16, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=45, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=12, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,070] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=41, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=24, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=20, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-20 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=49, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-49 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=29, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=25, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-25 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=8, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-8 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=37, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=4, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-4 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=33, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=15, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-15 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=48, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=11, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-11 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,071] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=44, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-44 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=23, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-23 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=19, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-19 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=32, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-32 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=28, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-28 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=7, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=40, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-40 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=3, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=36, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-36 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=47, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-47 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=14, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-14 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=43, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-43 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=10, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-10 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=22, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-22 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=18, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=31, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-31 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=27, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=39, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=6, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=35, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-35 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,072] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=2, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-2 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,073] INFO [Broker id=1] Add 50 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 7 (state.change.logger)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-3. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-18. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-41. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-29. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-44. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-14. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-33. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-48. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-23. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-38. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-8. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-11. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-26. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-45. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,074] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-15. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-30. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-0. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-35. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-5. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-20. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-39. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-9. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-24. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-27. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-42. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-12. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-2. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-21. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-36. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-6. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-47. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-17. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,075] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-32. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 03:15:05,137] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:05,137] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:05,174] INFO [GroupCoordinator 1]: Preparing to rebalance group policy-pap--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-37) (reason: Adding new member da5a1f8e-f582-4c85-bdae-f48a6f872f7a-d9c672b3-85ce-45df-b533-42af4a1614dd with group instance id None) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:05,187] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-47 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,188] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,188] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-14 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,188] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,188] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-11 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,188] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,188] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-44 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,188] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,188] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-41 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,189] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,189] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-23 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,189] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,189] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-20 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,189] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,189] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-17 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,189] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,189] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-32 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,189] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,189] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-29 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,189] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,189] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-26 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,189] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,190] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-8 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,190] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,190] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-5 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,190] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,190] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-38 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,190] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,190] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-35 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,190] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,190] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,190] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,250] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-15 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,250] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,251] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-48 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,251] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,251] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-45 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,251] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,251] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-12 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,251] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,251] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-9 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,251] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,251] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-42 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,251] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,251] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-24 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,251] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,251] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-21 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,251] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,251] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-18 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,252] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,252] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,252] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,252] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-30 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,252] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,252] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-27 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,252] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,252] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-39 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,252] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,252] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-6 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,252] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,252] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-3 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,252] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,252] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-36 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,252] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:05,253] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-33 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:05,253] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:06,994] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:06,994] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:07,003] INFO [GroupCoordinator 1]: Preparing to rebalance group aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-31) (reason: Adding new member onap-policy-apex-pdp-0-9251d391-2b31-4c21-bcef-4bc21c89ecde with group instance id None) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:08,192] INFO [GroupCoordinator 1]: Stabilized group policy-pap--POLICY-PDP-PAP generation 1 (__consumer_offsets-37) with 1 members (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:08,208] INFO [GroupCoordinator 1]: Assignment received from leader for group policy-pap--POLICY-PDP-PAP for generation 1. The group has 1 members, 0 of which are static. (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:08,316] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:08,316] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:08,392] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:08,392] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:10,005] INFO [GroupCoordinator 1]: Stabilized group aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP generation 1 (__consumer_offsets-31) with 1 members (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:10,333] INFO [GroupCoordinator 1]: Assignment received from leader for group aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP for generation 1. The group has 1 members, 0 of which are static. (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:10,479] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:10,479] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:11,402] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:11,402] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:13,583] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:13,584] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:15,244] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:15,244] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:24,419] INFO [Broker id=1] Handling LeaderAndIsr request correlationId 8 from controller 0 for 3 partitions (state.change.logger)
[2022-03-05 03:15:24,419] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=0, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 8 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:24,419] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=1, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 8 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:24,419] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=2, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 8 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:15:24,422] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 8 from controller 0 epoch 1 starting the become-leader transition for partition POLICY-NOTIFICATION-2 (state.change.logger)
[2022-03-05 03:15:24,422] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(POLICY-NOTIFICATION-2) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:15:24,422] INFO [Broker id=1] Stopped fetchers as part of LeaderAndIsr request correlationId 8 from controller 0 epoch 1 as part of the become-leader transition for 1 partitions (state.change.logger)
[2022-03-05 03:15:24,535] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:24,537] INFO Created log for partition POLICY-NOTIFICATION-2 in /var/lib/kafka/data/POLICY-NOTIFICATION-2 with properties {} (kafka.log.LogManager)
[2022-03-05 03:15:24,547] INFO [Partition POLICY-NOTIFICATION-2 broker=1] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-2 (kafka.cluster.Partition)
[2022-03-05 03:15:24,547] INFO [Partition POLICY-NOTIFICATION-2 broker=1] Log loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:24,547] INFO [Broker id=1] Leader POLICY-NOTIFICATION-2 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1,2,0] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:24,581] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 8 from controller 0 epoch 1 for the become-leader transition for partition POLICY-NOTIFICATION-2 (state.change.logger)
[2022-03-05 03:15:24,581] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 8 from controller 0 epoch 1 starting the become-follower transition for partition POLICY-NOTIFICATION-1 with leader 2 (state.change.logger)
[2022-03-05 03:15:24,581] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 8 from controller 0 epoch 1 starting the become-follower transition for partition POLICY-NOTIFICATION-0 with leader 0 (state.change.logger)
[2022-03-05 03:15:24,638] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:24,640] INFO Created log for partition POLICY-NOTIFICATION-1 in /var/lib/kafka/data/POLICY-NOTIFICATION-1 with properties {} (kafka.log.LogManager)
[2022-03-05 03:15:24,641] INFO [Partition POLICY-NOTIFICATION-1 broker=1] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-1 (kafka.cluster.Partition)
[2022-03-05 03:15:24,641] INFO [Partition POLICY-NOTIFICATION-1 broker=1] Log loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:24,641] INFO [Broker id=1] Follower POLICY-NOTIFICATION-1 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:24,749] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:15:24,751] INFO Created log for partition POLICY-NOTIFICATION-0 in /var/lib/kafka/data/POLICY-NOTIFICATION-0 with properties {} (kafka.log.LogManager)
[2022-03-05 03:15:24,751] INFO [Partition POLICY-NOTIFICATION-0 broker=1] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-0 (kafka.cluster.Partition)
[2022-03-05 03:15:24,751] INFO [Partition POLICY-NOTIFICATION-0 broker=1] Log loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:15:24,751] INFO [Broker id=1] Follower POLICY-NOTIFICATION-0 starts at leader epoch 0 from offset 0 with high watermark 0. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:15:24,752] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions HashSet(POLICY-NOTIFICATION-0, POLICY-NOTIFICATION-1) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:15:24,752] INFO [Broker id=1] Stopped fetchers as part of become-follower request from controller 0 epoch 1 with correlation id 8 for 2 partitions (state.change.logger)
[2022-03-05 03:15:24,753] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker 2 for partitions Map(POLICY-NOTIFICATION-1 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),0,0)) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:15:24,753] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker 0 for partitions Map(POLICY-NOTIFICATION-0 -> InitialFetchState(BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092),0,0)) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:15:24,753] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 8 from controller 0 epoch 1 for the become-follower transition for partition POLICY-NOTIFICATION-1 with leader 2 (state.change.logger)
[2022-03-05 03:15:24,753] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 8 from controller 0 epoch 1 for the become-follower transition for partition POLICY-NOTIFICATION-0 with leader 0 (state.change.logger)
[2022-03-05 03:15:24,754] INFO [Broker id=1] Finished LeaderAndIsr request in 335ms correlationId 8 from controller 0 for 3 partitions (state.change.logger)
[2022-03-05 03:15:24,763] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=0, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-NOTIFICATION-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 9 (state.change.logger)
[2022-03-05 03:15:24,764] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=2, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-NOTIFICATION-2 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 9 (state.change.logger)
[2022-03-05 03:15:24,764] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=1, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-NOTIFICATION-1 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 9 (state.change.logger)
[2022-03-05 03:15:24,764] INFO [Broker id=1] Add 3 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 9 (state.change.logger)
[2022-03-05 03:15:24,831] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition POLICY-NOTIFICATION-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:24,831] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:24,957] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition POLICY-NOTIFICATION-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:24,957] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 03:15:25,084] WARN [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Received UNKNOWN_TOPIC_OR_PARTITION from the leader for partition POLICY-NOTIFICATION-0. This error may be returned transiently when the partition is being created or deleted, but it is not expected to persist. (kafka.server.ReplicaFetcherThread)
[2022-03-05 03:15:32,995] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:32,995] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:33,025] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:33,025] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:33,033] INFO [GroupCoordinator 1]: Preparing to rebalance group edd01df5-4f14-4c88-936e-a0411c65a060--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-31) (reason: Adding new member onap-policy-drools-pdp-0-2ebdf0dd-297d-44e4-8a8d-ccaafb05efcb with group instance id None) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:36,035] INFO [GroupCoordinator 1]: Stabilized group edd01df5-4f14-4c88-936e-a0411c65a060--POLICY-PDP-PAP generation 1 (__consumer_offsets-31) with 1 members (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:36,038] INFO [GroupCoordinator 1]: Assignment received from leader for group edd01df5-4f14-4c88-936e-a0411c65a060--POLICY-PDP-PAP for generation 1. The group has 1 members, 0 of which are static. (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:15:36,156] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:36,156] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:42,415] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:15:42,415] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:16:55,898] INFO [Broker id=1] Handling LeaderAndIsr request correlationId 10 from controller 0 for 1 partitions (state.change.logger)
[2022-03-05 03:16:55,898] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='org.onap.dmaap.mr.PNF_READY', partitionIndex=1, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 10 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:16:55,900] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 10 from controller 0 epoch 1 starting the become-leader transition for partition org.onap.dmaap.mr.PNF_READY-1 (state.change.logger)
[2022-03-05 03:16:55,900] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(org.onap.dmaap.mr.PNF_READY-1) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:16:55,900] INFO [Broker id=1] Stopped fetchers as part of LeaderAndIsr request correlationId 10 from controller 0 epoch 1 as part of the become-leader transition for 1 partitions (state.change.logger)
[2022-03-05 03:16:55,947] INFO [Log partition=org.onap.dmaap.mr.PNF_READY-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:16:55,949] INFO Created log for partition org.onap.dmaap.mr.PNF_READY-1 in /var/lib/kafka/data/org.onap.dmaap.mr.PNF_READY-1 with properties {} (kafka.log.LogManager)
[2022-03-05 03:16:55,955] INFO [Partition org.onap.dmaap.mr.PNF_READY-1 broker=1] No checkpointed highwatermark is found for partition org.onap.dmaap.mr.PNF_READY-1 (kafka.cluster.Partition)
[2022-03-05 03:16:55,955] INFO [Partition org.onap.dmaap.mr.PNF_READY-1 broker=1] Log loaded for partition org.onap.dmaap.mr.PNF_READY-1 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:16:55,955] INFO [Broker id=1] Leader org.onap.dmaap.mr.PNF_READY-1 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:16:55,963] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 10 from controller 0 epoch 1 for the become-leader transition for partition org.onap.dmaap.mr.PNF_READY-1 (state.change.logger)
[2022-03-05 03:16:55,965] INFO [Broker id=1] Finished LeaderAndIsr request in 67ms correlationId 10 from controller 0 for 1 partitions (state.change.logger)
[2022-03-05 03:16:55,969] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_READY', partitionIndex=1, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-1 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 11 (state.change.logger)
[2022-03-05 03:16:55,969] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_READY', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 11 (state.change.logger)
[2022-03-05 03:16:55,969] INFO [Broker id=1] Add 2 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 11 (state.change.logger)
[2022-03-05 03:16:58,138] INFO [Broker id=1] Handling LeaderAndIsr request correlationId 12 from controller 0 for 1 partitions (state.change.logger)
[2022-03-05 03:16:58,138] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='org.onap.dmaap.mr.PNF_REGISTRATION', partitionIndex=1, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 12 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:16:58,139] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 12 from controller 0 epoch 1 starting the become-leader transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (state.change.logger)
[2022-03-05 03:16:58,139] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(org.onap.dmaap.mr.PNF_REGISTRATION-1) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:16:58,140] INFO [Broker id=1] Stopped fetchers as part of LeaderAndIsr request correlationId 12 from controller 0 epoch 1 as part of the become-leader transition for 1 partitions (state.change.logger)
[2022-03-05 03:16:58,167] INFO [Log partition=org.onap.dmaap.mr.PNF_REGISTRATION-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:16:58,169] INFO Created log for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in /var/lib/kafka/data/org.onap.dmaap.mr.PNF_REGISTRATION-1 with properties {} (kafka.log.LogManager)
[2022-03-05 03:16:58,171] INFO [Partition org.onap.dmaap.mr.PNF_REGISTRATION-1 broker=1] No checkpointed highwatermark is found for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (kafka.cluster.Partition)
[2022-03-05 03:16:58,171] INFO [Partition org.onap.dmaap.mr.PNF_REGISTRATION-1 broker=1] Log loaded for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:16:58,171] INFO [Broker id=1] Leader org.onap.dmaap.mr.PNF_REGISTRATION-1 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:16:58,178] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 12 from controller 0 epoch 1 for the become-leader transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (state.change.logger)
[2022-03-05 03:16:58,179] INFO [Broker id=1] Finished LeaderAndIsr request in 41ms correlationId 12 from controller 0 for 1 partitions (state.change.logger)
[2022-03-05 03:16:58,183] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_REGISTRATION', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 13 (state.change.logger)
[2022-03-05 03:16:58,183] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_REGISTRATION', partitionIndex=1, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 13 (state.change.logger)
[2022-03-05 03:16:58,183] INFO [Broker id=1] Add 2 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 13 (state.change.logger)
[2022-03-05 03:16:59,510] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.mirrormakeragent', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.mirrormakeragent-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 14 (state.change.logger)
[2022-03-05 03:16:59,510] INFO [Broker id=1] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 14 (state.change.logger)
[2022-03-05 03:21:50,121] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='SDC-DISTR-NOTIF-TOPIC-AUTO', partitionIndex=0, controllerEpoch=1, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 15 (state.change.logger)
[2022-03-05 03:21:50,121] INFO [Broker id=1] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 15 (state.change.logger)
[2022-03-05 03:21:52,506] INFO [Broker id=1] Handling LeaderAndIsr request correlationId 16 from controller 0 for 1 partitions (state.change.logger)
[2022-03-05 03:21:52,506] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='SDC-DISTR-STATUS-TOPIC-AUTO', partitionIndex=0, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=true) correlation id 16 from controller 0 epoch 1 (state.change.logger)
[2022-03-05 03:21:52,531] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 16 from controller 0 epoch 1 starting the become-leader transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2022-03-05 03:21:52,531] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(SDC-DISTR-STATUS-TOPIC-AUTO-0) (kafka.server.ReplicaFetcherManager)
[2022-03-05 03:21:52,532] INFO [Broker id=1] Stopped fetchers as part of LeaderAndIsr request correlationId 16 from controller 0 epoch 1 as part of the become-leader transition for 1 partitions (state.change.logger)
[2022-03-05 03:21:52,552] INFO [Log partition=SDC-DISTR-STATUS-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2022-03-05 03:21:52,554] INFO Created log for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in /var/lib/kafka/data/SDC-DISTR-STATUS-TOPIC-AUTO-0 with properties {} (kafka.log.LogManager)
[2022-03-05 03:21:52,557] INFO [Partition SDC-DISTR-STATUS-TOPIC-AUTO-0 broker=1] No checkpointed highwatermark is found for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (kafka.cluster.Partition)
[2022-03-05 03:21:52,557] INFO [Partition SDC-DISTR-STATUS-TOPIC-AUTO-0 broker=1] Log loaded for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 with initial high watermark 0 (kafka.cluster.Partition)
[2022-03-05 03:21:52,557] INFO [Broker id=1] Leader SDC-DISTR-STATUS-TOPIC-AUTO-0 starts at leader epoch 0 from offset 0 with high watermark 0 ISR [1] addingReplicas [] removingReplicas []. Previous leader epoch was -1. (state.change.logger)
[2022-03-05 03:21:52,560] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 16 from controller 0 epoch 1 for the become-leader transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2022-03-05 03:21:52,561] INFO [Broker id=1] Finished LeaderAndIsr request in 55ms correlationId 16 from controller 0 for 1 partitions (state.change.logger)
[2022-03-05 03:21:52,564] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='SDC-DISTR-STATUS-TOPIC-AUTO', partitionIndex=0, controllerEpoch=1, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 17 (state.change.logger)
[2022-03-05 03:21:52,564] INFO [Broker id=1] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 17 (state.change.logger)
[2022-03-05 03:21:56,902] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:21:56,903] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:23:24,859] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:23:24,859] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:23:31,409] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:23:31,409] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:23:31,418] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:23:31,418] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:23:31,422] INFO [GroupCoordinator 1]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-46) (reason: Adding new member policy-id-f38be3c4-5cf1-4342-a4ca-cdfc1aa24d1c with group instance id None) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:23:34,424] INFO [GroupCoordinator 1]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-46) with 1 members (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:23:34,427] INFO [GroupCoordinator 1]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1. The group has 1 members, 0 of which are static. (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:24:24,543] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:24:24,544] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:24:29,043] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:24:29,043] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:24:29,056] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:24:29,056] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:24:29,068] INFO [GroupCoordinator 1]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-1) (reason: Adding new member clamp-8aac3310-8058-49d7-8f94-a41ae09df8d9 with group instance id None) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:24:32,070] INFO [GroupCoordinator 1]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-1) with 1 members (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:24:32,073] INFO [GroupCoordinator 1]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1. The group has 1 members, 0 of which are static. (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:29:31,354] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:29:31,354] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:33:18,445] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:33:18,445] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:35:00,383] INFO [GroupCoordinator 1]: Member[group.instance.id None, member.id onap-policy-apex-pdp-0-9251d391-2b31-4c21-bcef-4bc21c89ecde] in group aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:35:00,385] INFO [GroupCoordinator 1]: Preparing to rebalance group aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-31) (reason: removing member onap-policy-apex-pdp-0-9251d391-2b31-4c21-bcef-4bc21c89ecde on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:35:00,386] INFO [GroupCoordinator 1]: Group aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-31) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:35:07,762] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:35:07,762] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:35:07,793] INFO [GroupCoordinator 1]: Preparing to rebalance group aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP in state PreparingRebalance with old generation 2 (__consumer_offsets-31) (reason: Adding new member onap-policy-apex-pdp-0-4effb3e5-eeea-4af3-8d17-d9c38a9fcd97 with group instance id None) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:35:10,795] INFO [GroupCoordinator 1]: Stabilized group aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP generation 3 (__consumer_offsets-31) with 1 members (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:35:10,798] INFO [GroupCoordinator 1]: Assignment received from leader for group aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP for generation 3. The group has 1 members, 0 of which are static. (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 03:35:10,921] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:35:10,922] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:44:31,673] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:44:31,673] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:59:32,003] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 03:59:32,003] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:14:32,243] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:14:32,243] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:16:49,295] WARN Client session timed out, have not heard from server in 12003ms for sessionid 0x10000745eb40001 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:16:49,298] INFO Client session timed out, have not heard from server in 12003ms for sessionid 0x10000745eb40001, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:16:50,088] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[2022-03-05 04:16:50,089] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2022-03-05 04:16:50,090] INFO Opening socket connection to server onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.73.24:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:16:50,116] INFO Socket connection established, initiating session, client: /10.233.71.31:44652, server: onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.73.24:2181 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:16:53,820] INFO Session establishment complete on server onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.73.24:2181, sessionid = 0x10000745eb40001, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:17:24,433] WARN Client session timed out, have not heard from server in 12022ms for sessionid 0x10000745eb40001 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:17:24,434] INFO Client session timed out, have not heard from server in 12022ms for sessionid 0x10000745eb40001, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:17:27,081] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[2022-03-05 04:17:27,081] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2022-03-05 04:17:27,082] INFO Opening socket connection to server onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.70.129:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:26,170] WARN Client session timed out, have not heard from server in 61635ms for sessionid 0x10000745eb40001 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:26,172] INFO Client session timed out, have not heard from server in 61635ms for sessionid 0x10000745eb40001, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:45,990] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Error sending fetch request (sessionId=1790841273, epoch=8355) to node 2: (org.apache.kafka.clients.FetchSessionHandler)
java.io.IOException: Connection to 2 was disconnected before the response was read
	at org.apache.kafka.clients.NetworkClientUtils.sendAndReceive(NetworkClientUtils.java:100)
	at kafka.server.ReplicaFetcherBlockingSend.sendRequest(ReplicaFetcherBlockingSend.scala:110)
	at kafka.server.ReplicaFetcherThread.fetchFromLeader(ReplicaFetcherThread.scala:217)
	at kafka.server.AbstractFetcherThread.processFetchRequest(AbstractFetcherThread.scala:317)
	at kafka.server.AbstractFetcherThread.$anonfun$maybeFetch$3(AbstractFetcherThread.scala:141)
	at kafka.server.AbstractFetcherThread.$anonfun$maybeFetch$3$adapted(AbstractFetcherThread.scala:140)
	at scala.Option.foreach(Option.scala:437)
	at kafka.server.AbstractFetcherThread.maybeFetch(AbstractFetcherThread.scala:140)
	at kafka.server.AbstractFetcherThread.doWork(AbstractFetcherThread.scala:123)
	at kafka.utils.ShutdownableThread.run(ShutdownableThread.scala:96)
[2022-03-05 04:18:46,067] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,076] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Error in response for fetch request (type=FetchRequest, replicaId=1, maxWait=500, minBytes=1, maxBytes=10485760, fetchData={}, isolationLevel=READ_UNCOMMITTED, toForget=, metadata=(sessionId=1790841273, epoch=8355), rackId=) (kafka.server.ReplicaFetcherThread)
java.io.IOException: Connection to 2 was disconnected before the response was read
	at org.apache.kafka.clients.NetworkClientUtils.sendAndReceive(NetworkClientUtils.java:100)
	at kafka.server.ReplicaFetcherBlockingSend.sendRequest(ReplicaFetcherBlockingSend.scala:110)
	at kafka.server.ReplicaFetcherThread.fetchFromLeader(ReplicaFetcherThread.scala:217)
	at kafka.server.AbstractFetcherThread.processFetchRequest(AbstractFetcherThread.scala:317)
	at kafka.server.AbstractFetcherThread.$anonfun$maybeFetch$3(AbstractFetcherThread.scala:141)
	at kafka.server.AbstractFetcherThread.$anonfun$maybeFetch$3$adapted(AbstractFetcherThread.scala:140)
	at scala.Option.foreach(Option.scala:437)
	at kafka.server.AbstractFetcherThread.maybeFetch(AbstractFetcherThread.scala:140)
	at kafka.server.AbstractFetcherThread.doWork(AbstractFetcherThread.scala:123)
	at kafka.utils.ShutdownableThread.run(ShutdownableThread.scala:96)
[2022-03-05 04:18:46,077] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,090] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,090] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,090] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,090] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,434] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,435] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,435] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,435] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,434] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,435] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,438] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,438] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,648] INFO [GroupCoordinator 1]: Preparing to rebalance group policy-pap--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-37) (reason: Adding new member da5a1f8e-f582-4c85-bdae-f48a6f872f7a-855de3e9-21fe-41cc-905e-da5dd31cc722 with group instance id None) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:46,648] INFO [GroupCoordinator 1]: Preparing to rebalance group aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP in state PreparingRebalance with old generation 3 (__consumer_offsets-31) (reason: Adding new member onap-policy-apex-pdp-0-91e3a047-383b-494f-a61b-d4c09b4a0a32 with group instance id None) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:46,693] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,693] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:46,769] INFO [GroupCoordinator 1]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-1) (reason: Adding new member clamp-97444166-dba0-4446-b8f6-453bbc5adbfc with group instance id None) (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:48,767] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[2022-03-05 04:18:48,787] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2022-03-05 04:18:48,787] INFO Opening socket connection to server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.69.205:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:48,835] INFO Socket connection established, initiating session, client: /10.233.71.31:43442, server: onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.69.205:2181 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:48,931] WARN Unable to reconnect to ZooKeeper service, session 0x10000745eb40001 has expired (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:48,931] INFO Unable to reconnect to ZooKeeper service, session 0x10000745eb40001 has expired, closing socket connection (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:48,967] INFO EventThread shut down for session: 0x10000745eb40001 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:48,969] INFO [ZooKeeperClient Kafka server] Session expired. (kafka.zookeeper.ZooKeeperClient)
[2022-03-05 04:18:48,976] INFO [ZooKeeperClient Kafka server] Initializing a new session to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181. (kafka.zookeeper.ZooKeeperClient)
[2022-03-05 04:18:49,005] DEBUG [Controller id=1] Resigning (kafka.controller.KafkaController)
[2022-03-05 04:18:49,006] DEBUG [Controller id=1] Unregister BrokerModifications handler for Set() (kafka.controller.KafkaController)
[2022-03-05 04:18:49,008] INFO [PartitionStateMachine controllerId=1] Stopped partition state machine (kafka.controller.ZkPartitionStateMachine)
[2022-03-05 04:18:48,977] INFO Initiating client connection, connectString=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@17a1e4ca (org.apache.zookeeper.ZooKeeper)
[2022-03-05 04:18:49,010] INFO [ReplicaStateMachine controllerId=1] Stopped replica state machine (kafka.controller.ZkReplicaStateMachine)
[2022-03-05 04:18:49,013] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket)
[2022-03-05 04:18:49,021] INFO zookeeper.request.timeout value is 0. feature enabled= (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:49,035] INFO [Controller id=1] Resigned (kafka.controller.KafkaController)
[2022-03-05 04:18:49,065] INFO Creating /brokers/ids/1 (is it secure? true) (kafka.zk.KafkaZkClient)
[2022-03-05 04:18:49,079] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[2022-03-05 04:18:49,079] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2022-03-05 04:18:49,080] INFO Opening socket connection to server onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.70.129:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:49,082] INFO Socket connection established, initiating session, client: /10.233.71.31:43778, server: onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.70.129:2181 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:49,362] INFO Session establishment complete on server onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.70.129:2181, sessionid = 0x20000c796960001, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn)
[2022-03-05 04:18:49,387] INFO Updated cache from existing FinalizedFeaturesAndEpoch(features=Features{}, epoch=0) to latest FinalizedFeaturesAndEpoch(features=Features{}, epoch=0). (kafka.server.FinalizedFeatureCache)
[2022-03-05 04:18:49,547] INFO Stat of the created znode at /brokers/ids/1 is: 8589934767,8589934767,1646453929428,1646453929428,1,0,0,144116045300760577,380,0,8589934767 (kafka.zk.KafkaZkClient)
[2022-03-05 04:18:49,547] INFO Registered broker 1 at path /brokers/ids/1 with addresses: EXTERNAL_SASL_PLAINTEXT://10.253.0.184:30491,INTERNAL_SASL_PLAINTEXT://onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092, czxid (broker epoch): 8589934767 (kafka.zk.KafkaZkClient)
[2022-03-05 04:18:49,605] DEBUG [Controller id=1] Broker 2 has been elected as the controller, so stopping the election process. (kafka.controller.KafkaController)
[2022-03-05 04:18:49,979] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:49,979] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:50,018] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:50,018] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:51,582] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:51,582] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:51,587] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_REGISTRATION', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_REGISTRATION', partitionIndex=1, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[1], zkVersion=1, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='SDC-DISTR-STATUS-TOPIC-AUTO', partitionIndex=0, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[1], zkVersion=1, replicas=[1], offlineReplicas=[]) for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-NOTIFICATION-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-NOTIFICATION-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-NOTIFICATION-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='SDC-DISTR-NOTIF-TOPIC-AUTO', partitionIndex=0, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[0], zkVersion=1, replicas=[0], offlineReplicas=[]) for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_READY', partitionIndex=1, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[1], zkVersion=1, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_READY', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.mirrormakeragent', partitionIndex=0, controllerEpoch=1, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.mirrormakeragent-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=13, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-13 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,588] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=46, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=9, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=42, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=21, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=17, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-17 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=30, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=26, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=5, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-5 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=38, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-38 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=34, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-34 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=16, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=45, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=12, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=41, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=24, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=20, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-20 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=49, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-49 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=29, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=25, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-25 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=8, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-8 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,589] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=37, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=4, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-4 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=33, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=15, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-15 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=48, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=11, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-11 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=44, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-44 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=23, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-23 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=19, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-19 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=32, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-32 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=28, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-28 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=7, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=40, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-40 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=3, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=36, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-36 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=47, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-47 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=14, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-14 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=43, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-43 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=10, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-10 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=22, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-22 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=18, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=31, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-31 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,590] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=27, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,591] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=39, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,591] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=6, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,591] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=35, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-35 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,591] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,591] INFO [Broker id=1] Add 66 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 0 (state.change.logger)
[2022-03-05 04:18:51,597] INFO [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 for 62 partitions (state.change.logger)
[2022-03-05 04:18:51,597] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,597] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,597] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,597] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='org.onap.dmaap.mr.PNF_REGISTRATION', partitionIndex=1, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[1], zkVersion=1, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,597] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='SDC-DISTR-STATUS-TOPIC-AUTO', partitionIndex=0, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[1], zkVersion=1, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,597] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,597] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='org.onap.dmaap.mr.PNF_READY', partitionIndex=1, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[1], zkVersion=1, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=13, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=46, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=9, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=42, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=21, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=17, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=30, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=26, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=5, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=38, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=34, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=16, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=45, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=12, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=41, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=24, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=20, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=49, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=29, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=25, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=8, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=37, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=4, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=33, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=15, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,598] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=48, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=11, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=44, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=23, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=19, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=32, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=28, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=7, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=40, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=3, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=36, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=47, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=14, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=43, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=10, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=22, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=18, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=31, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=27, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=39, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=6, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,599] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=35, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:51,600] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='__consumer_offsets', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 1 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:52,009] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:52,009] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:52,134] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:52,135] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:52,198] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:52,198] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:52,329] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:52,329] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:54,349] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:54,349] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:54,678] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:54,679] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:55,117] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-3 has an older epoch (0) than the current leader. Will await the new LeaderAndIsr state before resuming fetching. (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,119] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-3 marked as failed (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,119] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-18 has an older epoch (0) than the current leader. Will await the new LeaderAndIsr state before resuming fetching. (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,120] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-18 marked as failed (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,121] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-39 has an older epoch (0) than the current leader. Will await the new LeaderAndIsr state before resuming fetching. (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,121] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-39 marked as failed (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,573] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition POLICY-NOTIFICATION-1 has an older epoch (0) than the current leader. Will await the new LeaderAndIsr state before resuming fetching. (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,573] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition POLICY-NOTIFICATION-1 marked as failed (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-3 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-18 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-41 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-10 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-33 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-48 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-19 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-34 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 with leader -1 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-4 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition POLICY-NOTIFICATION-2 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition org.onap.dmaap.mr.PNF_READY-1 with leader -1 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-11 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-26 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-49 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 with leader -1 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-39 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition POLICY-NOTIFICATION-1 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-9 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-24 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,621] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition POLICY-CLRUNTIME-PARTICIPANT-1 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-31 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-46 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-1 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-16 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-2 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-25 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-40 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition POLICY-NOTIFICATION-0 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition POLICY-CLRUNTIME-PARTICIPANT-2 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-47 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-17 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-32 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-37 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-7 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-22 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-29 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-44 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,622] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-14 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition POLICY-PDP-PAP-0 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-23 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-38 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-8 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-45 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition POLICY-CLRUNTIME-PARTICIPANT-0 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-15 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-30 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-0 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition POLICY-PDP-PAP-1 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-35 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-5 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-20 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-27 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-42 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-12 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-21 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-36 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-6 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,623] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition POLICY-PDP-PAP-2 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,624] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-43 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,624] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-13 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,624] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 1 from controller 2 epoch 2 starting the become-follower transition for partition __consumer_offsets-28 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,625] INFO [Broker id=1] Follower __consumer_offsets-3 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,626] INFO [Broker id=1] Follower __consumer_offsets-18 starts at leader epoch 2 from offset 213 with high watermark 213. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,626] INFO [Broker id=1] Follower __consumer_offsets-41 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,626] INFO [Broker id=1] Follower __consumer_offsets-10 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,626] INFO [Broker id=1] Follower __consumer_offsets-33 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,626] INFO [Broker id=1] Follower __consumer_offsets-48 starts at leader epoch 2 from offset 1177 with high watermark 1177. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,626] INFO [Broker id=1] Follower __consumer_offsets-19 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,626] INFO [Broker id=1] Follower __consumer_offsets-34 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,629] ERROR [Broker id=1] Received LeaderAndIsrRequest with correlation id 1 from controller 2 epoch 2 for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (last update controller epoch 2) but cannot become follower since the new leader -1 is unavailable. (state.change.logger)
[2022-03-05 04:18:55,629] INFO [Broker id=1] Follower __consumer_offsets-4 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,629] INFO [Broker id=1] Follower POLICY-NOTIFICATION-2 starts at leader epoch 3 from offset 1 with high watermark 1. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,629] ERROR [Broker id=1] Received LeaderAndIsrRequest with correlation id 1 from controller 2 epoch 2 for partition org.onap.dmaap.mr.PNF_READY-1 (last update controller epoch 2) but cannot become follower since the new leader -1 is unavailable. (state.change.logger)
[2022-03-05 04:18:55,630] INFO [Broker id=1] Follower __consumer_offsets-11 starts at leader epoch 3 from offset 892 with high watermark 892. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,631] INFO [Broker id=1] Follower __consumer_offsets-26 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,631] INFO [Broker id=1] Follower __consumer_offsets-49 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,631] ERROR [Broker id=1] Received LeaderAndIsrRequest with correlation id 1 from controller 2 epoch 2 for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (last update controller epoch 2) but cannot become follower since the new leader -1 is unavailable. (state.change.logger)
[2022-03-05 04:18:55,631] INFO [Broker id=1] Follower __consumer_offsets-39 starts at leader epoch 2 from offset 1183 with high watermark 1183. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,631] INFO [Broker id=1] Follower POLICY-NOTIFICATION-1 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,631] INFO [Broker id=1] Follower __consumer_offsets-9 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,631] INFO [Broker id=1] Follower __consumer_offsets-24 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,631] INFO [Broker id=1] Follower POLICY-CLRUNTIME-PARTICIPANT-1 starts at leader epoch 3 from offset 67 with high watermark 66. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,650] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-9 has an older epoch (0) than the current leader. Will await the new LeaderAndIsr state before resuming fetching. (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,651] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-9 marked as failed (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,651] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-42 has an older epoch (0) than the current leader. Will await the new LeaderAndIsr state before resuming fetching. (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,651] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-42 marked as failed (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,651] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-24 has an older epoch (0) than the current leader. Will await the new LeaderAndIsr state before resuming fetching. (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,651] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-24 marked as failed (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,651] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-27 has an older epoch (0) than the current leader. Will await the new LeaderAndIsr state before resuming fetching. (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,652] WARN [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-27 marked as failed (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,654] INFO [Broker id=1] Follower __consumer_offsets-31 starts at leader epoch 3 from offset 1741 with high watermark 1735. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,654] INFO [Broker id=1] Follower __consumer_offsets-46 starts at leader epoch 3 from offset 163 with high watermark 162. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,654] INFO [Broker id=1] Follower __consumer_offsets-1 starts at leader epoch 3 from offset 107 with high watermark 107. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,654] INFO [Broker id=1] Follower __consumer_offsets-16 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,655] INFO [Broker id=1] Follower __consumer_offsets-2 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,655] INFO [Broker id=1] Follower __consumer_offsets-25 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,655] INFO [Broker id=1] Follower __consumer_offsets-40 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,655] INFO [Broker id=1] Follower POLICY-NOTIFICATION-0 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,656] INFO [Broker id=1] Follower POLICY-CLRUNTIME-PARTICIPANT-2 starts at leader epoch 3 from offset 134 with high watermark 134. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,656] INFO [Broker id=1] Follower __consumer_offsets-47 starts at leader epoch 3 from offset 162 with high watermark 162. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,656] INFO [Broker id=1] Follower __consumer_offsets-17 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,657] INFO [Broker id=1] Follower __consumer_offsets-32 starts at leader epoch 3 from offset 57 with high watermark 57. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,657] INFO [Broker id=1] Follower __consumer_offsets-37 starts at leader epoch 3 from offset 892 with high watermark 889. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,659] INFO [Broker id=1] Follower __consumer_offsets-7 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,659] INFO [Broker id=1] Follower __consumer_offsets-22 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,659] INFO [Broker id=1] Follower __consumer_offsets-29 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,660] INFO [Broker id=1] Follower __consumer_offsets-44 starts at leader epoch 3 from offset 45 with high watermark 45. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,660] INFO [Broker id=1] Follower __consumer_offsets-14 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,660] INFO [Broker id=1] Follower POLICY-PDP-PAP-0 starts at leader epoch 2 from offset 41 with high watermark 41. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,662] INFO [Broker id=1] Follower __consumer_offsets-23 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,662] INFO [Broker id=1] Follower __consumer_offsets-38 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,663] INFO [Broker id=1] Follower __consumer_offsets-8 starts at leader epoch 3 from offset 1144 with high watermark 1144. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,663] INFO [Broker id=1] Follower __consumer_offsets-45 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,664] INFO [Broker id=1] Follower POLICY-CLRUNTIME-PARTICIPANT-0 starts at leader epoch 2 from offset 32 with high watermark 32. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,665] INFO [Broker id=1] Follower __consumer_offsets-15 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,665] INFO [Broker id=1] Follower __consumer_offsets-30 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,665] INFO [Broker id=1] Follower __consumer_offsets-0 starts at leader epoch 2 from offset 46 with high watermark 46. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,665] INFO [Broker id=1] Follower POLICY-PDP-PAP-1 starts at leader epoch 3 from offset 202 with high watermark 201. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,665] INFO [Broker id=1] Follower __consumer_offsets-35 starts at leader epoch 3 from offset 904 with high watermark 904. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,666] INFO [Broker id=1] Follower __consumer_offsets-5 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,666] INFO [Broker id=1] Follower __consumer_offsets-20 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,666] INFO [Broker id=1] Follower __consumer_offsets-27 starts at leader epoch 2 from offset 110 with high watermark 110. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,666] INFO [Broker id=1] Follower __consumer_offsets-42 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,667] INFO [Broker id=1] Follower __consumer_offsets-12 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,667] INFO [Broker id=1] Follower __consumer_offsets-21 starts at leader epoch 2 from offset 1141 with high watermark 1141. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,667] INFO [Broker id=1] Follower __consumer_offsets-36 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,667] INFO [Broker id=1] Follower __consumer_offsets-6 starts at leader epoch 2 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,667] INFO [Broker id=1] Follower POLICY-PDP-PAP-2 starts at leader epoch 3 from offset 38 with high watermark 38. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,668] INFO [Broker id=1] Follower __consumer_offsets-43 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,668] INFO [Broker id=1] Follower __consumer_offsets-13 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,668] INFO [Broker id=1] Follower __consumer_offsets-28 starts at leader epoch 3 from offset 0 with high watermark 0. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,685] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions HashSet(__consumer_offsets-13, __consumer_offsets-46, __consumer_offsets-9, __consumer_offsets-42, __consumer_offsets-21, __consumer_offsets-17, POLICY-PDP-PAP-1, __consumer_offsets-30, POLICY-NOTIFICATION-0, __consumer_offsets-26, POLICY-CLRUNTIME-PARTICIPANT-1, __consumer_offsets-5, __consumer_offsets-38, __consumer_offsets-1, __consumer_offsets-34, __consumer_offsets-16, __consumer_offsets-45, __consumer_offsets-12, __consumer_offsets-41, __consumer_offsets-24, __consumer_offsets-20, __consumer_offsets-49, POLICY-PDP-PAP-2, __consumer_offsets-0, __consumer_offsets-29, __consumer_offsets-25, __consumer_offsets-8, POLICY-CLRUNTIME-PARTICIPANT-0, __consumer_offsets-37, __consumer_offsets-4, __consumer_offsets-33, __consumer_offsets-15, __consumer_offsets-48, __consumer_offsets-11, __consumer_offsets-44, __consumer_offsets-23, __consumer_offsets-19, __consumer_offsets-32, __consumer_offsets-28, POLICY-NOTIFICATION-2, __consumer_offsets-7, __consumer_offsets-40, __consumer_offsets-3, __consumer_offsets-36, __consumer_offsets-47, __consumer_offsets-14, __consumer_offsets-43, __consumer_offsets-10, __consumer_offsets-22, __consumer_offsets-18, __consumer_offsets-31, __consumer_offsets-27, POLICY-NOTIFICATION-1, POLICY-PDP-PAP-0, __consumer_offsets-39, __consumer_offsets-6, POLICY-CLRUNTIME-PARTICIPANT-2, __consumer_offsets-35, __consumer_offsets-2) (kafka.server.ReplicaFetcherManager)
[2022-03-05 04:18:55,689] INFO [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 2 with correlation id 1 for 59 partitions (state.change.logger)
[2022-03-05 04:18:55,741] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker 2 for partitions HashMap(__consumer_offsets-22 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-30 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), POLICY-NOTIFICATION-1 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), POLICY-NOTIFICATION-0 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), POLICY-PDP-PAP-1 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,202), __consumer_offsets-38 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), POLICY-PDP-PAP-2 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,38), __consumer_offsets-8 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,1144), __consumer_offsets-21 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,1141), __consumer_offsets-4 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-27 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,110), __consumer_offsets-7 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-9 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), __consumer_offsets-46 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,163), POLICY-CLRUNTIME-PARTICIPANT-1 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,67), __consumer_offsets-25 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-35 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,904), POLICY-CLRUNTIME-PARTICIPANT-2 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,134), __consumer_offsets-41 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-33 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), __consumer_offsets-23 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-49 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-47 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,162), __consumer_offsets-16 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-28 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-31 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,1741), __consumer_offsets-36 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), __consumer_offsets-42 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), __consumer_offsets-3 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), __consumer_offsets-18 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,213), __consumer_offsets-37 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,892), POLICY-NOTIFICATION-2 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,1), __consumer_offsets-15 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), __consumer_offsets-24 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), __consumer_offsets-17 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-48 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,1177), __consumer_offsets-19 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-11 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,892), POLICY-PDP-PAP-0 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,41), __consumer_offsets-13 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-2 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-43 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-6 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), __consumer_offsets-14 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-20 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-0 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,46), __consumer_offsets-44 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,45), __consumer_offsets-39 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,1183), __consumer_offsets-12 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), POLICY-CLRUNTIME-PARTICIPANT-0 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,32), __consumer_offsets-45 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),2,0), __consumer_offsets-1 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,107), __consumer_offsets-5 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-26 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-29 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-34 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-10 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0), __consumer_offsets-32 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,57), __consumer_offsets-40 -> InitialFetchState(BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092),3,0)) (kafka.server.ReplicaFetcherManager)
[2022-03-05 04:18:55,746] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-3 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,746] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-18 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,746] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-41 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,746] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-10 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,746] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-33 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,746] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-48 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-19 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-34 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 with leader -1 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-4 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition POLICY-NOTIFICATION-2 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition org.onap.dmaap.mr.PNF_READY-1 with leader -1 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-11 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-26 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-49 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 with leader -1 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-39 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition POLICY-NOTIFICATION-1 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-9 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-24 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition POLICY-CLRUNTIME-PARTICIPANT-1 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-31 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-46 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-1 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-16 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-2 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-25 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-40 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition POLICY-NOTIFICATION-0 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition POLICY-CLRUNTIME-PARTICIPANT-2 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-47 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-17 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-32 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-37 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-7 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-22 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-29 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-44 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-14 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition POLICY-PDP-PAP-0 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,747] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-23 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-38 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-8 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-45 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition POLICY-CLRUNTIME-PARTICIPANT-0 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-15 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-30 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-0 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition POLICY-PDP-PAP-1 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-35 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-5 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-20 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-27 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-42 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-12 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-21 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-36 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-6 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition POLICY-PDP-PAP-2 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-43 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-13 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,748] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 1 from controller 2 epoch 2 for the become-follower transition for partition __consumer_offsets-28 with leader 2 (state.change.logger)
[2022-03-05 04:18:55,751] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Shutting down (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,753] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Shutdown completed (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,754] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Stopped (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,774] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Partition __consumer_offsets-48 has a newer epoch (2) than the current leader. Retry the partition later (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,777] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 3 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,777] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,794] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 18 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,794] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,794] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-15 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,794] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 41 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,795] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,795] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 10 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,795] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,795] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,795] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-3. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,795] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 33 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,812] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,812] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-18. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,812] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-41. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,812] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-10. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,812] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-33. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,796] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-45 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,812] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 48 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,814] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,814] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 19 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,814] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,814] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-48. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,814] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 34 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,814] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,814] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-19. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,814] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 4 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,814] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,814] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-34. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,814] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,814] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-4. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-14 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,814] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 11 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 26 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,815] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-11. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 49 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 39 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-26. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 9 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-49. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 24 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 31 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 46 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,816] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,815] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-12 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,816] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 1 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,815] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-39. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,816] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,816] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 16 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,816] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,816] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 2 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,816] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,816] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 25 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,816] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,816] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,816] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 40 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,816] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,816] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-9. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,817] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-41 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,817] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-24. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,817] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,817] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-9 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,817] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,817] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-42 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,817] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,817] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-23 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,817] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,817] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-24 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,817] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,817] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-20 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,817] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,817] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-17 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,818] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,818] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-29 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,818] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,818] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-30 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,818] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,818] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition POLICY-NOTIFICATION-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,818] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,818] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition POLICY-NOTIFICATION-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,818] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,818] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-26 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,818] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,818] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-5 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,818] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,818] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-38 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,818] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,818] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-6 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,818] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,819] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-3 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,819] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,819] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-36 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,819] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,819] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-33 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,819] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,819] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2022-03-05 04:18:55,819] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2022-03-05 04:18:55,817] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 47 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,821] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,821] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 17 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,821] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,821] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 32 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,821] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,821] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 37 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,821] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,821] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 7 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 22 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 29 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 44 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 14 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 23 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 38 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 8 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 45 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 15 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 30 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 0 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 35 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 5 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 20 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,822] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 27 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,822] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,823] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 42 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,823] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,823] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 12 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,823] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,823] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 21 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,823] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,823] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 36 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,823] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,823] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 6 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,823] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,823] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 43 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,823] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,823] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 13 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,823] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,823] INFO [GroupCoordinator 1]: Resigned as the group coordinator for partition 28 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,823] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,824] INFO [Broker id=1] Finished LeaderAndIsr request in 4226ms correlationId 1 from controller 2 for 62 partitions (state.change.logger)
[2022-03-05 04:18:55,831] INFO [GroupCoordinator 1]: Unloading group metadata for aa5ba94d-0b7f-4e82-9fb9-4fa1b56bb1f7--POLICY-PDP-PAP with generation 3 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_REGISTRATION', partitionIndex=1, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[1], zkVersion=1, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='SDC-DISTR-STATUS-TOPIC-AUTO', partitionIndex=0, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[1], zkVersion=1, replicas=[1], offlineReplicas=[]) for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-NOTIFICATION-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-NOTIFICATION-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-NOTIFICATION-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='SDC-DISTR-NOTIF-TOPIC-AUTO', partitionIndex=0, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[0], zkVersion=1, replicas=[0], offlineReplicas=[]) for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_READY', partitionIndex=1, controllerEpoch=2, leader=-1, leaderEpoch=1, isr=[1], zkVersion=1, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=13, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-13 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=46, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=9, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=42, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=21, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=17, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-17 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,831] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=30, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=26, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=5, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-5 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=38, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-38 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=34, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-34 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=16, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=45, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=12, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=41, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=24, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=20, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-20 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=49, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-49 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,832] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=29, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,833] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=25, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-25 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,833] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=8, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-8 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,833] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=37, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,833] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=4, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-4 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,833] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=33, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,833] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=15, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-15 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,833] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=48, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,833] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=11, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-11 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,833] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=44, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-44 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,833] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=23, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-23 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,834] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=19, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-19 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,834] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=32, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-32 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,834] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=28, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-28 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,834] INFO [GroupCoordinator 1]: Unloading group metadata for edd01df5-4f14-4c88-936e-a0411c65a060--POLICY-PDP-PAP with generation 1 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,839] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=7, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,839] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=40, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-40 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,839] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=3, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,839] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=36, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-36 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,839] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=47, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-47 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,839] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=14, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-14 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=43, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-43 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=10, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-10 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=22, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-22 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=18, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=31, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-31 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=27, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=39, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=6, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=35, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-35 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2], zkVersion=3, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,840] INFO [Broker id=1] Add 63 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 2 (state.change.logger)
[2022-03-05 04:18:55,841] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-31. Removed 6 cached offsets and 2 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,842] INFO [GroupCoordinator 1]: Unloading group metadata for policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 1 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,842] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-46. Removed 1 cached offsets and 1 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,842] INFO [GroupCoordinator 1]: Unloading group metadata for clamp--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 1 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,842] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-1. Removed 1 cached offsets and 1 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,842] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-16. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,842] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-2. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,843] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-25. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,843] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-40. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,843] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-47. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,843] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-17. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,843] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-32. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,843] INFO [GroupCoordinator 1]: Unloading group metadata for policy-pap--POLICY-PDP-PAP with generation 1 (kafka.coordinator.group.GroupCoordinator)
[2022-03-05 04:18:55,843] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-37. Removed 3 cached offsets and 1 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,844] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-7. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,844] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-22. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,844] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-29. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,845] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-44. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,845] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-14. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,845] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-23. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,847] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-38. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,847] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-8. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,847] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-45. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,847] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-15. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,847] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-30. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,847] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-0. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,847] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-35. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,847] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-5. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,847] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-20. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,847] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-27. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,848] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-42. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,848] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-12. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,851] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-21. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-36. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-6. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-43. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-13. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-28. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2022-03-05 04:18:55,863] INFO [Broker id=1] Handling LeaderAndIsr request correlationId 3 from controller 2 for 3 partitions (state.change.logger)
[2022-03-05 04:18:55,863] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='org.onap.dmaap.mr.PNF_REGISTRATION', partitionIndex=1, controllerEpoch=2, leader=1, leaderEpoch=2, isr=[1], zkVersion=2, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 3 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:55,863] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='SDC-DISTR-STATUS-TOPIC-AUTO', partitionIndex=0, controllerEpoch=2, leader=1, leaderEpoch=2, isr=[1], zkVersion=2, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 3 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:55,864] TRACE [Broker id=1] Received LeaderAndIsr request LeaderAndIsrPartitionState(topicName='org.onap.dmaap.mr.PNF_READY', partitionIndex=1, controllerEpoch=2, leader=1, leaderEpoch=2, isr=[1], zkVersion=2, replicas=[1], addingReplicas=[], removingReplicas=[], isNew=false) correlation id 3 from controller 2 epoch 2 (state.change.logger)
[2022-03-05 04:18:55,865] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 3 from controller 2 epoch 2 starting the become-leader transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (state.change.logger)
[2022-03-05 04:18:55,865] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 3 from controller 2 epoch 2 starting the become-leader transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2022-03-05 04:18:55,865] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 3 from controller 2 epoch 2 starting the become-leader transition for partition org.onap.dmaap.mr.PNF_READY-1 (state.change.logger)
[2022-03-05 04:18:55,865] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(org.onap.dmaap.mr.PNF_REGISTRATION-1, SDC-DISTR-STATUS-TOPIC-AUTO-0, org.onap.dmaap.mr.PNF_READY-1) (kafka.server.ReplicaFetcherManager)
[2022-03-05 04:18:55,865] INFO [Broker id=1] Stopped fetchers as part of LeaderAndIsr request correlationId 3 from controller 2 epoch 2 as part of the become-leader transition for 3 partitions (state.change.logger)
[2022-03-05 04:18:55,865] INFO [Broker id=1] Leader org.onap.dmaap.mr.PNF_REGISTRATION-1 starts at leader epoch 2 from offset 0 with high watermark 0 ISR [1] addingReplicas [] removingReplicas []. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:55,897] INFO [Broker id=1] Skipped the become-leader state change after marking its partition as leader with correlation id 3 from controller 2 epoch 2 for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (last update controller epoch 2) since it is already the leader for the partition. (state.change.logger)
[2022-03-05 04:18:55,897] INFO [Broker id=1] Leader SDC-DISTR-STATUS-TOPIC-AUTO-0 starts at leader epoch 2 from offset 0 with high watermark 0 ISR [1] addingReplicas [] removingReplicas []. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:56,038] INFO [Broker id=1] Skipped the become-leader state change after marking its partition as leader with correlation id 3 from controller 2 epoch 2 for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (last update controller epoch 2) since it is already the leader for the partition. (state.change.logger)
[2022-03-05 04:18:56,038] INFO [Broker id=1] Leader org.onap.dmaap.mr.PNF_READY-1 starts at leader epoch 2 from offset 0 with high watermark 0 ISR [1] addingReplicas [] removingReplicas []. Previous leader epoch was 0. (state.change.logger)
[2022-03-05 04:18:56,052] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:56,055] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:56,111] INFO [Broker id=1] Skipped the become-leader state change after marking its partition as leader with correlation id 3 from controller 2 epoch 2 for partition org.onap.dmaap.mr.PNF_READY-1 (last update controller epoch 2) since it is already the leader for the partition. (state.change.logger)
[2022-03-05 04:18:56,111] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 3 from controller 2 epoch 2 for the become-leader transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (state.change.logger)
[2022-03-05 04:18:56,111] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 3 from controller 2 epoch 2 for the become-leader transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2022-03-05 04:18:56,112] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 3 from controller 2 epoch 2 for the become-leader transition for partition org.onap.dmaap.mr.PNF_READY-1 (state.change.logger)
[2022-03-05 04:18:56,112] INFO [Broker id=1] Finished LeaderAndIsr request in 252ms correlationId 3 from controller 2 for 3 partitions (state.change.logger)
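(Illustrative note, not part of the captured output: the TRACE lines above follow a fixed state.change.logger format, so a small stdlib script can tally how many partition entries each UpdateMetadata correlation id cached. This is a minimal sketch against a hypothetical local copy of this log named "kafka-broker-1.log"; the regex only assumes the line format visible in this capture.)

    # Sketch: group "Cached leader info ... correlation id N" lines by correlation id.
    import re
    from collections import defaultdict

    LINE_RE = re.compile(
        r"Cached leader info UpdateMetadataPartitionState\(topicName='(?P<topic>[^']+)', "
        r"partitionIndex=(?P<partition>\d+).*?with correlation id (?P<cid>\d+)"
    )

    def summarize(path):
        """Map correlation id -> set of 'topic-partition' strings seen in the log."""
        seen = defaultdict(set)
        with open(path, encoding="utf-8") as fh:
            for line in fh:
                m = LINE_RE.search(line)
                if m:
                    seen[int(m["cid"])].add(f"{m['topic']}-{m['partition']}")
        return seen

    if __name__ == "__main__":
        # Hypothetical filename; point this at a saved copy of the broker log.
        for cid, parts in sorted(summarize("kafka-broker-1.log").items()):
            print(f"correlation id {cid}: {len(parts)} partitions cached")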
[2022-03-05 04:18:56,169] INFO ^Event received  with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:56,169] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2022-03-05 04:18:56,174] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_REGISTRATION', partitionIndex=1, controllerEpoch=2, leader=1, leaderEpoch=2, isr=[1], zkVersion=2, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 4 (state.change.logger)
[2022-03-05 04:18:56,174] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='SDC-DISTR-STATUS-TOPIC-AUTO', partitionIndex=0, controllerEpoch=2, leader=1, leaderEpoch=2, isr=[1], zkVersion=2, replicas=[1], offlineReplicas=[]) for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 4 (state.change.logger)
[2022-03-05 04:18:56,174] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='SDC-DISTR-NOTIF-TOPIC-AUTO', partitionIndex=0, controllerEpoch=2, leader=0, leaderEpoch=2, isr=[0], zkVersion=2, replicas=[0], offlineReplicas=[]) for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 4 (state.change.logger)
[2022-03-05 04:18:56,174] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='org.onap.dmaap.mr.PNF_READY', partitionIndex=1, controllerEpoch=2, leader=1, leaderEpoch=2, isr=[1], zkVersion=2, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 4 (state.change.logger)
[2022-03-05 04:18:56,174] INFO [Broker id=1] Add 4 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 4 (state.change.logger)
[2022-03-05 04:18:56,238] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=21, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 5 (state.change.logger)
[2022-03-05 04:18:56,238] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 5 (state.change.logger)
[2022-03-05 04:18:56,238] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=48, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 5 (state.change.logger)
[2022-03-05 04:18:56,238] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=18, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 5 (state.change.logger)
[2022-03-05 04:18:56,238] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=27, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 5 (state.change.logger)
[2022-03-05 04:18:56,238] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=39, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2], zkVersion=2, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 5 (state.change.logger)
[2022-03-05 04:18:56,238] INFO [Broker id=1] Add 6 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 5 (state.change.logger)
[2022-03-05 04:18:56,265] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-NOTIFICATION-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 6 (state.change.logger)
[2022-03-05 04:18:56,265] INFO [Broker id=1] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 6 (state.change.logger)
[2022-03-05 04:18:56,304] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=9, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 7 (state.change.logger)
[2022-03-05 04:18:56,304] INFO [Broker id=1] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 7 (state.change.logger)
[2022-03-05 04:18:56,344] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=12, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 8 (state.change.logger)
[2022-03-05 04:18:56,344] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=24, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 8 (state.change.logger)
[2022-03-05 04:18:56,345] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=27, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 8 (state.change.logger)
[2022-03-05 04:18:56,345] INFO [Broker id=1] Add 3 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 8 (state.change.logger)
[2022-03-05 04:18:56,406] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 9 (state.change.logger)
[2022-03-05 04:18:56,406] INFO [Broker id=1] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 9 (state.change.logger)
[2022-03-05 04:18:56,478] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-NOTIFICATION-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:56,478] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=9, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1, 0], zkVersion=4, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:56,478] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=42, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:56,478] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=12, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:56,478] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=24, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1, 0], zkVersion=4, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:56,479] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=33, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:56,479] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=3, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:56,479] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=18, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:56,479] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=27, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1, 0], zkVersion=4, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:56,479] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=39, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:56,479] INFO [Broker id=1] Add 10 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 10 (state.change.logger)
[2022-03-05 04:18:57,433] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 1, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 11 (state.change.logger)
[2022-03-05 04:18:57,433] INFO [Broker id=1] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 11 (state.change.logger)
[2022-03-05 04:18:58,017] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,017] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=42, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,017] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=21, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,017] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=30, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,017] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=45, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,017] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=41, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,017] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,017] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=29, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=37, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=33, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=15, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-15 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=48, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=7, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=3, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=36, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-36 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=10, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-10 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=22, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-22 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=18, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,018] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=39, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,019] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=6, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1], zkVersion=3, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,019] INFO [Broker id=1] Add 20 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 12 (state.change.logger)
[2022-03-05 04:18:58,274] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=48, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 13 (state.change.logger)
[2022-03-05 04:18:58,274] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=7, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1, 0], zkVersion=5, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 13 (state.change.logger)
[2022-03-05 04:18:58,274] INFO [Broker id=1] Add 2 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 13 (state.change.logger)
[2022-03-05 04:18:59,537] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[1, 0, 2], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,537] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-PDP-PAP', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[0, 2, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,537] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 0], zkVersion=4, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-NOTIFICATION-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,537] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-NOTIFICATION', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 0], zkVersion=4, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-NOTIFICATION-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,537] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,538] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1, 0], zkVersion=4, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,538] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='POLICY-CLRUNTIME-PARTICIPANT', partitionIndex=2, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-CLRUNTIME-PARTICIPANT-2 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,538] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=46, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,538] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=21, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,538] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=30, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 0, 1], zkVersion=4, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,538] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=26, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 0], zkVersion=4, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,538] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=5, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 0], zkVersion=4, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-5 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,538] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=38, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 0], zkVersion=4, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-38 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,539] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=1, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 0], zkVersion=4, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,539] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=34, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 0], zkVersion=4, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-34 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,539] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=16, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 0], zkVersion=4, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,539] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=45, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1, 0], zkVersion=4, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,539] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=41, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1, 0], zkVersion=5, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,539] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=20, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 0], zkVersion=4, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-20 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,539] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=49, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-49 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,539] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=0, controllerEpoch=2, leader=2, leaderEpoch=2, isr=[2, 1, 0], zkVersion=4, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,540] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=29, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1, 0], zkVersion=5, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,540] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=25, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-25 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,540] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=8, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1], zkVersion=4, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-8 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,540] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=37, controllerEpoch=2, leader=2, leaderEpoch=3, isr=[2, 1, 0], zkVersion=5, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 2 epoch 2 with correlation id 14 (state.change.logger)
[2022-03-05 04:18:59,540] TRACE [Broker id=1] Cached leader info UpdateMetadataPartitionState(topicName='__consumer_offsets', partitionIndex=4, controllerEpoch