+ export KAFKA_BROKER_ID=2
+ cp /opt/app/osaaf/local/cadi.properties /etc/kafka/data/cadi.properties
+ export KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://10.253.0.253:30492,INTERNAL_SASL_PLAINTEXT://:9092
+ exec /etc/confluent/docker/run
===> ENV Variables ...
A1POLICYMANAGEMENT_EXTERNAL_PORT=tcp://10.233.17.1:8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP=tcp://10.233.17.1:8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_ADDR=10.233.17.1
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PORT=8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PROTO=tcp
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_HOST=10.233.17.1
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT=8433
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT_HTTPS_API=8433
A1POLICYMANAGEMENT_PORT=tcp://10.233.43.179:8433
A1POLICYMANAGEMENT_PORT_8081_TCP=tcp://10.233.43.179:8081
A1POLICYMANAGEMENT_PORT_8081_TCP_ADDR=10.233.43.179
A1POLICYMANAGEMENT_PORT_8081_TCP_PORT=8081
A1POLICYMANAGEMENT_PORT_8081_TCP_PROTO=tcp
A1POLICYMANAGEMENT_PORT_8433_TCP=tcp://10.233.43.179:8433
A1POLICYMANAGEMENT_PORT_8433_TCP_ADDR=10.233.43.179
A1POLICYMANAGEMENT_PORT_8433_TCP_PORT=8433
A1POLICYMANAGEMENT_PORT_8433_TCP_PROTO=tcp
A1POLICYMANAGEMENT_SERVICE_HOST=10.233.43.179
A1POLICYMANAGEMENT_SERVICE_PORT=8433
A1POLICYMANAGEMENT_SERVICE_PORT_HTTPS_API=8433
A1POLICYMANAGEMENT_SERVICE_PORT_HTTP_API=8081
AAF_CASS_PORT=tcp://10.233.0.33:7000
AAF_CASS_PORT_7000_TCP=tcp://10.233.0.33:7000
AAF_CASS_PORT_7000_TCP_ADDR=10.233.0.33
AAF_CASS_PORT_7000_TCP_PORT=7000
AAF_CASS_PORT_7000_TCP_PROTO=tcp
AAF_CASS_PORT_7001_TCP=tcp://10.233.0.33:7001
AAF_CASS_PORT_7001_TCP_ADDR=10.233.0.33
AAF_CASS_PORT_7001_TCP_PORT=7001
AAF_CASS_PORT_7001_TCP_PROTO=tcp
AAF_CASS_PORT_9042_TCP=tcp://10.233.0.33:9042
AAF_CASS_PORT_9042_TCP_ADDR=10.233.0.33
AAF_CASS_PORT_9042_TCP_PORT=9042
AAF_CASS_PORT_9042_TCP_PROTO=tcp
AAF_CASS_PORT_9160_TCP=tcp://10.233.0.33:9160
AAF_CASS_PORT_9160_TCP_ADDR=10.233.0.33
AAF_CASS_PORT_9160_TCP_PORT=9160
AAF_CASS_PORT_9160_TCP_PROTO=tcp
AAF_CASS_SERVICE_HOST=10.233.0.33
AAF_CASS_SERVICE_PORT=7000
AAF_CASS_SERVICE_PORT_TCP_CQL=9042
AAF_CASS_SERVICE_PORT_TCP_INTRA=7000
AAF_CASS_SERVICE_PORT_TCP_THRIFT=9160
AAF_CASS_SERVICE_PORT_TLS=7001
AAF_CM_PORT=tcp://10.233.28.40:8150
AAF_CM_PORT_8150_TCP=tcp://10.233.28.40:8150
AAF_CM_PORT_8150_TCP_ADDR=10.233.28.40
AAF_CM_PORT_8150_TCP_PORT=8150
AAF_CM_PORT_8150_TCP_PROTO=tcp
AAF_CM_SERVICE_HOST=10.233.28.40
AAF_CM_SERVICE_PORT=8150
AAF_CM_SERVICE_PORT_API=8150
AAF_FS_PORT=tcp://10.233.26.238:8096
AAF_FS_PORT_8096_TCP=tcp://10.233.26.238:8096
AAF_FS_PORT_8096_TCP_ADDR=10.233.26.238
AAF_FS_PORT_8096_TCP_PORT=8096
AAF_FS_PORT_8096_TCP_PROTO=tcp
AAF_FS_SERVICE_HOST=10.233.26.238
AAF_FS_SERVICE_PORT=8096
AAF_FS_SERVICE_PORT_API=8096
AAF_GUI_PORT=tcp://10.233.4.219:8200
AAF_GUI_PORT_8200_TCP=tcp://10.233.4.219:8200
AAF_GUI_PORT_8200_TCP_ADDR=10.233.4.219
AAF_GUI_PORT_8200_TCP_PORT=8200
AAF_GUI_PORT_8200_TCP_PROTO=tcp
AAF_GUI_SERVICE_HOST=10.233.4.219
AAF_GUI_SERVICE_PORT=8200
AAF_GUI_SERVICE_PORT_GUI=8200
AAF_LOCATE_PORT=tcp://10.233.33.73:8095
AAF_LOCATE_PORT_8095_TCP=tcp://10.233.33.73:8095
AAF_LOCATE_PORT_8095_TCP_ADDR=10.233.33.73
AAF_LOCATE_PORT_8095_TCP_PORT=8095
AAF_LOCATE_PORT_8095_TCP_PROTO=tcp
AAF_LOCATE_SERVICE_HOST=10.233.33.73
AAF_LOCATE_SERVICE_PORT=8095
AAF_LOCATE_SERVICE_PORT_API=8095
AAF_OAUTH_PORT=tcp://10.233.61.170:8140
AAF_OAUTH_PORT_8140_TCP=tcp://10.233.61.170:8140
AAF_OAUTH_PORT_8140_TCP_ADDR=10.233.61.170
AAF_OAUTH_PORT_8140_TCP_PORT=8140
AAF_OAUTH_PORT_8140_TCP_PROTO=tcp
AAF_OAUTH_SERVICE_HOST=10.233.61.170
AAF_OAUTH_SERVICE_PORT=8140
AAF_OAUTH_SERVICE_PORT_API=8140
AAF_SERVICE_PORT=tcp://10.233.4.242:8100
AAF_SERVICE_PORT_8100_TCP=tcp://10.233.4.242:8100
AAF_SERVICE_PORT_8100_TCP_ADDR=10.233.4.242
AAF_SERVICE_PORT_8100_TCP_PORT=8100
AAF_SERVICE_PORT_8100_TCP_PROTO=tcp
AAF_SERVICE_SERVICE_HOST=10.233.4.242
AAF_SERVICE_SERVICE_PORT=8100
AAF_SERVICE_SERVICE_PORT_API=8100
AAF_SMS_DB_PORT=tcp://10.233.7.210:8200
AAF_SMS_DB_PORT_8200_TCP=tcp://10.233.7.210:8200
AAF_SMS_DB_PORT_8200_TCP_ADDR=10.233.7.210
AAF_SMS_DB_PORT_8200_TCP_PORT=8200
AAF_SMS_DB_PORT_8200_TCP_PROTO=tcp
AAF_SMS_DB_SERVICE_HOST=10.233.7.210
AAF_SMS_DB_SERVICE_PORT=8200
AAF_SMS_DB_SERVICE_PORT_AAF_SMS_DB=8200
AAF_SMS_PORT=tcp://10.233.17.90:10443
AAF_SMS_PORT_10443_TCP=tcp://10.233.17.90:10443
AAF_SMS_PORT_10443_TCP_ADDR=10.233.17.90
AAF_SMS_PORT_10443_TCP_PORT=10443
AAF_SMS_PORT_10443_TCP_PROTO=tcp
AAF_SMS_SERVICE_HOST=10.233.17.90
AAF_SMS_SERVICE_PORT=10443
AAI_BABEL_PORT=tcp://10.233.18.175:9516
AAI_BABEL_PORT_9516_TCP=tcp://10.233.18.175:9516
AAI_BABEL_PORT_9516_TCP_ADDR=10.233.18.175
AAI_BABEL_PORT_9516_TCP_PORT=9516
AAI_BABEL_PORT_9516_TCP_PROTO=tcp
AAI_BABEL_SERVICE_HOST=10.233.18.175
AAI_BABEL_SERVICE_PORT=9516
AAI_BABEL_SERVICE_PORT_BABEL=9516
AAI_MODELLOADER_PORT=tcp://10.233.45.209:8080
AAI_MODELLOADER_PORT_8080_TCP=tcp://10.233.45.209:8080
AAI_MODELLOADER_PORT_8080_TCP_ADDR=10.233.45.209
AAI_MODELLOADER_PORT_8080_TCP_PORT=8080
AAI_MODELLOADER_PORT_8080_TCP_PROTO=tcp
AAI_MODELLOADER_PORT_8443_TCP=tcp://10.233.45.209:8443
AAI_MODELLOADER_PORT_8443_TCP_ADDR=10.233.45.209
AAI_MODELLOADER_PORT_8443_TCP_PORT=8443
AAI_MODELLOADER_PORT_8443_TCP_PROTO=tcp
AAI_MODELLOADER_SERVICE_HOST=10.233.45.209
AAI_MODELLOADER_SERVICE_PORT=8080
AAI_MODELLOADER_SERVICE_PORT_AAI_MODELLOADER=8080
AAI_MODELLOADER_SERVICE_PORT_AAI_MODELLOADER_SSL=8443
AAI_PORT=tcp://10.233.3.135:8443
AAI_PORT_8443_TCP=tcp://10.233.3.135:8443
AAI_PORT_8443_TCP_ADDR=10.233.3.135
AAI_PORT_8443_TCP_PORT=8443
AAI_PORT_8443_TCP_PROTO=tcp
AAI_RESOURCES_PORT=tcp://10.233.35.143:8447
AAI_RESOURCES_PORT_5005_TCP=tcp://10.233.35.143:5005
AAI_RESOURCES_PORT_5005_TCP_ADDR=10.233.35.143
AAI_RESOURCES_PORT_5005_TCP_PORT=5005
AAI_RESOURCES_PORT_5005_TCP_PROTO=tcp
AAI_RESOURCES_PORT_8447_TCP=tcp://10.233.35.143:8447
AAI_RESOURCES_PORT_8447_TCP_ADDR=10.233.35.143
AAI_RESOURCES_PORT_8447_TCP_PORT=8447
AAI_RESOURCES_PORT_8447_TCP_PROTO=tcp
AAI_RESOURCES_SERVICE_HOST=10.233.35.143
AAI_RESOURCES_SERVICE_PORT=8447
AAI_RESOURCES_SERVICE_PORT_AAI_RESOURCES_5005=5005
AAI_RESOURCES_SERVICE_PORT_AAI_RESOURCES_8447=8447
AAI_SERVICE_HOST=10.233.3.135
AAI_SERVICE_PORT=8443
AAI_SERVICE_PORT_AAI_SSL=8443
AAI_SPARKY_BE_PORT=tcp://10.233.58.164:8000
AAI_SPARKY_BE_PORT_8000_TCP=tcp://10.233.58.164:8000
AAI_SPARKY_BE_PORT_8000_TCP_ADDR=10.233.58.164
AAI_SPARKY_BE_PORT_8000_TCP_PORT=8000
AAI_SPARKY_BE_PORT_8000_TCP_PROTO=tcp
AAI_SPARKY_BE_SERVICE_HOST=10.233.58.164
AAI_SPARKY_BE_SERVICE_PORT=8000
AAI_SPARKY_BE_SERVICE_PORT_AAI_SPARKY_BE=8000
AAI_TRAVERSAL_PORT=tcp://10.233.28.50:8446
AAI_TRAVERSAL_PORT_5005_TCP=tcp://10.233.28.50:5005
AAI_TRAVERSAL_PORT_5005_TCP_ADDR=10.233.28.50
AAI_TRAVERSAL_PORT_5005_TCP_PORT=5005
AAI_TRAVERSAL_PORT_5005_TCP_PROTO=tcp
AAI_TRAVERSAL_PORT_8446_TCP=tcp://10.233.28.50:8446
AAI_TRAVERSAL_PORT_8446_TCP_ADDR=10.233.28.50
AAI_TRAVERSAL_PORT_8446_TCP_PORT=8446
AAI_TRAVERSAL_PORT_8446_TCP_PROTO=tcp
AAI_TRAVERSAL_SERVICE_HOST=10.233.28.50
AAI_TRAVERSAL_SERVICE_PORT=8446
AAI_TRAVERSAL_SERVICE_PORT_AAI_TRAVERSAL_5005=5005
AAI_TRAVERSAL_SERVICE_PORT_AAI_TRAVERSAL_8446=8446
ALLOW_UNSIGNED=false
AWX_POSTGRESQL_PORT=tcp://10.233.21.68:5432
AWX_POSTGRESQL_PORT_5432_TCP=tcp://10.233.21.68:5432
AWX_POSTGRESQL_PORT_5432_TCP_ADDR=10.233.21.68
AWX_POSTGRESQL_PORT_5432_TCP_PORT=5432
AWX_POSTGRESQL_PORT_5432_TCP_PROTO=tcp
AWX_POSTGRESQL_SERVICE_HOST=10.233.21.68
AWX_POSTGRESQL_SERVICE_PORT=5432
AWX_POSTGRESQL_SERVICE_PORT_AWX_POSTGRESQL=5432
AWX_RABBITMQ_PORT=tcp://10.233.8.74:15672
AWX_RABBITMQ_PORT_15672_TCP=tcp://10.233.8.74:15672
AWX_RABBITMQ_PORT_15672_TCP_ADDR=10.233.8.74
AWX_RABBITMQ_PORT_15672_TCP_PORT=15672
AWX_RABBITMQ_PORT_15672_TCP_PROTO=tcp
AWX_RABBITMQ_PORT_5672_TCP=tcp://10.233.8.74:5672
AWX_RABBITMQ_PORT_5672_TCP_ADDR=10.233.8.74
AWX_RABBITMQ_PORT_5672_TCP_PORT=5672
AWX_RABBITMQ_PORT_5672_TCP_PROTO=tcp
AWX_RABBITMQ_SERVICE_HOST=10.233.8.74
AWX_RABBITMQ_SERVICE_PORT=15672
AWX_RABBITMQ_SERVICE_PORT_AMQP=5672
AWX_RABBITMQ_SERVICE_PORT_HTTP=15672
AWX_RMQ_MGMT_PORT=tcp://10.233.25.235:15672
AWX_RMQ_MGMT_PORT_15672_TCP=tcp://10.233.25.235:15672
AWX_RMQ_MGMT_PORT_15672_TCP_ADDR=10.233.25.235
AWX_RMQ_MGMT_PORT_15672_TCP_PORT=15672
AWX_RMQ_MGMT_PORT_15672_TCP_PROTO=tcp
AWX_RMQ_MGMT_SERVICE_HOST=10.233.25.235
AWX_RMQ_MGMT_SERVICE_PORT=15672
AWX_RMQ_MGMT_SERVICE_PORT_RMQMGMT=15672
AWX_WEB_PORT=tcp://10.233.25.21:8052
AWX_WEB_PORT_8052_TCP=tcp://10.233.25.21:8052
AWX_WEB_PORT_8052_TCP_ADDR=10.233.25.21
AWX_WEB_PORT_8052_TCP_PORT=8052
AWX_WEB_PORT_8052_TCP_PROTO=tcp
AWX_WEB_SERVICE_HOST=10.233.25.21
AWX_WEB_SERVICE_PORT=8052
AWX_WEB_SERVICE_PORT_WEB=8052
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT=tcp://10.233.38.89:5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP=tcp://10.233.38.89:5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_ADDR=10.233.38.89
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PORT=5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_HOST=10.233.38.89
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT=5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT_BLUEPRINTS_PROCESSOR_CLUSTER=5701
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT=tcp://10.233.35.187:9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP=tcp://10.233.35.187:9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_ADDR=10.233.35.187
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PORT=9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_HOST=10.233.35.187
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT=9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT_BLUEPRINTS_PROCESSOR_GRPC=9111
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT=tcp://10.233.4.16:8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP=tcp://10.233.4.16:8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_ADDR=10.233.4.16
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PORT=8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_HOST=10.233.4.16
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT=8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT_BLUEPRINTS_PROCESSOR_HTTP=8080
CDS_COMMAND_EXECUTOR_PORT=tcp://10.233.49.1:50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP=tcp://10.233.49.1:50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_ADDR=10.233.49.1
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PORT=50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PROTO=tcp
CDS_COMMAND_EXECUTOR_SERVICE_HOST=10.233.49.1
CDS_COMMAND_EXECUTOR_SERVICE_PORT=50051
CDS_COMMAND_EXECUTOR_SERVICE_PORT_COMMAND_EXECUTOR_GRPC=50051
CDS_DB_PORT=tcp://10.233.31.46:3306
CDS_DB_PORT_3306_TCP=tcp://10.233.31.46:3306
CDS_DB_PORT_3306_TCP_ADDR=10.233.31.46
CDS_DB_PORT_3306_TCP_PORT=3306
CDS_DB_PORT_3306_TCP_PROTO=tcp
CDS_DB_SERVICE_HOST=10.233.31.46
CDS_DB_SERVICE_PORT=3306
CDS_DB_SERVICE_PORT_MYSQL=3306
CDS_PY_EXECUTOR_PORT=tcp://10.233.9.58:50052
CDS_PY_EXECUTOR_PORT_50052_TCP=tcp://10.233.9.58:50052
CDS_PY_EXECUTOR_PORT_50052_TCP_ADDR=10.233.9.58
CDS_PY_EXECUTOR_PORT_50052_TCP_PORT=50052
CDS_PY_EXECUTOR_PORT_50052_TCP_PROTO=tcp
CDS_PY_EXECUTOR_PORT_50053_TCP=tcp://10.233.9.58:50053
CDS_PY_EXECUTOR_PORT_50053_TCP_ADDR=10.233.9.58
CDS_PY_EXECUTOR_PORT_50053_TCP_PORT=50053
CDS_PY_EXECUTOR_PORT_50053_TCP_PROTO=tcp
CDS_PY_EXECUTOR_SERVICE_HOST=10.233.9.58
CDS_PY_EXECUTOR_SERVICE_PORT=50052
CDS_PY_EXECUTOR_SERVICE_PORT_EXECUTOR_GRPC=50052
CDS_PY_EXECUTOR_SERVICE_PORT_MANAGER_GRPC=50053
CDS_SDC_LISTENER_PORT=tcp://10.233.22.47:8080
CDS_SDC_LISTENER_PORT_8080_TCP=tcp://10.233.22.47:8080
CDS_SDC_LISTENER_PORT_8080_TCP_ADDR=10.233.22.47
CDS_SDC_LISTENER_PORT_8080_TCP_PORT=8080
CDS_SDC_LISTENER_PORT_8080_TCP_PROTO=tcp
CDS_SDC_LISTENER_SERVICE_HOST=10.233.22.47
CDS_SDC_LISTENER_SERVICE_PORT=8080
CDS_SDC_LISTENER_SERVICE_PORT_CDS_SDC_LISTENER_HTTP=8080
CDS_UI_PORT=tcp://10.233.7.158:3000
CDS_UI_PORT_3000_TCP=tcp://10.233.7.158:3000
CDS_UI_PORT_3000_TCP_ADDR=10.233.7.158
CDS_UI_PORT_3000_TCP_PORT=3000
CDS_UI_PORT_3000_TCP_PROTO=tcp
CDS_UI_SERVICE_HOST=10.233.7.158
CDS_UI_SERVICE_PORT=3000
CDS_UI_SERVICE_PORT_CDS_UI_3000=3000
CHART_MUSEUM_PORT=tcp://10.233.55.247:80
CHART_MUSEUM_PORT_80_TCP=tcp://10.233.55.247:80
CHART_MUSEUM_PORT_80_TCP_ADDR=10.233.55.247
CHART_MUSEUM_PORT_80_TCP_PORT=80
CHART_MUSEUM_PORT_80_TCP_PROTO=tcp
CHART_MUSEUM_SERVICE_HOST=10.233.55.247
CHART_MUSEUM_SERVICE_PORT=80
CHART_MUSEUM_SERVICE_PORT_HTTP=80
CLI_PORT=tcp://10.233.42.135:443
CLI_PORT_443_TCP=tcp://10.233.42.135:443
CLI_PORT_443_TCP_ADDR=10.233.42.135
CLI_PORT_443_TCP_PORT=443
CLI_PORT_443_TCP_PROTO=tcp
CLI_PORT_9090_TCP=tcp://10.233.42.135:9090
CLI_PORT_9090_TCP_ADDR=10.233.42.135
CLI_PORT_9090_TCP_PORT=9090
CLI_PORT_9090_TCP_PROTO=tcp
CLI_SERVICE_HOST=10.233.42.135
CLI_SERVICE_PORT=443
CLI_SERVICE_PORT_CLI443=443
CLI_SERVICE_PORT_CLI9090=9090
COMPONENT=kafka
CONFIG_BINDING_SERVICE_PORT=tcp://10.233.47.199:10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP=tcp://10.233.47.199:10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP_ADDR=10.233.47.199
CONFIG_BINDING_SERVICE_PORT_10000_TCP_PORT=10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP_PROTO=tcp
CONFIG_BINDING_SERVICE_PORT_10443_TCP=tcp://10.233.47.199:10443
CONFIG_BINDING_SERVICE_PORT_10443_TCP_ADDR=10.233.47.199
CONFIG_BINDING_SERVICE_PORT_10443_TCP_PORT=10443
CONFIG_BINDING_SERVICE_PORT_10443_TCP_PROTO=tcp
CONFIG_BINDING_SERVICE_SERVICE_HOST=10.233.47.199
CONFIG_BINDING_SERVICE_SERVICE_PORT=10000
CONFIG_BINDING_SERVICE_SERVICE_PORT_CONFIG_BINDING_SERVICE_INSECURE=10000
CONFIG_BINDING_SERVICE_SERVICE_PORT_CONFIG_BINDING_SERVICE_SECURE=10443
CONFLUENT_DEB_VERSION=1
CONFLUENT_MAJOR_VERSION=5
CONFLUENT_MINOR_VERSION=3
CONFLUENT_MVN_LABEL=
CONFLUENT_PATCH_VERSION=1
CONFLUENT_PLATFORM_LABEL=
CONFLUENT_VERSION=5.3.1
CONSUL_SERVER_UI_PORT=tcp://10.233.35.224:8500
CONSUL_SERVER_UI_PORT_8500_TCP=tcp://10.233.35.224:8500
CONSUL_SERVER_UI_PORT_8500_TCP_ADDR=10.233.35.224
CONSUL_SERVER_UI_PORT_8500_TCP_PORT=8500
CONSUL_SERVER_UI_PORT_8500_TCP_PROTO=tcp
CONSUL_SERVER_UI_SERVICE_HOST=10.233.35.224
CONSUL_SERVER_UI_SERVICE_PORT=8500
CONSUL_SERVER_UI_SERVICE_PORT_CONSUL_UI=8500
CPS_CORE_PORT=tcp://10.233.28.251:8080
CPS_CORE_PORT_8080_TCP=tcp://10.233.28.251:8080
CPS_CORE_PORT_8080_TCP_ADDR=10.233.28.251
CPS_CORE_PORT_8080_TCP_PORT=8080
CPS_CORE_PORT_8080_TCP_PROTO=tcp
CPS_CORE_SERVICE_HOST=10.233.28.251
CPS_CORE_SERVICE_PORT=8080
CPS_CORE_SERVICE_PORT_HTTP=8080
CPS_PG_PRIMARY_PORT=tcp://10.233.37.243:5432
CPS_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.37.243:5432
CPS_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.37.243
CPS_PG_PRIMARY_PORT_5432_TCP_PORT=5432
CPS_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
CPS_PG_PRIMARY_SERVICE_HOST=10.233.37.243
CPS_PG_PRIMARY_SERVICE_PORT=5432
CPS_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
CPS_PG_REPLICA_PORT=tcp://10.233.59.191:5432
CPS_PG_REPLICA_PORT_5432_TCP=tcp://10.233.59.191:5432
CPS_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.59.191
CPS_PG_REPLICA_PORT_5432_TCP_PORT=5432
CPS_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
CPS_PG_REPLICA_SERVICE_HOST=10.233.59.191
CPS_PG_REPLICA_SERVICE_PORT=5432
CPS_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
CPS_POSTGRES_PORT=tcp://10.233.31.123:5432
CPS_POSTGRES_PORT_5432_TCP=tcp://10.233.31.123:5432
CPS_POSTGRES_PORT_5432_TCP_ADDR=10.233.31.123
CPS_POSTGRES_PORT_5432_TCP_PORT=5432
CPS_POSTGRES_PORT_5432_TCP_PROTO=tcp
CPS_POSTGRES_SERVICE_HOST=10.233.31.123
CPS_POSTGRES_SERVICE_PORT=5432
CPS_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
CUB_CLASSPATH=/etc/confluent/docker/docker-utils.jar
DASHBOARD_PORT=tcp://10.233.47.207:8443
DASHBOARD_PORT_8443_TCP=tcp://10.233.47.207:8443
DASHBOARD_PORT_8443_TCP_ADDR=10.233.47.207
DASHBOARD_PORT_8443_TCP_PORT=8443
DASHBOARD_PORT_8443_TCP_PROTO=tcp
DASHBOARD_SERVICE_HOST=10.233.47.207
DASHBOARD_SERVICE_PORT=8443
DASHBOARD_SERVICE_PORT_DASHBOARD=8443
DBC_PG_PRIMARY_PORT=tcp://10.233.4.73:5432
DBC_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.4.73:5432
DBC_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.4.73
DBC_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DBC_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DBC_PG_PRIMARY_SERVICE_HOST=10.233.4.73
DBC_PG_PRIMARY_SERVICE_PORT=5432
DBC_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DBC_PG_REPLICA_PORT=tcp://10.233.42.26:5432
DBC_PG_REPLICA_PORT_5432_TCP=tcp://10.233.42.26:5432
DBC_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.42.26
DBC_PG_REPLICA_PORT_5432_TCP_PORT=5432
DBC_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DBC_PG_REPLICA_SERVICE_HOST=10.233.42.26
DBC_PG_REPLICA_SERVICE_PORT=5432
DBC_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DBC_POSTGRES_PORT=tcp://10.233.9.134:5432
DBC_POSTGRES_PORT_5432_TCP=tcp://10.233.9.134:5432
DBC_POSTGRES_PORT_5432_TCP_ADDR=10.233.9.134
DBC_POSTGRES_PORT_5432_TCP_PORT=5432
DBC_POSTGRES_PORT_5432_TCP_PROTO=tcp
DBC_POSTGRES_SERVICE_HOST=10.233.9.134
DBC_POSTGRES_SERVICE_PORT=5432
DBC_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_DESIGNTOOL_PORT=tcp://10.233.28.175:8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP=tcp://10.233.28.175:8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_ADDR=10.233.28.175
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_PORT=8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_PROTO=tcp
DCAEMOD_DESIGNTOOL_SERVICE_HOST=10.233.28.175
DCAEMOD_DESIGNTOOL_SERVICE_PORT=8080
DCAEMOD_DESIGNTOOL_SERVICE_PORT_HTTP=8080
DCAEMOD_DISTRIBUTOR_API_PORT=tcp://10.233.43.142:8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP=tcp://10.233.43.142:8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_ADDR=10.233.43.142
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_PORT=8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_PROTO=tcp
DCAEMOD_DISTRIBUTOR_API_SERVICE_HOST=10.233.43.142
DCAEMOD_DISTRIBUTOR_API_SERVICE_PORT=8080
DCAEMOD_DISTRIBUTOR_API_SERVICE_PORT_HTTP=8080
DCAEMOD_GENPROCESSOR_PORT=tcp://10.233.49.54:8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP=tcp://10.233.49.54:8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_ADDR=10.233.49.54
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_PORT=8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_PROTO=tcp
DCAEMOD_GENPROCESSOR_SERVICE_HOST=10.233.49.54
DCAEMOD_GENPROCESSOR_SERVICE_PORT=8080
DCAEMOD_GENPROCESSOR_SERVICE_PORT_HTTP=8080
DCAEMOD_HEALTHCHECK_PORT=tcp://10.233.59.213:8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP=tcp://10.233.59.213:8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_ADDR=10.233.59.213
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_PORT=8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_PROTO=tcp
DCAEMOD_HEALTHCHECK_SERVICE_HOST=10.233.59.213
DCAEMOD_HEALTHCHECK_SERVICE_PORT=8080
DCAEMOD_HEALTHCHECK_SERVICE_PORT_HTTP=8080
DCAEMOD_NIFI_REGISTRY_PORT=tcp://10.233.16.70:18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP=tcp://10.233.16.70:18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_ADDR=10.233.16.70
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_PORT=18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_PROTO=tcp
DCAEMOD_NIFI_REGISTRY_SERVICE_HOST=10.233.16.70
DCAEMOD_NIFI_REGISTRY_SERVICE_PORT=18080
DCAEMOD_NIFI_REGISTRY_SERVICE_PORT_HTTP=18080
DCAEMOD_ONBOARDING_API_PORT=tcp://10.233.9.166:8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP=tcp://10.233.9.166:8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_ADDR=10.233.9.166
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_PORT=8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_PROTO=tcp
DCAEMOD_ONBOARDING_API_SERVICE_HOST=10.233.9.166
DCAEMOD_ONBOARDING_API_SERVICE_PORT=8080
DCAEMOD_ONBOARDING_API_SERVICE_PORT_HTTP=8080
DCAEMOD_PG_PRIMARY_PORT=tcp://10.233.3.53:5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.3.53:5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.3.53
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAEMOD_PG_PRIMARY_SERVICE_HOST=10.233.3.53
DCAEMOD_PG_PRIMARY_SERVICE_PORT=5432
DCAEMOD_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_PG_REPLICA_PORT=tcp://10.233.62.175:5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP=tcp://10.233.62.175:5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.62.175
DCAEMOD_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAEMOD_PG_REPLICA_SERVICE_HOST=10.233.62.175
DCAEMOD_PG_REPLICA_SERVICE_PORT=5432
DCAEMOD_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_POSTGRES_PORT=tcp://10.233.50.216:5432
DCAEMOD_POSTGRES_PORT_5432_TCP=tcp://10.233.50.216:5432
DCAEMOD_POSTGRES_PORT_5432_TCP_ADDR=10.233.50.216
DCAEMOD_POSTGRES_PORT_5432_TCP_PORT=5432
DCAEMOD_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAEMOD_POSTGRES_SERVICE_HOST=10.233.50.216
DCAEMOD_POSTGRES_SERVICE_PORT=5432
DCAEMOD_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_RUNTIME_API_PORT=tcp://10.233.10.241:9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP=tcp://10.233.10.241:9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP_ADDR=10.233.10.241
DCAEMOD_RUNTIME_API_PORT_9090_TCP_PORT=9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP_PROTO=tcp
DCAEMOD_RUNTIME_API_SERVICE_HOST=10.233.10.241
DCAEMOD_RUNTIME_API_SERVICE_PORT=9090
DCAEMOD_RUNTIME_API_SERVICE_PORT_HTTP=9090
DCAE_CLOUDIFY_MANAGER_PORT=tcp://10.233.41.160:443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP=tcp://10.233.41.160:443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_ADDR=10.233.41.160
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_PORT=443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_PROTO=tcp
DCAE_CLOUDIFY_MANAGER_SERVICE_HOST=10.233.41.160
DCAE_CLOUDIFY_MANAGER_SERVICE_PORT=443
DCAE_CLOUDIFY_MANAGER_SERVICE_PORT_DCAE_CLOUDIFY_MANAGER=443
DCAE_DASHBOARD_PG_PRIMARY_PORT=tcp://10.233.0.4:5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.0.4:5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.0.4
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_HOST=10.233.0.4
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_PORT=5432
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_DASHBOARD_PG_REPLICA_PORT=tcp://10.233.11.226:5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP=tcp://10.233.11.226:5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.11.226
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_PG_REPLICA_SERVICE_HOST=10.233.11.226
DCAE_DASHBOARD_PG_REPLICA_SERVICE_PORT=5432
DCAE_DASHBOARD_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_DASHBOARD_POSTGRES_PORT=tcp://10.233.42.208:5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP=tcp://10.233.42.208:5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_ADDR=10.233.42.208
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_POSTGRES_SERVICE_HOST=10.233.42.208
DCAE_DASHBOARD_POSTGRES_SERVICE_PORT=5432
DCAE_DASHBOARD_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_HEALTHCHECK_PORT=tcp://10.233.56.161:80
DCAE_HEALTHCHECK_PORT_80_TCP=tcp://10.233.56.161:80
DCAE_HEALTHCHECK_PORT_80_TCP_ADDR=10.233.56.161
DCAE_HEALTHCHECK_PORT_80_TCP_PORT=80
DCAE_HEALTHCHECK_PORT_80_TCP_PROTO=tcp
DCAE_HEALTHCHECK_SERVICE_HOST=10.233.56.161
DCAE_HEALTHCHECK_SERVICE_PORT=80
DCAE_HEALTHCHECK_SERVICE_PORT_DCAE_HEALTHCHECK=80
DCAE_HV_VES_COLLECTOR_PORT=tcp://10.233.62.145:6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP=tcp://10.233.62.145:6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_ADDR=10.233.62.145
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_PORT=6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_PROTO=tcp
DCAE_HV_VES_COLLECTOR_SERVICE_HOST=10.233.62.145
DCAE_HV_VES_COLLECTOR_SERVICE_PORT=6061
DCAE_HV_VES_COLLECTOR_SERVICE_PORT_HTTPS_HTTP=6061
DCAE_INV_PG_PRIMARY_PORT=tcp://10.233.52.219:5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.52.219:5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.52.219
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_INV_PG_PRIMARY_SERVICE_HOST=10.233.52.219
DCAE_INV_PG_PRIMARY_SERVICE_PORT=5432
DCAE_INV_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_INV_PG_REPLICA_PORT=tcp://10.233.32.205:5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP=tcp://10.233.32.205:5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.32.205
DCAE_INV_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_INV_PG_REPLICA_SERVICE_HOST=10.233.32.205
DCAE_INV_PG_REPLICA_SERVICE_PORT=5432
DCAE_INV_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_INV_POSTGRES_PORT=tcp://10.233.4.81:5432
DCAE_INV_POSTGRES_PORT_5432_TCP=tcp://10.233.4.81:5432
DCAE_INV_POSTGRES_PORT_5432_TCP_ADDR=10.233.4.81
DCAE_INV_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_INV_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_INV_POSTGRES_SERVICE_HOST=10.233.4.81
DCAE_INV_POSTGRES_SERVICE_PORT=5432
DCAE_INV_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_MONGOHOST_READ_PORT=tcp://10.233.44.158:27017
DCAE_MONGOHOST_READ_PORT_27017_TCP=tcp://10.233.44.158:27017
DCAE_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.233.44.158
DCAE_MONGOHOST_READ_PORT_27017_TCP_PORT=27017
DCAE_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp
DCAE_MONGOHOST_READ_SERVICE_HOST=10.233.44.158
DCAE_MONGOHOST_READ_SERVICE_PORT=27017
DCAE_MONGOHOST_READ_SERVICE_PORT_MONGO=27017
DCAE_MS_HEALTHCHECK_PORT=tcp://10.233.17.211:8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP=tcp://10.233.17.211:8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_ADDR=10.233.17.211
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_PORT=8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_PROTO=tcp
DCAE_MS_HEALTHCHECK_SERVICE_HOST=10.233.17.211
DCAE_MS_HEALTHCHECK_SERVICE_PORT=8080
DCAE_MS_HEALTHCHECK_SERVICE_PORT_HTTP=8080
DCAE_PG_PRIMARY_PORT=tcp://10.233.51.229:5432
DCAE_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.51.229:5432
DCAE_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.51.229
DCAE_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_PG_PRIMARY_SERVICE_HOST=10.233.51.229
DCAE_PG_PRIMARY_SERVICE_PORT=5432
DCAE_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_PG_REPLICA_PORT=tcp://10.233.29.105:5432
DCAE_PG_REPLICA_PORT_5432_TCP=tcp://10.233.29.105:5432
DCAE_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.29.105
DCAE_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_PG_REPLICA_SERVICE_HOST=10.233.29.105
DCAE_PG_REPLICA_SERVICE_PORT=5432
DCAE_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_POSTGRES_PORT=tcp://10.233.9.152:5432
DCAE_POSTGRES_PORT_5432_TCP=tcp://10.233.9.152:5432
DCAE_POSTGRES_PORT_5432_TCP_ADDR=10.233.9.152
DCAE_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_POSTGRES_SERVICE_HOST=10.233.9.152
DCAE_POSTGRES_SERVICE_PORT=5432
DCAE_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_PRH_PORT=tcp://10.233.29.201:8100
DCAE_PRH_PORT_8100_TCP=tcp://10.233.29.201:8100
DCAE_PRH_PORT_8100_TCP_ADDR=10.233.29.201
DCAE_PRH_PORT_8100_TCP_PORT=8100
DCAE_PRH_PORT_8100_TCP_PROTO=tcp
DCAE_PRH_SERVICE_HOST=10.233.29.201
DCAE_PRH_SERVICE_PORT=8100
DCAE_PRH_SERVICE_PORT_HTTP=8100
DCAE_TCAGEN2_PORT=tcp://10.233.5.237:9091
DCAE_TCAGEN2_PORT_9091_TCP=tcp://10.233.5.237:9091
DCAE_TCAGEN2_PORT_9091_TCP_ADDR=10.233.5.237
DCAE_TCAGEN2_PORT_9091_TCP_PORT=9091
DCAE_TCAGEN2_PORT_9091_TCP_PROTO=tcp
DCAE_TCAGEN2_SERVICE_HOST=10.233.5.237
DCAE_TCAGEN2_SERVICE_PORT=9091
DCAE_TCAGEN2_SERVICE_PORT_HTTP=9091
DCAE_VES_COLLECTOR_PORT=tcp://10.233.53.77:8443
DCAE_VES_COLLECTOR_PORT_8443_TCP=tcp://10.233.53.77:8443
DCAE_VES_COLLECTOR_PORT_8443_TCP_ADDR=10.233.53.77
DCAE_VES_COLLECTOR_PORT_8443_TCP_PORT=8443
DCAE_VES_COLLECTOR_PORT_8443_TCP_PROTO=tcp
DCAE_VES_COLLECTOR_SERVICE_HOST=10.233.53.77
DCAE_VES_COLLECTOR_SERVICE_PORT=8443
DCAE_VES_COLLECTOR_SERVICE_PORT_HTTPS_HTTP=8443
DEPLOYMENT_HANDLER_PORT=tcp://10.233.30.95:8443
DEPLOYMENT_HANDLER_PORT_8443_TCP=tcp://10.233.30.95:8443
DEPLOYMENT_HANDLER_PORT_8443_TCP_ADDR=10.233.30.95
DEPLOYMENT_HANDLER_PORT_8443_TCP_PORT=8443
DEPLOYMENT_HANDLER_PORT_8443_TCP_PROTO=tcp
DEPLOYMENT_HANDLER_SERVICE_HOST=10.233.30.95
DEPLOYMENT_HANDLER_SERVICE_PORT=8443
DEPLOYMENT_HANDLER_SERVICE_PORT_DEPLOYMENT_HANDLER=8443
DMAAP_BC_PORT=tcp://10.233.51.16:8443
DMAAP_BC_PORT_8443_TCP=tcp://10.233.51.16:8443
DMAAP_BC_PORT_8443_TCP_ADDR=10.233.51.16
DMAAP_BC_PORT_8443_TCP_PORT=8443
DMAAP_BC_PORT_8443_TCP_PROTO=tcp
DMAAP_BC_SERVICE_HOST=10.233.51.16
DMAAP_BC_SERVICE_PORT=8443
DMAAP_BC_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_DB_PORT=tcp://10.233.17.115:3306
DMAAP_DR_DB_PORT_3306_TCP=tcp://10.233.17.115:3306
DMAAP_DR_DB_PORT_3306_TCP_ADDR=10.233.17.115
DMAAP_DR_DB_PORT_3306_TCP_PORT=3306
DMAAP_DR_DB_PORT_3306_TCP_PROTO=tcp
DMAAP_DR_DB_SERVICE_HOST=10.233.17.115
DMAAP_DR_DB_SERVICE_PORT=3306
DMAAP_DR_DB_SERVICE_PORT_MYSQL=3306
DMAAP_DR_NODE_EXTERNAL_PORT=tcp://10.233.41.233:8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP=tcp://10.233.41.233:8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_ADDR=10.233.41.233
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PORT=8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PROTO=tcp
DMAAP_DR_NODE_EXTERNAL_SERVICE_HOST=10.233.41.233
DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT=8443
DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_NODE_PORT=tcp://10.233.13.73:8443
DMAAP_DR_NODE_PORT_8080_TCP=tcp://10.233.13.73:8080
DMAAP_DR_NODE_PORT_8080_TCP_ADDR=10.233.13.73
DMAAP_DR_NODE_PORT_8080_TCP_PORT=8080
DMAAP_DR_NODE_PORT_8080_TCP_PROTO=tcp
DMAAP_DR_NODE_PORT_8443_TCP=tcp://10.233.13.73:8443
DMAAP_DR_NODE_PORT_8443_TCP_ADDR=10.233.13.73
DMAAP_DR_NODE_PORT_8443_TCP_PORT=8443
DMAAP_DR_NODE_PORT_8443_TCP_PROTO=tcp
DMAAP_DR_NODE_SERVICE_HOST=10.233.13.73
DMAAP_DR_NODE_SERVICE_PORT=8443
DMAAP_DR_NODE_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_NODE_SERVICE_PORT_HTTP_API=8080
DMAAP_DR_PROV_PORT=tcp://10.233.38.209:443
DMAAP_DR_PROV_PORT_443_TCP=tcp://10.233.38.209:443
DMAAP_DR_PROV_PORT_443_TCP_ADDR=10.233.38.209
DMAAP_DR_PROV_PORT_443_TCP_PORT=443
DMAAP_DR_PROV_PORT_443_TCP_PROTO=tcp
DMAAP_DR_PROV_SERVICE_HOST=10.233.38.209
DMAAP_DR_PROV_SERVICE_PORT=443
DMAAP_DR_PROV_SERVICE_PORT_DR_PROV_PORT2=443
EJBCA_PORT=tcp://10.233.15.215:8443
EJBCA_PORT_8080_TCP=tcp://10.233.15.215:8080
EJBCA_PORT_8080_TCP_ADDR=10.233.15.215
EJBCA_PORT_8080_TCP_PORT=8080
EJBCA_PORT_8080_TCP_PROTO=tcp
EJBCA_PORT_8443_TCP=tcp://10.233.15.215:8443
EJBCA_PORT_8443_TCP_ADDR=10.233.15.215
EJBCA_PORT_8443_TCP_PORT=8443
EJBCA_PORT_8443_TCP_PROTO=tcp
EJBCA_SERVICE_HOST=10.233.15.215
EJBCA_SERVICE_PORT=8443
EJBCA_SERVICE_PORT_HTTPS_API=8443
EJBCA_SERVICE_PORT_HTTP_API=8080
ESR_GUI_PORT=tcp://10.233.31.150:8080
ESR_GUI_PORT_8080_TCP=tcp://10.233.31.150:8080
ESR_GUI_PORT_8080_TCP_ADDR=10.233.31.150
ESR_GUI_PORT_8080_TCP_PORT=8080
ESR_GUI_PORT_8080_TCP_PROTO=tcp
ESR_GUI_SERVICE_HOST=10.233.31.150
ESR_GUI_SERVICE_PORT=8080
ESR_GUI_SERVICE_PORT_ESR_GUI=8080
ESR_SERVER_PORT=tcp://10.233.7.17:9518
ESR_SERVER_PORT_9518_TCP=tcp://10.233.7.17:9518
ESR_SERVER_PORT_9518_TCP_ADDR=10.233.7.17
ESR_SERVER_PORT_9518_TCP_PORT=9518
ESR_SERVER_PORT_9518_TCP_PROTO=tcp
ESR_SERVER_SERVICE_HOST=10.233.7.17
ESR_SERVER_SERVICE_PORT=9518
ESR_SERVER_SERVICE_PORT_ESR_SERVER=9518
HOLMES_ENGINE_MGMT_PORT=tcp://10.233.20.135:9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP=tcp://10.233.20.135:9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP_ADDR=10.233.20.135
HOLMES_ENGINE_MGMT_PORT_9102_TCP_PORT=9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP_PROTO=tcp
HOLMES_ENGINE_MGMT_SERVICE_HOST=10.233.20.135
HOLMES_ENGINE_MGMT_SERVICE_PORT=9102
HOLMES_ENGINE_MGMT_SERVICE_PORT_HTTPS_REST=9102
HOLMES_POSTGRES_PORT=tcp://10.233.0.92:5432
HOLMES_POSTGRES_PORT_5432_TCP=tcp://10.233.0.92:5432
HOLMES_POSTGRES_PORT_5432_TCP_ADDR=10.233.0.92
HOLMES_POSTGRES_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_PRIMARY_PORT=tcp://10.233.15.9:5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP=tcp://10.233.15.9:5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_ADDR=10.233.15.9
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_PRIMARY_SERVICE_HOST=10.233.15.9
HOLMES_POSTGRES_PRIMARY_SERVICE_PORT=5432
HOLMES_POSTGRES_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_POSTGRES_REPLICA_PORT=tcp://10.233.26.116:5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP=tcp://10.233.26.116:5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_ADDR=10.233.26.116
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_REPLICA_SERVICE_HOST=10.233.26.116
HOLMES_POSTGRES_REPLICA_SERVICE_PORT=5432
HOLMES_POSTGRES_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_POSTGRES_SERVICE_HOST=10.233.0.92
HOLMES_POSTGRES_SERVICE_PORT=5432
HOLMES_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_RULE_MGMT_PORT=tcp://10.233.48.182:9101
HOLMES_RULE_MGMT_PORT_9101_TCP=tcp://10.233.48.182:9101
HOLMES_RULE_MGMT_PORT_9101_TCP_ADDR=10.233.48.182
HOLMES_RULE_MGMT_PORT_9101_TCP_PORT=9101
HOLMES_RULE_MGMT_PORT_9101_TCP_PROTO=tcp
HOLMES_RULE_MGMT_PORT_9104_TCP=tcp://10.233.48.182:9104
HOLMES_RULE_MGMT_PORT_9104_TCP_ADDR=10.233.48.182
HOLMES_RULE_MGMT_PORT_9104_TCP_PORT=9104
HOLMES_RULE_MGMT_PORT_9104_TCP_PROTO=tcp
HOLMES_RULE_MGMT_SERVICE_HOST=10.233.48.182
HOLMES_RULE_MGMT_SERVICE_PORT=9101
HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_REST=9101
HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_UI=9104
HOME=/home/mrkafka
HOSTNAME=onap-message-router-kafka-2
HOST_IP=10.253.0.253
INVENTORY_PORT=tcp://10.233.12.27:8080
INVENTORY_PORT_8080_TCP=tcp://10.233.12.27:8080
INVENTORY_PORT_8080_TCP_ADDR=10.233.12.27
INVENTORY_PORT_8080_TCP_PORT=8080
INVENTORY_PORT_8080_TCP_PROTO=tcp
INVENTORY_SERVICE_HOST=10.233.12.27
INVENTORY_SERVICE_PORT=8080
INVENTORY_SERVICE_PORT_INVENTORY=8080
KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://10.253.0.253:30492,INTERNAL_SASL_PLAINTEXT://:9092
KAFKA_AUTHORIZER_CLASS_NAME=org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
KAFKA_BROKER_ID=2
KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE=false
KAFKA_DEFAULT_REPLICATION_FACTOR=3
KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL_SASL_PLAINTEXT
KAFKA_JMX_PORT=5555
KAFKA_LISTENERS=EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
KAFKA_LOG_DIRS=/var/lib/kafka/data
KAFKA_LOG_RETENTION_HOURS=168
KAFKA_NUM_PARTITIONS=3
KAFKA_NUM_RECOVERY_THREADS_PER_DATA_DIR=5
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=3
KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf
KAFKA_SASL_ENABLED_MECHANISMS=PLAIN
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1
KAFKA_USER=mrkafka
KAFKA_VERSION=5.3.1
KAFKA_ZOOKEEPER_CONNECT=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=6000
KAFKA_ZOOKEEPER_SET_ACL=true
KUBERNETES_PORT=tcp://10.233.0.1:443
KUBERNETES_PORT_443_TCP=tcp://10.233.0.1:443
KUBERNETES_PORT_443_TCP_ADDR=10.233.0.1
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_SERVICE_HOST=10.233.0.1
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_PORT_HTTPS=443
LANG=C.UTF-8
MARIADB_GALERA_PORT=tcp://10.233.32.204:3306
MARIADB_GALERA_PORT_3306_TCP=tcp://10.233.32.204:3306
MARIADB_GALERA_PORT_3306_TCP_ADDR=10.233.32.204
MARIADB_GALERA_PORT_3306_TCP_PORT=3306
MARIADB_GALERA_PORT_3306_TCP_PROTO=tcp
MARIADB_GALERA_SERVICE_HOST=10.233.32.204
MARIADB_GALERA_SERVICE_PORT=3306
MARIADB_GALERA_SERVICE_PORT_MYSQL=3306
MESSAGE_ROUTER_EXTERNAL_PORT=tcp://10.233.25.177:3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP=tcp://10.233.25.177:3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_ADDR=10.233.25.177
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PORT=3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PROTO=tcp
MESSAGE_ROUTER_EXTERNAL_SERVICE_HOST=10.233.25.177
MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT=3905
MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT_HTTPS_API=3905
MESSAGE_ROUTER_KAFKA_0_PORT=tcp://10.233.3.137:9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP=tcp://10.233.3.137:9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_ADDR=10.233.3.137
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_0_SERVICE_HOST=10.233.3.137
MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_0=9091
MESSAGE_ROUTER_KAFKA_1_PORT=tcp://10.233.50.217:9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP=tcp://10.233.50.217:9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_ADDR=10.233.50.217
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_1_SERVICE_HOST=10.233.50.217
MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_1=9091
MESSAGE_ROUTER_KAFKA_2_PORT=tcp://10.233.49.149:9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP=tcp://10.233.49.149:9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_ADDR=10.233.49.149
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_2_SERVICE_HOST=10.233.49.149
MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_2=9091
MESSAGE_ROUTER_PORT=tcp://10.233.63.30:3905
MESSAGE_ROUTER_PORT_3904_TCP=tcp://10.233.63.30:3904
MESSAGE_ROUTER_PORT_3904_TCP_ADDR=10.233.63.30
MESSAGE_ROUTER_PORT_3904_TCP_PORT=3904
MESSAGE_ROUTER_PORT_3904_TCP_PROTO=tcp
MESSAGE_ROUTER_PORT_3905_TCP=tcp://10.233.63.30:3905
MESSAGE_ROUTER_PORT_3905_TCP_ADDR=10.233.63.30
MESSAGE_ROUTER_PORT_3905_TCP_PORT=3905
MESSAGE_ROUTER_PORT_3905_TCP_PROTO=tcp
MESSAGE_ROUTER_SERVICE_HOST=10.233.63.30
MESSAGE_ROUTER_SERVICE_PORT=3905
MESSAGE_ROUTER_SERVICE_PORT_HTTPS_API=3905
MESSAGE_ROUTER_SERVICE_PORT_HTTP_API=3904
MODELING_ETSICATALOG_PORT=tcp://10.233.17.64:8806
MODELING_ETSICATALOG_PORT_8806_TCP=tcp://10.233.17.64:8806
MODELING_ETSICATALOG_PORT_8806_TCP_ADDR=10.233.17.64
MODELING_ETSICATALOG_PORT_8806_TCP_PORT=8806
MODELING_ETSICATALOG_PORT_8806_TCP_PROTO=tcp
MODELING_ETSICATALOG_SERVICE_HOST=10.233.17.64
MODELING_ETSICATALOG_SERVICE_PORT=8806
MODELING_ETSICATALOG_SERVICE_PORT_MODELING_ETSICATALOG=8806
MSB_CONSUL_PORT=tcp://10.233.0.38:8500
MSB_CONSUL_PORT_8500_TCP=tcp://10.233.0.38:8500
MSB_CONSUL_PORT_8500_TCP_ADDR=10.233.0.38
MSB_CONSUL_PORT_8500_TCP_PORT=8500
MSB_CONSUL_PORT_8500_TCP_PROTO=tcp
MSB_CONSUL_SERVICE_HOST=10.233.0.38
MSB_CONSUL_SERVICE_PORT=8500
MSB_CONSUL_SERVICE_PORT_HTTP_MSB_CONSUL=8500
MSB_DISCOVERY_PORT=tcp://10.233.54.199:10081
MSB_DISCOVERY_PORT_10081_TCP=tcp://10.233.54.199:10081
MSB_DISCOVERY_PORT_10081_TCP_ADDR=10.233.54.199
MSB_DISCOVERY_PORT_10081_TCP_PORT=10081
MSB_DISCOVERY_PORT_10081_TCP_PROTO=tcp
MSB_DISCOVERY_SERVICE_HOST=10.233.54.199
MSB_DISCOVERY_SERVICE_PORT=10081
MSB_DISCOVERY_SERVICE_PORT_HTTP_MSB_DISCOVERY=10081
MSB_EAG_PORT=tcp://10.233.58.100:443
MSB_EAG_PORT_443_TCP=tcp://10.233.58.100:443
MSB_EAG_PORT_443_TCP_ADDR=10.233.58.100
MSB_EAG_PORT_443_TCP_PORT=443
MSB_EAG_PORT_443_TCP_PROTO=tcp
MSB_EAG_SERVICE_HOST=10.233.58.100
MSB_EAG_SERVICE_PORT=443
MSB_EAG_SERVICE_PORT_HTTPS_MSB_EAG=443
MSB_IAG_PORT=tcp://10.233.37.248:443
MSB_IAG_PORT_443_TCP=tcp://10.233.37.248:443
MSB_IAG_PORT_443_TCP_ADDR=10.233.37.248
MSB_IAG_PORT_443_TCP_PORT=443
MSB_IAG_PORT_443_TCP_PROTO=tcp
MSB_IAG_SERVICE_HOST=10.233.37.248
MSB_IAG_SERVICE_PORT=443
MSB_IAG_SERVICE_PORT_HTTPS_MSB_IAG=443
MULTICLOUD_FCAPS_PORT=tcp://10.233.2.244:9011
MULTICLOUD_FCAPS_PORT_9011_TCP=tcp://10.233.2.244:9011
MULTICLOUD_FCAPS_PORT_9011_TCP_ADDR=10.233.2.244
MULTICLOUD_FCAPS_PORT_9011_TCP_PORT=9011
MULTICLOUD_FCAPS_PORT_9011_TCP_PROTO=tcp
MULTICLOUD_FCAPS_SERVICE_HOST=10.233.2.244
MULTICLOUD_FCAPS_SERVICE_PORT=9011
MULTICLOUD_FCAPS_SERVICE_PORT_MULTICLOUD_FCAPS=9011
MULTICLOUD_FRAMEWORK_PORT=tcp://10.233.44.81:9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP=tcp://10.233.44.81:9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_ADDR=10.233.44.81
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PORT=9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PROTO=tcp
MULTICLOUD_FRAMEWORK_SERVICE_HOST=10.233.44.81
MULTICLOUD_FRAMEWORK_SERVICE_PORT=9001
MULTICLOUD_FRAMEWORK_SERVICE_PORT_MULTICLOUD_FRAMEWORK=9001
MULTICLOUD_K8S_MONGO_READ_PORT=tcp://10.233.17.51:27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP=tcp://10.233.17.51:27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_ADDR=10.233.17.51
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PORT=27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PROTO=tcp
MULTICLOUD_K8S_MONGO_READ_SERVICE_HOST=10.233.17.51
MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT=27017
MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT_MONGO=27017
MULTICLOUD_K8S_PORT=tcp://10.233.61.128:9015
MULTICLOUD_K8S_PORT_9015_TCP=tcp://10.233.61.128:9015
MULTICLOUD_K8S_PORT_9015_TCP_ADDR=10.233.61.128
MULTICLOUD_K8S_PORT_9015_TCP_PORT=9015
MULTICLOUD_K8S_PORT_9015_TCP_PROTO=tcp
MULTICLOUD_K8S_SERVICE_HOST=10.233.61.128
MULTICLOUD_K8S_SERVICE_PORT=9015
MULTICLOUD_PIKE_PORT=tcp://10.233.42.1:9007
MULTICLOUD_PIKE_PORT_9007_TCP=tcp://10.233.42.1:9007
MULTICLOUD_PIKE_PORT_9007_TCP_ADDR=10.233.42.1
MULTICLOUD_PIKE_PORT_9007_TCP_PORT=9007
MULTICLOUD_PIKE_PORT_9007_TCP_PROTO=tcp
MULTICLOUD_PIKE_SERVICE_HOST=10.233.42.1
MULTICLOUD_PIKE_SERVICE_PORT=9007
MULTICLOUD_PIKE_SERVICE_PORT_MULTICLOUD_PIKE=9007
NBI_MONGOHOST_READ_PORT=tcp://10.233.15.47:27017
NBI_MONGOHOST_READ_PORT_27017_TCP=tcp://10.233.15.47:27017
NBI_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.233.15.47
NBI_MONGOHOST_READ_PORT_27017_TCP_PORT=27017
NBI_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp
NBI_MONGOHOST_READ_SERVICE_HOST=10.233.15.47
NBI_MONGOHOST_READ_SERVICE_PORT=27017
NBI_MONGOHOST_READ_SERVICE_PORT_MONGO=27017
NBI_PORT=tcp://10.233.5.123:8443
NBI_PORT_8443_TCP=tcp://10.233.5.123:8443
NBI_PORT_8443_TCP_ADDR=10.233.5.123
NBI_PORT_8443_TCP_PORT=8443
NBI_PORT_8443_TCP_PROTO=tcp
NBI_SERVICE_HOST=10.233.5.123
NBI_SERVICE_PORT=8443
NBI_SERVICE_PORT_API_8443=8443
NENG_SERV_PORT=tcp://10.233.4.35:8080
NENG_SERV_PORT_8080_TCP=tcp://10.233.4.35:8080
NENG_SERV_PORT_8080_TCP_ADDR=10.233.4.35
NENG_SERV_PORT_8080_TCP_PORT=8080
NENG_SERV_PORT_8080_TCP_PROTO=tcp
NENG_SERV_SERVICE_HOST=10.233.4.35
NENG_SERV_SERVICE_PORT=8080
NENG_SERV_SERVICE_PORT_NENG_SERV_PORT=8080
NETBOX_APP_PORT=tcp://10.233.8.40:8001
NETBOX_APP_PORT_8001_TCP=tcp://10.233.8.40:8001
NETBOX_APP_PORT_8001_TCP_ADDR=10.233.8.40
NETBOX_APP_PORT_8001_TCP_PORT=8001
NETBOX_APP_PORT_8001_TCP_PROTO=tcp
NETBOX_APP_SERVICE_HOST=10.233.8.40
NETBOX_APP_SERVICE_PORT=8001
NETBOX_APP_SERVICE_PORT_NETBOX_APP=8001
NETBOX_NGINX_PORT=tcp://10.233.39.100:8080
NETBOX_NGINX_PORT_8080_TCP=tcp://10.233.39.100:8080
NETBOX_NGINX_PORT_8080_TCP_ADDR=10.233.39.100
NETBOX_NGINX_PORT_8080_TCP_PORT=8080
NETBOX_NGINX_PORT_8080_TCP_PROTO=tcp
NETBOX_NGINX_SERVICE_HOST=10.233.39.100
NETBOX_NGINX_SERVICE_PORT=8080
NETBOX_POSTGRES_PORT=tcp://10.233.57.124:5432
NETBOX_POSTGRES_PORT_5432_TCP=tcp://10.233.57.124:5432
NETBOX_POSTGRES_PORT_5432_TCP_ADDR=10.233.57.124
NETBOX_POSTGRES_PORT_5432_TCP_PORT=5432
NETBOX_POSTGRES_PORT_5432_TCP_PROTO=tcp
NETBOX_POSTGRES_SERVICE_HOST=10.233.57.124
NETBOX_POSTGRES_SERVICE_PORT=5432
NETBOX_POSTGRES_SERVICE_PORT_NETBOX_POSTGRES=5432
ONAP_CDS_DB_METRICS_PORT=tcp://10.233.13.51:9104
ONAP_CDS_DB_METRICS_PORT_9104_TCP=tcp://10.233.13.51:9104
ONAP_CDS_DB_METRICS_PORT_9104_TCP_ADDR=10.233.13.51
ONAP_CDS_DB_METRICS_PORT_9104_TCP_PORT=9104
ONAP_CDS_DB_METRICS_PORT_9104_TCP_PROTO=tcp
ONAP_CDS_DB_METRICS_SERVICE_HOST=10.233.13.51
ONAP_CDS_DB_METRICS_SERVICE_PORT=9104
ONAP_CDS_DB_METRICS_SERVICE_PORT_METRICS=9104
ONAP_DMAAP_DR_DB_METRICS_PORT=tcp://10.233.0.114:9104
ONAP_DMAAP_DR_DB_METRICS_PORT_9104_TCP=tcp://10.233.0.114:9104
ONAP_DMAAP_DR_DB_METRICS_PORT_9104_TCP_ADDR=10.233.0.114
ONAP_DMAAP_DR_DB_METRICS_PORT_9104_TCP_PORT=9104
ONAP_DMAAP_DR_DB_METRICS_PORT_9104_TCP_PROTO=tcp
ONAP_DMAAP_DR_DB_METRICS_SERVICE_HOST=10.233.0.114
ONAP_DMAAP_DR_DB_METRICS_SERVICE_PORT=9104
ONAP_DMAAP_DR_DB_METRICS_SERVICE_PORT_METRICS=9104
ONAP_MARIADB_GALERA_METRICS_PORT=tcp://10.233.15.7:9104
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP=tcp://10.233.15.7:9104
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_ADDR=10.233.15.7
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_PORT=9104
ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_PROTO=tcp
ONAP_MARIADB_GALERA_METRICS_SERVICE_HOST=10.233.15.7
ONAP_MARIADB_GALERA_METRICS_SERVICE_PORT=9104
ONAP_MARIADB_GALERA_METRICS_SERVICE_PORT_METRICS=9104
ONAP_POLICY_MARIADB_METRICS_PORT=tcp://10.233.19.238:9104
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP=tcp://10.233.19.238:9104
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_ADDR=10.233.19.238
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_PORT=9104
ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_PROTO=tcp
ONAP_POLICY_MARIADB_METRICS_SERVICE_HOST=10.233.19.238
ONAP_POLICY_MARIADB_METRICS_SERVICE_PORT=9104
ONAP_POLICY_MARIADB_METRICS_SERVICE_PORT_METRICS=9104
OOF_HAS_API_PORT=tcp://10.233.49.9:8091
OOF_HAS_API_PORT_8091_TCP=tcp://10.233.49.9:8091
OOF_HAS_API_PORT_8091_TCP_ADDR=10.233.49.9
OOF_HAS_API_PORT_8091_TCP_PORT=8091
OOF_HAS_API_PORT_8091_TCP_PROTO=tcp
OOF_HAS_API_SERVICE_HOST=10.233.49.9
OOF_HAS_API_SERVICE_PORT=8091
OOF_HAS_API_SERVICE_PORT_OOF_HAS_API=8091
OOF_OSDF_PORT=tcp://10.233.18.30:8698
OOF_OSDF_PORT_8698_TCP=tcp://10.233.18.30:8698
OOF_OSDF_PORT_8698_TCP_ADDR=10.233.18.30
OOF_OSDF_PORT_8698_TCP_PORT=8698
OOF_OSDF_PORT_8698_TCP_PROTO=tcp
OOF_OSDF_SERVICE_HOST=10.233.18.30
OOF_OSDF_SERVICE_PORT=8698
OOM_CERT_SERVICE_PORT=tcp://10.233.28.113:8443
OOM_CERT_SERVICE_PORT_8443_TCP=tcp://10.233.28.113:8443
OOM_CERT_SERVICE_PORT_8443_TCP_ADDR=10.233.28.113
OOM_CERT_SERVICE_PORT_8443_TCP_PORT=8443
OOM_CERT_SERVICE_PORT_8443_TCP_PROTO=tcp
OOM_CERT_SERVICE_SERVICE_HOST=10.233.28.113
OOM_CERT_SERVICE_SERVICE_PORT=8443
OOM_CERT_SERVICE_SERVICE_PORT_HTTPS_HTTP=8443
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
POLICY_APEX_PDP_PORT=tcp://10.233.45.213:6969
POLICY_APEX_PDP_PORT_6969_TCP=tcp://10.233.45.213:6969
POLICY_APEX_PDP_PORT_6969_TCP_ADDR=10.233.45.213
POLICY_APEX_PDP_PORT_6969_TCP_PORT=6969
POLICY_APEX_PDP_PORT_6969_TCP_PROTO=tcp
POLICY_APEX_PDP_SERVICE_HOST=10.233.45.213
POLICY_APEX_PDP_SERVICE_PORT=6969
POLICY_APEX_PDP_SERVICE_PORT_POLICY_APEX_PDP=6969
POLICY_API_PORT=tcp://10.233.31.131:6969
POLICY_API_PORT_6969_TCP=tcp://10.233.31.131:6969
POLICY_API_PORT_6969_TCP_ADDR=10.233.31.131
POLICY_API_PORT_6969_TCP_PORT=6969
POLICY_API_PORT_6969_TCP_PROTO=tcp
POLICY_API_SERVICE_HOST=10.233.31.131
POLICY_API_SERVICE_PORT=6969
POLICY_API_SERVICE_PORT_POLICY_API=6969
POLICY_CLAMP_BE_PORT=tcp://10.233.10.48:8443
POLICY_CLAMP_BE_PORT_8443_TCP=tcp://10.233.10.48:8443
POLICY_CLAMP_BE_PORT_8443_TCP_ADDR=10.233.10.48
POLICY_CLAMP_BE_PORT_8443_TCP_PORT=8443
POLICY_CLAMP_BE_PORT_8443_TCP_PROTO=tcp
POLICY_CLAMP_BE_SERVICE_HOST=10.233.10.48
POLICY_CLAMP_BE_SERVICE_PORT=8443
POLICY_CLAMP_BE_SERVICE_PORT_POLICY_CLAMP_BE=8443
POLICY_CLAMP_FE_PORT=tcp://10.233.22.77:2443
POLICY_CLAMP_FE_PORT_2443_TCP=tcp://10.233.22.77:2443
POLICY_CLAMP_FE_PORT_2443_TCP_ADDR=10.233.22.77
POLICY_CLAMP_FE_PORT_2443_TCP_PORT=2443
POLICY_CLAMP_FE_PORT_2443_TCP_PROTO=tcp
POLICY_CLAMP_FE_SERVICE_HOST=10.233.22.77
POLICY_CLAMP_FE_SERVICE_PORT=2443
POLICY_CLAMP_FE_SERVICE_PORT_POLICY_CLAMP_FE=2443
POLICY_DISTRIBUTION_PORT=tcp://10.233.26.75:6969
POLICY_DISTRIBUTION_PORT_6969_TCP=tcp://10.233.26.75:6969
POLICY_DISTRIBUTION_PORT_6969_TCP_ADDR=10.233.26.75
POLICY_DISTRIBUTION_PORT_6969_TCP_PORT=6969
POLICY_DISTRIBUTION_PORT_6969_TCP_PROTO=tcp
POLICY_DISTRIBUTION_SERVICE_HOST=10.233.26.75
POLICY_DISTRIBUTION_SERVICE_PORT=6969
POLICY_DISTRIBUTION_SERVICE_PORT_POLICY_DISTRIBUTION=6969
POLICY_DROOLS_PDP_PORT=tcp://10.233.27.171:6969
POLICY_DROOLS_PDP_PORT_6969_TCP=tcp://10.233.27.171:6969
POLICY_DROOLS_PDP_PORT_6969_TCP_ADDR=10.233.27.171
POLICY_DROOLS_PDP_PORT_6969_TCP_PORT=6969
POLICY_DROOLS_PDP_PORT_6969_TCP_PROTO=tcp
POLICY_DROOLS_PDP_PORT_9696_TCP=tcp://10.233.27.171:9696
POLICY_DROOLS_PDP_PORT_9696_TCP_ADDR=10.233.27.171
POLICY_DROOLS_PDP_PORT_9696_TCP_PORT=9696
POLICY_DROOLS_PDP_PORT_9696_TCP_PROTO=tcp
POLICY_DROOLS_PDP_SERVICE_HOST=10.233.27.171
POLICY_DROOLS_PDP_SERVICE_PORT=6969
POLICY_DROOLS_PDP_SERVICE_PORT_POLICY_DROOLS_PDP_6969=6969
POLICY_DROOLS_PDP_SERVICE_PORT_POLICY_DROOLS_PDP_9696=9696
POLICY_HANDLER_PORT=tcp://10.233.48.227:80
POLICY_HANDLER_PORT_80_TCP=tcp://10.233.48.227:80
POLICY_HANDLER_PORT_80_TCP_ADDR=10.233.48.227
POLICY_HANDLER_PORT_80_TCP_PORT=80
POLICY_HANDLER_PORT_80_TCP_PROTO=tcp
POLICY_HANDLER_SERVICE_HOST=10.233.48.227
POLICY_HANDLER_SERVICE_PORT=80
POLICY_HANDLER_SERVICE_PORT_POLICY_HANDLER=80
POLICY_MARIADB_PORT=tcp://10.233.24.144:3306
POLICY_MARIADB_PORT_3306_TCP=tcp://10.233.24.144:3306
POLICY_MARIADB_PORT_3306_TCP_ADDR=10.233.24.144
POLICY_MARIADB_PORT_3306_TCP_PORT=3306
POLICY_MARIADB_PORT_3306_TCP_PROTO=tcp
POLICY_MARIADB_SERVICE_HOST=10.233.24.144
POLICY_MARIADB_SERVICE_PORT=3306
POLICY_MARIADB_SERVICE_PORT_MYSQL=3306
POLICY_PAP_PORT=tcp://10.233.30.191:6969
POLICY_PAP_PORT_6969_TCP=tcp://10.233.30.191:6969
POLICY_PAP_PORT_6969_TCP_ADDR=10.233.30.191
POLICY_PAP_PORT_6969_TCP_PORT=6969
POLICY_PAP_PORT_6969_TCP_PROTO=tcp
POLICY_PAP_SERVICE_HOST=10.233.30.191
POLICY_PAP_SERVICE_PORT=6969
POLICY_PAP_SERVICE_PORT_HTTP_API=6969
POLICY_XACML_PDP_PORT=tcp://10.233.50.74:6969
POLICY_XACML_PDP_PORT_6969_TCP=tcp://10.233.50.74:6969
POLICY_XACML_PDP_PORT_6969_TCP_ADDR=10.233.50.74
POLICY_XACML_PDP_PORT_6969_TCP_PORT=6969
POLICY_XACML_PDP_PORT_6969_TCP_PROTO=tcp
POLICY_XACML_PDP_SERVICE_HOST=10.233.50.74
POLICY_XACML_PDP_SERVICE_PORT=6969
POLICY_XACML_PDP_SERVICE_PORT_POLICY_XACML_PDP=6969
PORTAL_APP_PORT=tcp://10.233.10.225:8443
PORTAL_APP_PORT_8443_TCP=tcp://10.233.10.225:8443
PORTAL_APP_PORT_8443_TCP_ADDR=10.233.10.225
PORTAL_APP_PORT_8443_TCP_PORT=8443
PORTAL_APP_PORT_8443_TCP_PROTO=tcp
PORTAL_APP_SERVICE_HOST=10.233.10.225
PORTAL_APP_SERVICE_PORT=8443
PORTAL_APP_SERVICE_PORT_PORTAL_APP4=8443
PORTAL_CASSANDRA_PORT=tcp://10.233.11.163:9160
PORTAL_CASSANDRA_PORT_7000_TCP=tcp://10.233.11.163:7000
PORTAL_CASSANDRA_PORT_7000_TCP_ADDR=10.233.11.163
PORTAL_CASSANDRA_PORT_7000_TCP_PORT=7000
PORTAL_CASSANDRA_PORT_7000_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_7001_TCP=tcp://10.233.11.163:7001
PORTAL_CASSANDRA_PORT_7001_TCP_ADDR=10.233.11.163
PORTAL_CASSANDRA_PORT_7001_TCP_PORT=7001
PORTAL_CASSANDRA_PORT_7001_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_7199_TCP=tcp://10.233.11.163:7199
PORTAL_CASSANDRA_PORT_7199_TCP_ADDR=10.233.11.163
PORTAL_CASSANDRA_PORT_7199_TCP_PORT=7199
PORTAL_CASSANDRA_PORT_7199_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_9042_TCP=tcp://10.233.11.163:9042
PORTAL_CASSANDRA_PORT_9042_TCP_ADDR=10.233.11.163
PORTAL_CASSANDRA_PORT_9042_TCP_PORT=9042
PORTAL_CASSANDRA_PORT_9042_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_9160_TCP=tcp://10.233.11.163:9160
PORTAL_CASSANDRA_PORT_9160_TCP_ADDR=10.233.11.163
PORTAL_CASSANDRA_PORT_9160_TCP_PORT=9160
PORTAL_CASSANDRA_PORT_9160_TCP_PROTO=tcp
PORTAL_CASSANDRA_SERVICE_HOST=10.233.11.163
PORTAL_CASSANDRA_SERVICE_PORT=9160
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA2=7000
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA3=7001
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA4=7199
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA5=9042
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA=9160
PORTAL_DB_PORT=tcp://10.233.59.182:3306
PORTAL_DB_PORT_3306_TCP=tcp://10.233.59.182:3306
PORTAL_DB_PORT_3306_TCP_ADDR=10.233.59.182
PORTAL_DB_PORT_3306_TCP_PORT=3306
PORTAL_DB_PORT_3306_TCP_PROTO=tcp
PORTAL_DB_SERVICE_HOST=10.233.59.182
PORTAL_DB_SERVICE_PORT=3306
PORTAL_DB_SERVICE_PORT_PORTAL_DB=3306
PORTAL_SDK_PORT=tcp://10.233.19.99:8443
PORTAL_SDK_PORT_8443_TCP=tcp://10.233.19.99:8443
PORTAL_SDK_PORT_8443_TCP_ADDR=10.233.19.99
PORTAL_SDK_PORT_8443_TCP_PORT=8443
PORTAL_SDK_PORT_8443_TCP_PROTO=tcp
PORTAL_SDK_SERVICE_HOST=10.233.19.99
PORTAL_SDK_SERVICE_PORT=8443
PORTAL_SDK_SERVICE_PORT_PORTAL_SDK=8443
PORTAL_WIDGET_PORT=tcp://10.233.7.212:8082
PORTAL_WIDGET_PORT_8082_TCP=tcp://10.233.7.212:8082
PORTAL_WIDGET_PORT_8082_TCP_ADDR=10.233.7.212
PORTAL_WIDGET_PORT_8082_TCP_PORT=8082
PORTAL_WIDGET_PORT_8082_TCP_PROTO=tcp
PORTAL_WIDGET_SERVICE_HOST=10.233.7.212
PORTAL_WIDGET_SERVICE_PORT=8082
PORTAL_WIDGET_SERVICE_PORT_PORTAL_WIDGET=8082
PWD=/
PYTHON_PIP_VERSION=8.1.2
PYTHON_VERSION=2.7.9-1
ROBOT_PORT=tcp://10.233.4.77:443
ROBOT_PORT_443_TCP=tcp://10.233.4.77:443
ROBOT_PORT_443_TCP_ADDR=10.233.4.77
ROBOT_PORT_443_TCP_PORT=443
ROBOT_PORT_443_TCP_PROTO=tcp
ROBOT_SERVICE_HOST=10.233.4.77
ROBOT_SERVICE_PORT=443
ROBOT_SERVICE_PORT_HTTPD=443
SCALA_VERSION=2.12
SDC_BE_EXTERNAL_PORT=tcp://10.233.18.141:8443
SDC_BE_EXTERNAL_PORT_8443_TCP=tcp://10.233.18.141:8443
SDC_BE_EXTERNAL_PORT_8443_TCP_ADDR=10.233.18.141
SDC_BE_EXTERNAL_PORT_8443_TCP_PORT=8443
SDC_BE_EXTERNAL_PORT_8443_TCP_PROTO=tcp
SDC_BE_EXTERNAL_SERVICE_HOST=10.233.18.141
SDC_BE_EXTERNAL_SERVICE_PORT=8443
SDC_BE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443
SDC_BE_PORT=tcp://10.233.53.104:8443
SDC_BE_PORT_8080_TCP=tcp://10.233.53.104:8080
SDC_BE_PORT_8080_TCP_ADDR=10.233.53.104
SDC_BE_PORT_8080_TCP_PORT=8080
SDC_BE_PORT_8080_TCP_PROTO=tcp
SDC_BE_PORT_8443_TCP=tcp://10.233.53.104:8443
SDC_BE_PORT_8443_TCP_ADDR=10.233.53.104
SDC_BE_PORT_8443_TCP_PORT=8443
SDC_BE_PORT_8443_TCP_PROTO=tcp
SDC_BE_SERVICE_HOST=10.233.53.104
SDC_BE_SERVICE_PORT=8443
SDC_BE_SERVICE_PORT_HTTPS_API=8443
SDC_BE_SERVICE_PORT_HTTP_API=8080
SDC_FE_PORT=tcp://10.233.7.221:9443
SDC_FE_PORT_9443_TCP=tcp://10.233.7.221:9443
SDC_FE_PORT_9443_TCP_ADDR=10.233.7.221
SDC_FE_PORT_9443_TCP_PORT=9443
SDC_FE_PORT_9443_TCP_PROTO=tcp
SDC_FE_SERVICE_HOST=10.233.7.221
SDC_FE_SERVICE_PORT=9443
SDC_FE_SERVICE_PORT_SDC_FE2=9443
SDC_HELM_VALIDATOR_PORT=tcp://10.233.38.90:8080
SDC_HELM_VALIDATOR_PORT_8080_TCP=tcp://10.233.38.90:8080
SDC_HELM_VALIDATOR_PORT_8080_TCP_ADDR=10.233.38.90
SDC_HELM_VALIDATOR_PORT_8080_TCP_PORT=8080
SDC_HELM_VALIDATOR_PORT_8080_TCP_PROTO=tcp
SDC_HELM_VALIDATOR_SERVICE_HOST=10.233.38.90
SDC_HELM_VALIDATOR_SERVICE_PORT=8080
SDC_HELM_VALIDATOR_SERVICE_PORT_HTTP=8080
SDC_ONBOARDING_BE_PORT=tcp://10.233.55.96:8445
SDC_ONBOARDING_BE_PORT_8081_TCP=tcp://10.233.55.96:8081
SDC_ONBOARDING_BE_PORT_8081_TCP_ADDR=10.233.55.96
SDC_ONBOARDING_BE_PORT_8081_TCP_PORT=8081
SDC_ONBOARDING_BE_PORT_8081_TCP_PROTO=tcp
SDC_ONBOARDING_BE_PORT_8445_TCP=tcp://10.233.55.96:8445
SDC_ONBOARDING_BE_PORT_8445_TCP_ADDR=10.233.55.96
SDC_ONBOARDING_BE_PORT_8445_TCP_PORT=8445
SDC_ONBOARDING_BE_PORT_8445_TCP_PROTO=tcp
SDC_ONBOARDING_BE_SERVICE_HOST=10.233.55.96
SDC_ONBOARDING_BE_SERVICE_PORT=8445
SDC_ONBOARDING_BE_SERVICE_PORT_SDC_ONBOARDING_BE2=8081
SDC_ONBOARDING_BE_SERVICE_PORT_SDC_ONBOARDING_BE=8445
SDC_WFD_BE_PORT=tcp://10.233.30.10:8443
SDC_WFD_BE_PORT_8443_TCP=tcp://10.233.30.10:8443
SDC_WFD_BE_PORT_8443_TCP_ADDR=10.233.30.10
SDC_WFD_BE_PORT_8443_TCP_PORT=8443
SDC_WFD_BE_PORT_8443_TCP_PROTO=tcp
SDC_WFD_BE_SERVICE_HOST=10.233.30.10
SDC_WFD_BE_SERVICE_PORT=8443
SDC_WFD_BE_SERVICE_PORT_SDC_WFD_BE=8443
SDC_WFD_FE_PORT=tcp://10.233.15.234:8443
SDC_WFD_FE_PORT_8443_TCP=tcp://10.233.15.234:8443
SDC_WFD_FE_PORT_8443_TCP_ADDR=10.233.15.234
SDC_WFD_FE_PORT_8443_TCP_PORT=8443
SDC_WFD_FE_PORT_8443_TCP_PROTO=tcp
SDC_WFD_FE_SERVICE_HOST=10.233.15.234
SDC_WFD_FE_SERVICE_PORT=8443
SDC_WFD_FE_SERVICE_PORT_SDC_WFD_FE=8443
SDNC_ANSIBLE_SERVER_PORT=tcp://10.233.53.147:8000
SDNC_ANSIBLE_SERVER_PORT_8000_TCP=tcp://10.233.53.147:8000
SDNC_ANSIBLE_SERVER_PORT_8000_TCP_ADDR=10.233.53.147
SDNC_ANSIBLE_SERVER_PORT_8000_TCP_PORT=8000
SDNC_ANSIBLE_SERVER_PORT_8000_TCP_PROTO=tcp
SDNC_ANSIBLE_SERVER_SERVICE_HOST=10.233.53.147
SDNC_ANSIBLE_SERVER_SERVICE_PORT=8000
SDNC_ANSIBLE_SERVER_SERVICE_PORT_SDNC_ANSIBLE_SERVER=8000
SDNC_CALLHOME_PORT=tcp://10.233.4.52:6666
SDNC_CALLHOME_PORT_6666_TCP=tcp://10.233.4.52:6666
SDNC_CALLHOME_PORT_6666_TCP_ADDR=10.233.4.52
SDNC_CALLHOME_PORT_6666_TCP_PORT=6666
SDNC_CALLHOME_PORT_6666_TCP_PROTO=tcp
SDNC_CALLHOME_SERVICE_HOST=10.233.4.52
SDNC_CALLHOME_SERVICE_PORT=6666
SDNC_CALLHOME_SERVICE_PORT_SDNC_CALLHOME=6666
SDNC_DGBUILDER_PORT=tcp://10.233.57.130:3000
SDNC_DGBUILDER_PORT_3000_TCP=tcp://10.233.57.130:3000
SDNC_DGBUILDER_PORT_3000_TCP_ADDR=10.233.57.130
SDNC_DGBUILDER_PORT_3000_TCP_PORT=3000
SDNC_DGBUILDER_PORT_3000_TCP_PROTO=tcp
SDNC_DGBUILDER_SERVICE_HOST=10.233.57.130
SDNC_DGBUILDER_SERVICE_PORT=3000
SDNC_DGBUILDER_SERVICE_PORT_DGBUILDER=3000
SDNC_OAM_PORT=tcp://10.233.48.208:8282
SDNC_OAM_PORT_8202_TCP=tcp://10.233.48.208:8202
SDNC_OAM_PORT_8202_TCP_ADDR=10.233.48.208
SDNC_OAM_PORT_8202_TCP_PORT=8202
SDNC_OAM_PORT_8202_TCP_PROTO=tcp
SDNC_OAM_PORT_8282_TCP=tcp://10.233.48.208:8282
SDNC_OAM_PORT_8282_TCP_ADDR=10.233.48.208
SDNC_OAM_PORT_8282_TCP_PORT=8282
SDNC_OAM_PORT_8282_TCP_PROTO=tcp
SDNC_OAM_SERVICE_HOST=10.233.48.208
SDNC_OAM_SERVICE_PORT=8282
SDNC_OAM_SERVICE_PORT_SDNC_KARAF=8202
SDNC_OAM_SERVICE_PORT_SDNC_RESTCONF_ALT=8282
SDNC_PORT=tcp://10.233.17.237:8443
SDNC_PORT_8443_TCP=tcp://10.233.17.237:8443
SDNC_PORT_8443_TCP_ADDR=10.233.17.237
SDNC_PORT_8443_TCP_PORT=8443
SDNC_PORT_8443_TCP_PROTO=tcp
SDNC_SERVICE_HOST=10.233.17.237
SDNC_SERVICE_PORT=8443
SDNC_SERVICE_PORT_SDNC_RESTCONF=8443
SDNC_WEB_SERVICE_PORT=tcp://10.233.3.228:8443
SDNC_WEB_SERVICE_PORT_8443_TCP=tcp://10.233.3.228:8443
SDNC_WEB_SERVICE_PORT_8443_TCP_ADDR=10.233.3.228
SDNC_WEB_SERVICE_PORT_8443_TCP_PORT=8443
SDNC_WEB_SERVICE_PORT_8443_TCP_PROTO=tcp
SDNC_WEB_SERVICE_SERVICE_HOST=10.233.3.228
SDNC_WEB_SERVICE_SERVICE_PORT=8443
SDNC_WEB_SERVICE_SERVICE_PORT_SDNC_WEB=8443
SDNRDB_PORT=tcp://10.233.45.62:9200
SDNRDB_PORT_9200_TCP=tcp://10.233.45.62:9200
SDNRDB_PORT_9200_TCP_ADDR=10.233.45.62
SDNRDB_PORT_9200_TCP_PORT=9200
SDNRDB_PORT_9200_TCP_PROTO=tcp
SDNRDB_SERVICE_HOST=10.233.45.62
SDNRDB_SERVICE_PORT=9200
SDNRDB_SERVICE_PORT_9300_TCP=tcp://10.233.7.22:9300
SDNRDB_SERVICE_PORT_9300_TCP_ADDR=10.233.7.22
SDNRDB_SERVICE_PORT_9300_TCP_PORT=9300
SDNRDB_SERVICE_PORT_9300_TCP_PROTO=tcp
SDNRDB_SERVICE_PORT_ELASTICSEARCH=9200
SDNRDB_SERVICE_SERVICE_HOST=10.233.7.22
SDNRDB_SERVICE_SERVICE_PORT=9300
SDNRDB_SERVICE_SERVICE_PORT_HTTP_TRANSPORT=9300
SHLVL=1
SO_ADMIN_COCKPIT_PORT=tcp://10.233.46.90:9091
SO_ADMIN_COCKPIT_PORT_9091_TCP=tcp://10.233.46.90:9091
SO_ADMIN_COCKPIT_PORT_9091_TCP_ADDR=10.233.46.90
SO_ADMIN_COCKPIT_PORT_9091_TCP_PORT=9091
SO_ADMIN_COCKPIT_PORT_9091_TCP_PROTO=tcp
SO_ADMIN_COCKPIT_SERVICE_HOST=10.233.46.90
SO_ADMIN_COCKPIT_SERVICE_PORT=9091
SO_ADMIN_COCKPIT_SERVICE_PORT_HTTP=9091
SO_BPMN_INFRA_PORT=tcp://10.233.30.199:8081
SO_BPMN_INFRA_PORT_8081_TCP=tcp://10.233.30.199:8081
SO_BPMN_INFRA_PORT_8081_TCP_ADDR=10.233.30.199
SO_BPMN_INFRA_PORT_8081_TCP_PORT=8081
SO_BPMN_INFRA_PORT_8081_TCP_PROTO=tcp
SO_BPMN_INFRA_SERVICE_HOST=10.233.30.199
SO_BPMN_INFRA_SERVICE_PORT=8081
SO_BPMN_INFRA_SERVICE_PORT_SO_BPMN_PORT=8081
SO_CATALOG_DB_ADAPTER_PORT=tcp://10.233.29.9:8082
SO_CATALOG_DB_ADAPTER_PORT_8082_TCP=tcp://10.233.29.9:8082
SO_CATALOG_DB_ADAPTER_PORT_8082_TCP_ADDR=10.233.29.9
SO_CATALOG_DB_ADAPTER_PORT_8082_TCP_PORT=8082
SO_CATALOG_DB_ADAPTER_PORT_8082_TCP_PROTO=tcp
SO_CATALOG_DB_ADAPTER_SERVICE_HOST=10.233.29.9
SO_CATALOG_DB_ADAPTER_SERVICE_PORT=8082
SO_CATALOG_DB_ADAPTER_SERVICE_PORT_SO_CATDB_PORT=8082
SO_CNF_ADAPTER_PORT=tcp://10.233.23.163:8090
SO_CNF_ADAPTER_PORT_8090_TCP=tcp://10.233.23.163:8090
SO_CNF_ADAPTER_PORT_8090_TCP_ADDR=10.233.23.163
SO_CNF_ADAPTER_PORT_8090_TCP_PORT=8090
SO_CNF_ADAPTER_PORT_8090_TCP_PROTO=tcp
SO_CNF_ADAPTER_SERVICE_HOST=10.233.23.163
SO_CNF_ADAPTER_SERVICE_PORT=8090
SO_CNF_ADAPTER_SERVICE_PORT_HTTP_API=8090
SO_ETSI_NFVO_NS_LCM_PORT=tcp://10.233.35.41:9095
SO_ETSI_NFVO_NS_LCM_PORT_9095_TCP=tcp://10.233.35.41:9095
SO_ETSI_NFVO_NS_LCM_PORT_9095_TCP_ADDR=10.233.35.41
SO_ETSI_NFVO_NS_LCM_PORT_9095_TCP_PORT=9095
SO_ETSI_NFVO_NS_LCM_PORT_9095_TCP_PROTO=tcp
SO_ETSI_NFVO_NS_LCM_SERVICE_HOST=10.233.35.41
SO_ETSI_NFVO_NS_LCM_SERVICE_PORT=9095
SO_ETSI_NFVO_NS_LCM_SERVICE_PORT_HTTP_API=9095
SO_ETSI_SOL003_ADAPTER_PORT=tcp://10.233.62.49:9092
SO_ETSI_SOL003_ADAPTER_PORT_9092_TCP=tcp://10.233.62.49:9092
SO_ETSI_SOL003_ADAPTER_PORT_9092_TCP_ADDR=10.233.62.49
SO_ETSI_SOL003_ADAPTER_PORT_9092_TCP_PORT=9092
SO_ETSI_SOL003_ADAPTER_PORT_9092_TCP_PROTO=tcp
SO_ETSI_SOL003_ADAPTER_SERVICE_HOST=10.233.62.49
SO_ETSI_SOL003_ADAPTER_SERVICE_PORT=9092
SO_ETSI_SOL003_ADAPTER_SERVICE_PORT_HTTP=9092
SO_ETSI_SOL005_ADAPTER_PORT=tcp://10.233.20.79:8084
SO_ETSI_SOL005_ADAPTER_PORT_8084_TCP=tcp://10.233.20.79:8084
SO_ETSI_SOL005_ADAPTER_PORT_8084_TCP_ADDR=10.233.20.79
SO_ETSI_SOL005_ADAPTER_PORT_8084_TCP_PORT=8084
SO_ETSI_SOL005_ADAPTER_PORT_8084_TCP_PROTO=tcp
SO_ETSI_SOL005_ADAPTER_SERVICE_HOST=10.233.20.79
SO_ETSI_SOL005_ADAPTER_SERVICE_PORT=8084
SO_ETSI_SOL005_ADAPTER_SERVICE_PORT_HTTP=8084
SO_NSSMF_ADAPTER_PORT=tcp://10.233.5.189:8088
SO_NSSMF_ADAPTER_PORT_8088_TCP=tcp://10.233.5.189:8088
SO_NSSMF_ADAPTER_PORT_8088_TCP_ADDR=10.233.5.189
SO_NSSMF_ADAPTER_PORT_8088_TCP_PORT=8088
SO_NSSMF_ADAPTER_PORT_8088_TCP_PROTO=tcp
SO_NSSMF_ADAPTER_SERVICE_HOST=10.233.5.189
SO_NSSMF_ADAPTER_SERVICE_PORT=8088
SO_NSSMF_ADAPTER_SERVICE_PORT_API=8088
SO_OOF_ADAPTER_PORT=tcp://10.233.36.99:8090
SO_OOF_ADAPTER_PORT_8090_TCP=tcp://10.233.36.99:8090
SO_OOF_ADAPTER_PORT_8090_TCP_ADDR=10.233.36.99
SO_OOF_ADAPTER_PORT_8090_TCP_PORT=8090
SO_OOF_ADAPTER_PORT_8090_TCP_PROTO=tcp
SO_OOF_ADAPTER_SERVICE_HOST=10.233.36.99
SO_OOF_ADAPTER_SERVICE_PORT=8090
SO_OOF_ADAPTER_SERVICE_PORT_API=8090
SO_OPENSTACK_ADAPTER_PORT=tcp://10.233.9.28:8087
SO_OPENSTACK_ADAPTER_PORT_8087_TCP=tcp://10.233.9.28:8087
SO_OPENSTACK_ADAPTER_PORT_8087_TCP_ADDR=10.233.9.28
SO_OPENSTACK_ADAPTER_PORT_8087_TCP_PORT=8087
SO_OPENSTACK_ADAPTER_PORT_8087_TCP_PROTO=tcp
SO_OPENSTACK_ADAPTER_SERVICE_HOST=10.233.9.28
SO_OPENSTACK_ADAPTER_SERVICE_PORT=8087
SO_OPENSTACK_ADAPTER_SERVICE_PORT_SO_OPTACK_PORT=8087
SO_PORT=tcp://10.233.14.205:8080
SO_PORT_8080_TCP=tcp://10.233.14.205:8080
SO_PORT_8080_TCP_ADDR=10.233.14.205
SO_PORT_8080_TCP_PORT=8080
SO_PORT_8080_TCP_PROTO=tcp
SO_REQUEST_DB_ADAPTER_PORT=tcp://10.233.44.76:8083
SO_REQUEST_DB_ADAPTER_PORT_8083_TCP=tcp://10.233.44.76:8083
SO_REQUEST_DB_ADAPTER_PORT_8083_TCP_ADDR=10.233.44.76
SO_REQUEST_DB_ADAPTER_PORT_8083_TCP_PORT=8083
SO_REQUEST_DB_ADAPTER_PORT_8083_TCP_PROTO=tcp
SO_REQUEST_DB_ADAPTER_SERVICE_HOST=10.233.44.76
SO_REQUEST_DB_ADAPTER_SERVICE_PORT=8083
SO_REQUEST_DB_ADAPTER_SERVICE_PORT_SO_REQDB_PORT=8083
SO_SDC_CONTROLLER_PORT=tcp://10.233.35.235:8085
SO_SDC_CONTROLLER_PORT_8085_TCP=tcp://10.233.35.235:8085
SO_SDC_CONTROLLER_PORT_8085_TCP_ADDR=10.233.35.235
SO_SDC_CONTROLLER_PORT_8085_TCP_PORT=8085
SO_SDC_CONTROLLER_PORT_8085_TCP_PROTO=tcp
SO_SDC_CONTROLLER_SERVICE_HOST=10.233.35.235
SO_SDC_CONTROLLER_SERVICE_PORT=8085
SO_SDC_CONTROLLER_SERVICE_PORT_SO_SDC_PORT=8085
SO_SDNC_ADAPTER_PORT=tcp://10.233.52.83:8086
SO_SDNC_ADAPTER_PORT_8086_TCP=tcp://10.233.52.83:8086
SO_SDNC_ADAPTER_PORT_8086_TCP_ADDR=10.233.52.83
SO_SDNC_ADAPTER_PORT_8086_TCP_PORT=8086
SO_SDNC_ADAPTER_PORT_8086_TCP_PROTO=tcp
SO_SDNC_ADAPTER_SERVICE_HOST=10.233.52.83
SO_SDNC_ADAPTER_SERVICE_PORT=8086
SO_SDNC_ADAPTER_SERVICE_PORT_SO_SDNC_PORT=8086
SO_SERVICE_HOST=10.233.14.205
SO_SERVICE_PORT=8080
SO_SERVICE_PORT_SO_APIH_PORT=8080
ZULU_OPENJDK_VERSION=8=8.38.0.13
_=/usr/bin/env
aaf_locate_url=https://aaf-locate.onap:8095
enableCadi=true
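Note: the bulk of the dump above consists of Kubernetes service-link environment variables, generated automatically for every Service visible to the pod: <SVC>_SERVICE_HOST, <SVC>_SERVICE_PORT, plus per-port <SVC>_PORT_<n>_TCP_{ADDR,PORT,PROTO} entries. A minimal Java sketch of consuming them follows; the variable names are taken from the dump itself, while the /restconf path is illustrative and not from this log:

    public class ServiceLink {
        public static void main(String[] args) {
            // Resolve SDNC's RESTCONF endpoint from the service-link variables above.
            String host = System.getenv("SDNC_SERVICE_HOST");               // 10.233.17.237
            String port = System.getenv("SDNC_SERVICE_PORT_SDNC_RESTCONF"); // 8443
            System.out.println("https://" + host + ":" + port + "/restconf");
        }
    }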
===> User
uid=1000(mrkafka) gid=0(root) groups=0(root)
===> Configuring ...
SASL is enabled.
===> Running preflight checks ... 
===> Check if /var/lib/kafka/data is writable ...
===> Check if Zookeeper is healthy ...
[main] INFO io.confluent.admin.utils.ClusterStatus - SASL is enabled. java.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:host.name=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.version=1.8.0_212
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.vendor=Azul Systems, Inc.
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.class.path=/etc/confluent/docker/docker-utils.jar
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.io.tmpdir=/tmp
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.compiler=<NA>
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.name=Linux
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.arch=amd64
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.version=4.19.0-17-cloud-amd64
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.name=mrkafka
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.home=/home/mrkafka
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.dir=/
[main] INFO org.apache.zookeeper.ZooKeeper - Initiating client connection, connectString=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=40000 watcher=io.confluent.admin.utils.ZookeeperConnectionWatcher@30dae81
[main-SendThread(onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.Login - Client successfully logged in.
[main-SendThread(onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.client.ZooKeeperSaslClient - Client will use DIGEST-MD5 as SASL mechanism.
[main-SendThread(onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Opening socket connection to server onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.74.211:2181. Will attempt to SASL-authenticate using Login Context section 'Client'
[main-SendThread(onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Socket connection established to onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.74.211:2181, initiating session
[main-SendThread(onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Session establishment complete on server onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.74.211:2181, sessionid = 0x30000679db10000, negotiated timeout = 40000
[main] INFO org.apache.zookeeper.ZooKeeper - Session: 0x30000679db10000 closed
[main-EventThread] INFO org.apache.zookeeper.ClientCnxn - EventThread shut down for session: 0x30000679db10000
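Note: the preflight check above opens a short-lived, SASL-authenticated ZooKeeper session (session id 0x30000679db10000) and closes it as soon as the connection is confirmed. Below is a minimal sketch of the same connect-wait-close pattern, not the actual io.confluent.admin.utils.ClusterStatus implementation; in the logged run the SASL login comes from the JAAS file named above:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import org.apache.zookeeper.Watcher.Event.KeeperState;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkPreflight {
        public static void main(String[] args) throws Exception {
            String connect = "onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181";
            CountDownLatch connected = new CountDownLatch(1);
            // Same 40000 ms session timeout as the logged preflight session.
            ZooKeeper zk = new ZooKeeper(connect, 40000, event -> {
                if (event.getState() == KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            boolean healthy = connected.await(40, TimeUnit.SECONDS);
            zk.close();
            System.exit(healthy ? 0 : 1);
        }
    }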
===> Launching ... 
===> Launching kafka ... 
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/share/java/kafka/slf4j-log4j12-1.7.26.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/share/java/kafka/kafka11aaf-jar-with-dependencies.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
[2021-08-25 01:15:09,557] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[2021-08-25 01:15:09,798] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://10.253.0.253:30492,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.id = 2
	broker.id.generation.enable = true
	broker.rack = null
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.3-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.3-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1000012
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 10000
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = [DEFAULT]
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.connect = onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 6000
	zookeeper.set.acl = true
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
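Note: two details stand out in this config dump. Both listeners use SASL_PLAINTEXT with the PLAIN mechanism plus the custom ONAP authorizer (org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer), and because inter.broker.listener.name is set to INTERNAL_SASL_PLAINTEXT, the security.inter.broker.protocol = PLAINTEXT line is just the ignored default, not an unauthenticated inter-broker channel. A client for the internal listener would be configured roughly as follows (a sketch; the credentials are placeholders, not values from this log):

    import java.util.Properties;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;

    public class InternalListenerClient {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                    "onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092");
            props.put("security.protocol", "SASL_PLAINTEXT");
            props.put("sasl.mechanism", "PLAIN");
            // Placeholder credentials; the real ones live in the JAAS file
            // referenced earlier (/etc/kafka/secrets/jaas/kafka_server_jaas.conf).
            props.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                            + "username=\"admin\" password=\"CHANGE_ME\";");
            try (AdminClient admin = AdminClient.create(props)) {
                System.out.println(admin.describeCluster().clusterId().get());
            }
        }
    }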
[2021-08-25 01:15:09,854] WARN The package io.confluent.support.metrics.collectors.FullCollector for collecting the full set of support metrics could not be loaded, so we are reverting to anonymous, basic metric collection. If you are a Confluent customer, please refer to the Confluent Platform documentation, section Proactive Support, on how to activate full metrics collection. (io.confluent.support.metrics.KafkaSupportConfig)
[2021-08-25 01:15:09,854] WARN The support metrics collection feature ("Metrics") of Proactive Support is disabled. (io.confluent.support.metrics.SupportedServerStartable)
[2021-08-25 01:15:09,856] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler)
[2021-08-25 01:15:09,856] INFO starting (kafka.server.KafkaServer)
[2021-08-25 01:15:09,857] INFO Connecting to zookeeper on onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 (kafka.server.KafkaServer)
[2021-08-25 01:15:09,877] INFO [ZooKeeperClient Kafka server] Initializing a new session to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181. (kafka.zookeeper.ZooKeeperClient)
[2021-08-25 01:15:09,882] INFO Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,882] INFO Client environment:host.name=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,882] INFO Client environment:java.version=1.8.0_212 (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,882] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,882] INFO Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,882] INFO Client environment:java.class.path=/usr/bin/../share/java/kafka/activation-1.1.1.jar:/usr/bin/../share/java/kafka/aopalliance-repackaged-2.5.0.jar:/usr/bin/../share/java/kafka/argparse4j-0.7.0.jar:/usr/bin/../share/java/kafka/audience-annotations-0.5.0.jar:/usr/bin/../share/java/kafka/avro-1.8.1.jar:/usr/bin/../share/java/kafka/commons-codec-1.11.jar:/usr/bin/../share/java/kafka/commons-compress-1.8.1.jar:/usr/bin/../share/java/kafka/commons-lang3-3.8.1.jar:/usr/bin/../share/java/kafka/commons-logging-1.2.jar:/usr/bin/../share/java/kafka/connect-api-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-basic-auth-extension-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-file-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-json-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-runtime-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-transforms-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/guava-20.0.jar:/usr/bin/../share/java/kafka/hk2-api-2.5.0.jar:/usr/bin/../share/java/kafka/hk2-locator-2.5.0.jar:/usr/bin/../share/java/kafka/hk2-utils-2.5.0.jar:/usr/bin/../share/java/kafka/httpclient-4.5.7.jar:/usr/bin/../share/java/kafka/httpcore-4.4.11.jar:/usr/bin/../share/java/kafka/httpmime-4.5.7.jar:/usr/bin/../share/java/kafka/jackson-annotations-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-core-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-core-asl-1.9.13.jar:/usr/bin/../share/java/kafka/jackson-databind-2.9.9.3.jar:/usr/bin/../share/java/kafka/jackson-dataformat-csv-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-datatype-jdk8-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-base-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-json-provider-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-mapper-asl-1.9.13.jar:/usr/bin/../share/java/kafka/jackson-module-jaxb-annotations-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-module-paranamer-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-module-scala_2.11-2.9.9.jar:/usr/bin/../share/java/kafka/jakarta.annotation-api-1.3.4.jar:/usr/bin/../share/java/kafka/jakarta.inject-2.5.0.jar:/usr/bin/../share/java/kafka/jakarta.ws.rs-api-2.1.5.jar:/usr/bin/../share/java/kafka/javassist-3.22.0-CR2.jar:/usr/bin/../share/java/kafka/javax.servlet-api-3.1.0.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.1.1.jar:/usr/bin/../share/java/kafka/jaxb-api-2.3.0.jar:/usr/bin/../share/java/kafka/jersey-client-2.28.jar:/usr/bin/../share/java/kafka/jersey-common-2.28.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-2.28.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-core-2.28.jar:/usr/bin/../share/java/kafka/jersey-hk2-2.28.jar:/usr/bin/../share/java/kafka/jersey-media-jaxb-2.28.jar:/usr/bin/../share/java/kafka/jersey-server-2.28.jar:/usr/bin/../share/java/kafka/jetty-client-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-continuation-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-http-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-io-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-security-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-server-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-servlet-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-servlets-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-util-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jopt-simple-5.0.4.jar:/usr/bin/../share/java/kafka/jsr305-3.0.2.jar:/usr/bin/../share/java/kafka/kafka-clients-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-log4j-appender-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-examples-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-scala_2.11-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-test-utils-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-tools-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-javadoc.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-scaladoc.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-sources.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-test-sources.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-test.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/log4j-1.2.17.jar:/usr/bin/../share/java/kafka/lz4-java-1.6.0.jar:/usr/bin/../share/java/kafka/maven-artifact-3.6.1.jar:/usr/bin/../share/java/kafka/metrics-core-2.2.0.jar:/usr/bin/../share/java/kafka/osgi-resource-locator-1.0.1.jar:/usr/bin/../share/java/kafka/paranamer-2.7.jar:/usr/bin/../share/java/kafka/paranamer-2.8.jar:/usr/bin/../share/java/kafka/plexus-utils-3.2.0.jar:/usr/bin/../share/java/kafka/reflections-0.9.11.jar:/usr/bin/../share/java/kafka/rocksdbjni-5.18.3.jar:/usr/bin/../share/java/kafka/scala-library-2.11.12.jar:/usr/bin/../share/java/kafka/scala-logging_2.11-3.9.0.jar:/usr/bin/../share/java/kafka/scala-reflect-2.11.12.jar:/usr/bin/../share/java/kafka/slf4j-api-1.7.26.jar:/usr/bin/../share/java/kafka/slf4j-log4j12-1.7.26.jar:/usr/bin/../share/java/kafka/snappy-java-1.1.7.3.jar:/usr/bin/../share/java/kafka/spotbugs-annotations-3.1.9.jar:/usr/bin/../share/java/kafka/support-metrics-client-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/support-metrics-common-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/validation-api-2.0.1.Final.jar:/usr/bin/../share/java/kafka/xz-1.5.jar:/usr/bin/../share/java/kafka/zkclient-0.11.jar:/usr/bin/../share/java/kafka/zookeeper-3.4.14.jar:/usr/bin/../share/java/kafka/zstd-jni-1.4.0-1.jar:/usr/bin/../share/java/kafka/kafka11aaf-jar-with-dependencies.jar:/usr/bin/../support-metrics-client/build/dependant-libs-2.12/*:/usr/bin/../support-metrics-client/build/libs/*:/usr/share/java/support-metrics-client/* (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,882] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,883] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,883] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,883] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,883] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,883] INFO Client environment:os.version=4.19.0-17-cloud-amd64 (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,883] INFO Client environment:user.name=mrkafka (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,883] INFO Client environment:user.home=/home/mrkafka (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,883] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,884] INFO Initiating client connection, connectString=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=6000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@28701274 (org.apache.zookeeper.ZooKeeper)
[2021-08-25 01:15:09,894] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2021-08-25 01:15:09,902] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[2021-08-25 01:15:09,903] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-08-25 01:15:09,928] INFO Opening socket connection to server onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.74.211:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-08-25 01:15:09,933] INFO Socket connection established to onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.74.211:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-08-25 01:15:09,942] INFO Session establishment complete on server onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.233.74.211:2181, sessionid = 0x30000679db10001, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)
[2021-08-25 01:15:09,966] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2021-08-25 01:15:10,242] INFO Cluster ID = P3Yzo-7gQzaXtwUehK5FWw (kafka.server.KafkaServer)
[2021-08-25 01:15:10,245] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2021-08-25 01:15:10,287] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://10.253.0.253:30492,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.id = 2
	broker.id.generation.enable = true
	broker.rack = null
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.3-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.3-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1000012
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 10000
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = [DEFAULT]
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.connect = onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 6000
	zookeeper.set.acl = true
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2021-08-25 01:15:10,295] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://10.253.0.253:30492,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.id = 2
	broker.id.generation.enable = true
	broker.rack = null
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.3-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.3-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1000012
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 10000
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = [DEFAULT]
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.connect = onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 6000
	zookeeper.set.acl = true
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2021-08-25 01:15:10,317] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-08-25 01:15:10,317] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-08-25 01:15:10,319] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-08-25 01:15:10,348] INFO Loading logs. (kafka.log.LogManager)
[2021-08-25 01:15:10,358] INFO Logs loading complete in 9 ms. (kafka.log.LogManager)
[2021-08-25 01:15:10,370] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
[2021-08-25 01:15:10,371] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[2021-08-25 01:15:10,374] INFO Starting the log cleaner (kafka.log.LogCleaner)
[2021-08-25 01:15:10,432] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner)
[2021-08-25 01:15:10,703] INFO Awaiting socket connections on 0.0.0.0:9091. (kafka.network.Acceptor)
[2021-08-25 01:15:10,720] INFO Successfully logged in. (org.apache.kafka.common.security.authenticator.AbstractLogin)
[2021-08-25 01:15:10,739] INFO [SocketServer brokerId=2] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9091,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT) (kafka.network.SocketServer)
[2021-08-25 01:15:10,739] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
[2021-08-25 01:15:10,750] INFO [SocketServer brokerId=2] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT) (kafka.network.SocketServer)
[2021-08-25 01:15:10,751] INFO [SocketServer brokerId=2] Started 2 acceptor threads for data-plane (kafka.network.SocketServer)
[2021-08-25 01:15:10,781] INFO [ExpirationReaper-2-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-08-25 01:15:10,782] INFO [ExpirationReaper-2-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-08-25 01:15:10,783] INFO [ExpirationReaper-2-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-08-25 01:15:10,784] INFO [ExpirationReaper-2-ElectPreferredLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-08-25 01:15:10,796] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2021-08-25 01:15:10,851] INFO Creating /brokers/ids/2 (is it secure? true) (kafka.zk.KafkaZkClient)
[2021-08-25 01:15:10,872] INFO Stat of the created znode at /brokers/ids/2 is: 12884901950,12884901950,1629854110861,1629854110861,1,0,0,216173227141038081,366,0,12884901950
 (kafka.zk.KafkaZkClient)
[2021-08-25 01:15:10,872] INFO Registered broker 2 at path /brokers/ids/2 with addresses: ArrayBuffer(EndPoint(10.253.0.253,30492,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT), EndPoint(onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT)), czxid (broker epoch): 12884901950 (kafka.zk.KafkaZkClient)
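Note: the registration above creates the ephemeral znode /brokers/ids/2 holding this broker's advertised endpoints. With zookeeper.set.acl = true the znode is writable only by the broker's SASL identity, but under Kafka's secure ACL scheme it remains world-readable, so a one-off read suffices to inspect it (a sketch, assuming network reach to the ZooKeeper service):

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.ZooKeeper;

    public class ReadBrokerRegistration {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper(
                    "onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181",
                    6000, event -> { });
            // Broker registration znodes are world-readable even with zookeeper.set.acl=true.
            byte[] data = zk.getData("/brokers/ids/2", false, null);
            System.out.println(new String(data, StandardCharsets.UTF_8));
            zk.close();
        }
    }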
[2021-08-25 01:15:10,874] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2021-08-25 01:15:10,915] INFO [ControllerEventThread controllerId=2] Starting (kafka.controller.ControllerEventManager$ControllerEventThread)
[2021-08-25 01:15:10,918] INFO [ExpirationReaper-2-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-08-25 01:15:10,921] INFO [ExpirationReaper-2-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-08-25 01:15:10,922] INFO [ExpirationReaper-2-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-08-25 01:15:10,934] DEBUG [Controller id=2] Broker 1 has been elected as the controller, so stopping the election process. (kafka.controller.KafkaController)
[2021-08-25 01:15:10,935] INFO [GroupCoordinator 2]: Starting up. (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:15:10,936] INFO [GroupCoordinator 2]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:15:10,939] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 2 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:15:10,978] INFO [ProducerId Manager 2]: Acquired new producerId block (brokerId:2,blockStartProducerId:11000,blockEndProducerId:11999) by writing to Zk with path version 12 (kafka.coordinator.transaction.ProducerIdManager)
[2021-08-25 01:15:11,018] INFO [TransactionCoordinator id=2] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-08-25 01:15:11,020] INFO [Transaction Marker Channel Manager 2]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2021-08-25 01:15:11,020] INFO [TransactionCoordinator id=2] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-08-25 01:15:11,049] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2021-08-25 01:15:11,064] INFO [SocketServer brokerId=2] Started data-plane processors for 2 acceptors (kafka.network.SocketServer)
[2021-08-25 01:15:11,066] INFO Kafka version: 5.3.1-ccs (org.apache.kafka.common.utils.AppInfoParser)
[2021-08-25 01:15:11,066] INFO Kafka commitId: 03799faf9878a999 (org.apache.kafka.common.utils.AppInfoParser)
[2021-08-25 01:15:11,067] INFO Kafka startTimeMs: 1629854111065 (org.apache.kafka.common.utils.AppInfoParser)
[2021-08-25 01:15:11,068] INFO [KafkaServer id=2] started (kafka.server.KafkaServer)
2021-08-25T01:16:51.859+0000 INIT [cadi] Loading CADI Properties from /opt/app/osaaf/local/org.onap.dmaap.mr.location.props
2021-08-25T01:16:51.860+0000 INIT [cadi] Loading CADI Properties from /opt/app/osaaf/local/org.onap.dmaap.mr.cred.props
2021-08-25T01:16:51.864+0000 INIT [cadi] cadi_keyfile points to /opt/app/osaaf/local/org.onap.dmaap.mr.keyfile
2021-08-25T01:16:52.119+0000 INIT [cadi] cadi_protocols is set to TLSv1.1,TLSv1.2
2021-08-25T01:16:52.240+0000 INIT [cadi] AAFLocator for https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1 could not be created. java.net.URISyntaxException: Malformed escape pair at index 36: https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1
2021-08-25T01:16:52.241+0000 ERROR [cadi] Null Locator passed [Ljava.lang.Object;@dbbe969
org.onap.aaf.cadi.LocatorException: Null Locator passed
	at org.onap.aaf.cadi.http.HMangr.<init>(HMangr.java:53)
	at org.onap.aaf.cadi.aaf.v2_0.AAFConHttp.<init>(AAFConHttp.java:54)
	at org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider.setup(Cadi3AAFProvider.java:141)
	at org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider.<init>(Cadi3AAFProvider.java:111)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at java.lang.Class.newInstance(Class.java:442)
	at java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:380)
	at java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
	at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
	at org.onap.dmaap.commonauth.kafka.base.authorization.AuthorizationProviderFactory.<init>(AuthorizationProviderFactory.java:34)
	at org.onap.dmaap.commonauth.kafka.base.authorization.AuthorizationProviderFactory.<clinit>(AuthorizationProviderFactory.java:29)
	at org.onap.dmaap.kafkaAuthorize.PlainSaslServer1.evaluateResponse(PlainSaslServer1.java:106)
	at org.apache.kafka.common.security.authenticator.SaslServerAuthenticator.handleSaslToken(SaslServerAuthenticator.java:451)
	at org.apache.kafka.common.security.authenticator.SaslServerAuthenticator.authenticate(SaslServerAuthenticator.java:291)
	at org.apache.kafka.common.network.KafkaChannel.prepare(KafkaChannel.java:173)
	at org.apache.kafka.common.network.Selector.pollSelectionKeys(Selector.java:547)
	at org.apache.kafka.common.network.Selector.poll(Selector.java:483)
	at kafka.network.Processor.poll(SocketServer.scala:863)
	at kafka.network.Processor.run(SocketServer.scala:762)
	at java.lang.Thread.run(Thread.java:748)
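Note: the root cause is in the INIT line above the stack trace. The locator URL https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1 still contains the CADI placeholders %CNS and %AAF_NS, which were evidently never substituted from the CADI properties. java.net.URI treats the first % as the start of a percent-escape, and since the two characters after it ("CN") are not a valid hex pair, parsing fails with "Malformed escape pair at index 36" (index 36 is that first %); the AAFLocator then comes back null and HMangr rejects it. The parse failure is reproducible in isolation:

    import java.net.URI;
    import java.net.URISyntaxException;

    public class MalformedEscapeDemo {
        public static void main(String[] args) {
            // The unsubstituted CADI locator template, exactly as logged above.
            String url = "https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1";
            try {
                new URI(url);
            } catch (URISyntaxException e) {
                // Prints: Malformed escape pair at index 36: https://aaf-locate.onap:8095/...
                System.out.println(e.getMessage());
            }
        }
    }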
[2021-08-25 01:16:52,242] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:16:52,242] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:11,307] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 1 from controller 1 epoch 8 for partition POLICY-PDP-PAP-1 (state.change.logger)
[2021-08-25 01:23:11,307] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 1 from controller 1 epoch 8 for partition POLICY-PDP-PAP-2 (state.change.logger)
[2021-08-25 01:23:11,307] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 1 from controller 1 epoch 8 for partition POLICY-PDP-PAP-0 (state.change.logger)
[2021-08-25 01:23:11,314] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 1 epoch 8 starting the become-leader transition for partition POLICY-PDP-PAP-0 (state.change.logger)
[2021-08-25 01:23:11,315] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(POLICY-PDP-PAP-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:23:11,377] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:11,387] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 55 ms (kafka.log.Log)
[2021-08-25 01:23:11,390] INFO Created log for partition POLICY-PDP-PAP-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:11,391] INFO [Partition POLICY-PDP-PAP-0 broker=2] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-0 (kafka.cluster.Partition)
[2021-08-25 01:23:11,394] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:11,395] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:11,395] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:11,397] INFO [Partition POLICY-PDP-PAP-0 broker=2] POLICY-PDP-PAP-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:11,412] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-0 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:11,413] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 1 epoch 8 for the become-leader transition for partition POLICY-PDP-PAP-0 (state.change.logger)
[2021-08-25 01:23:11,414] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger)
[2021-08-25 01:23:11,414] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 1 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-08-25 01:23:11,416] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:11,429] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:11,432] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:23:11,433] INFO Created log for partition POLICY-PDP-PAP-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:11,434] INFO [Partition POLICY-PDP-PAP-1 broker=2] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-1 (kafka.cluster.Partition)
[2021-08-25 01:23:11,434] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:11,434] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:11,435] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:11,435] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:11,448] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:11,451] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:23:11,452] INFO Created log for partition POLICY-PDP-PAP-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:11,453] INFO [Partition POLICY-PDP-PAP-2 broker=2] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-2 (kafka.cluster.Partition)
[2021-08-25 01:23:11,453] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:11,454] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(POLICY-PDP-PAP-2, POLICY-PDP-PAP-1) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:23:11,456] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-08-25 01:23:11,456] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger)
[2021-08-25 01:23:11,459] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-2 as part of become-follower request with correlation id 1 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:11,459] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-1 as part of become-follower request with correlation id 1 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:11,477] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:11,482] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-PDP-PAP-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:23:11,486] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-PDP-PAP-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:23:11,486] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:11,488] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-08-25 01:23:11,488] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 1 for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger)
[2021-08-25 01:23:11,489] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 1 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger)
[2021-08-25 01:23:11,489] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 1 from controller 1 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-08-25 01:23:11,497] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition POLICY-PDP-PAP-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:11,497] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition POLICY-PDP-PAP-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:11,504] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:11,504] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:11,510] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 2 (state.change.logger)
[2021-08-25 01:23:11,510] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition POLICY-PDP-PAP-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 2 (state.change.logger)
[2021-08-25 01:23:11,510] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 2 (state.change.logger)
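
Taken together, the entries above show broker 2 completing one LeaderAndIsr round for the three POLICY-PDP-PAP partitions: it becomes leader for partition 0, becomes a follower of brokers 0 and 1 for partitions 1 and 2 (starting replica fetchers against each leader and truncating the empty logs to the high watermark), and finally caches the controller's UpdateMetadata view of all three partitions. A minimal sketch for verifying the resulting assignment from outside the broker, under the same placeholder-connection assumptions as the config sketch above:

```java
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;

public class DescribePartitionsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                "onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            TopicDescription desc = admin
                    .describeTopics(Collections.singletonList("POLICY-PDP-PAP"))
                    .all().get().get("POLICY-PDP-PAP");
            // Expected to mirror the cached PartitionState lines above, e.g.
            // POLICY-PDP-PAP-0: leader broker 2, isr [2, 1, 0].
            desc.partitions().forEach(p ->
                    System.out.printf("POLICY-PDP-PAP-%d leader=%s isr=%s%n",
                            p.partition(), p.leader(), p.isr()));
        }
    }
}
```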
[2021-08-25 01:23:11,534] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:11,534] INFO Bypassing authentication for the admin user admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:12,495] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:12,496] INFO Bypassing authentication for the admin user admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
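
These Cadi3AAFProvider entries record the ONAP CADI/AAF authorization hook short-circuiting authentication for the built-in admin account on the SASL listener; the pair repeats for each admin connection. A minimal sketch of the client-side SASL/PLAIN settings such a connection would use, assuming the standard Kafka client configuration keys (the password below is a placeholder, not a value from this log):

```java
import java.util.Properties;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.common.config.SaslConfigs;

public class SaslPlainPropsSketch {
    // Client properties for a SASL_PLAINTEXT listener like the ones this
    // broker advertises; merge these into producer, consumer, or admin props.
    static Properties saslProps() {
        Properties props = new Properties();
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
        props.put(SaslConfigs.SASL_JAAS_CONFIG,
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                        + "username=\"admin\" password=\"<placeholder>\";");
        return props;
    }
}
```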
[2021-08-25 01:23:14,359] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-13 (state.change.logger)
[2021-08-25 01:23:14,360] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-46 (state.change.logger)
[2021-08-25 01:23:14,360] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-9 (state.change.logger)
[2021-08-25 01:23:14,360] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-42 (state.change.logger)
[2021-08-25 01:23:14,360] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-21 (state.change.logger)
[2021-08-25 01:23:14,360] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-17 (state.change.logger)
[2021-08-25 01:23:14,360] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-30 (state.change.logger)
[2021-08-25 01:23:14,360] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-26 (state.change.logger)
[2021-08-25 01:23:14,360] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-5 (state.change.logger)
[2021-08-25 01:23:14,360] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-38 (state.change.logger)
[2021-08-25 01:23:14,360] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-1 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-34 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-16 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-45 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-12 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-41 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-24 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-20 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-49 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-0 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-29 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-25 (state.change.logger)
[2021-08-25 01:23:14,361] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-8 (state.change.logger)
[2021-08-25 01:23:14,362] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-37 (state.change.logger)
[2021-08-25 01:23:14,362] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-4 (state.change.logger)
[2021-08-25 01:23:14,362] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-33 (state.change.logger)
[2021-08-25 01:23:14,362] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-15 (state.change.logger)
[2021-08-25 01:23:14,362] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-48 (state.change.logger)
[2021-08-25 01:23:14,362] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-11 (state.change.logger)
[2021-08-25 01:23:14,362] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-44 (state.change.logger)
[2021-08-25 01:23:14,362] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-23 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-19 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-32 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-28 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-7 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-40 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-3 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-36 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-47 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-14 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-43 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-10 (state.change.logger)
[2021-08-25 01:23:14,363] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-22 (state.change.logger)
[2021-08-25 01:23:14,364] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-18 (state.change.logger)
[2021-08-25 01:23:14,364] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-31 (state.change.logger)
[2021-08-25 01:23:14,364] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-27 (state.change.logger)
[2021-08-25 01:23:14,364] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-39 (state.change.logger)
[2021-08-25 01:23:14,364] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-6 (state.change.logger)
[2021-08-25 01:23:14,364] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-35 (state.change.logger)
[2021-08-25 01:23:14,364] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 3 from controller 1 epoch 8 for partition __consumer_offsets-2 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-0 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-48 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-45 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-42 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-39 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-36 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-33 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-30 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-27 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-24 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-21 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-18 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-15 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-12 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-9 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-6 (state.change.logger)
[2021-08-25 01:23:14,391] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-3 (state.change.logger)
[2021-08-25 01:23:14,392] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(__consumer_offsets-30, __consumer_offsets-21, __consumer_offsets-27, __consumer_offsets-9, __consumer_offsets-33, __consumer_offsets-36, __consumer_offsets-42, __consumer_offsets-3, __consumer_offsets-18, __consumer_offsets-15, __consumer_offsets-24, __consumer_offsets-48, __consumer_offsets-6, __consumer_offsets-0, __consumer_offsets-39, __consumer_offsets-12, __consumer_offsets-45) (kafka.server.ReplicaFetcherManager)
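
From the 01:23:14 entries onward, the controller assigns all 50 partitions of the internal __consumer_offsets topic; broker 2 becomes leader for the 17 partitions listed in the fetcher-removal Set above, and the per-partition blocks that follow show the compacted configuration (cleanup.policy=compact, 100 MiB segments) that distinguishes this topic from the delete-policy POLICY-PDP-PAP logs. The topic itself is populated by consumer group offset commits; a minimal sketch of that commit path with the standard Java consumer (group id and deserializers are placeholders, and the SASL properties sketched earlier would be merged in against these listeners):

```java
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class OffsetCommitSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group"); // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("POLICY-PDP-PAP"));
            consumer.poll(Duration.ofSeconds(1));
            // commitSync records the group's position in __consumer_offsets; the
            // owning partition is roughly abs(hash(group.id)) % 50, i.e. one of
            // the 50 partitions being created in this log.
            consumer.commitSync();
        }
    }
}
```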
[2021-08-25 01:23:14,406] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,409] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:23:14,410] INFO Created log for partition __consumer_offsets-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,413] INFO [Partition __consumer_offsets-0 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-0 (kafka.cluster.Partition)
[2021-08-25 01:23:14,413] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,413] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,413] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,413] INFO [Partition __consumer_offsets-0 broker=2] __consumer_offsets-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,417] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-0 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,433] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,437] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:14,438] INFO Created log for partition __consumer_offsets-48 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,439] INFO [Partition __consumer_offsets-48 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-48 (kafka.cluster.Partition)
[2021-08-25 01:23:14,439] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,439] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,439] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,439] INFO [Partition __consumer_offsets-48 broker=2] __consumer_offsets-48 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,442] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-48 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,455] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,458] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:23:14,459] INFO Created log for partition __consumer_offsets-45 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,460] INFO [Partition __consumer_offsets-45 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-45 (kafka.cluster.Partition)
[2021-08-25 01:23:14,460] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,460] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,460] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,460] INFO [Partition __consumer_offsets-45 broker=2] __consumer_offsets-45 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,464] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-45 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,476] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,478] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 11 ms (kafka.log.Log)
[2021-08-25 01:23:14,479] INFO Created log for partition __consumer_offsets-42 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,479] INFO [Partition __consumer_offsets-42 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-42 (kafka.cluster.Partition)
[2021-08-25 01:23:14,479] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,479] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,479] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,480] INFO [Partition __consumer_offsets-42 broker=2] __consumer_offsets-42 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,482] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-42 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,501] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,504] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:14,505] INFO Created log for partition __consumer_offsets-39 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,506] INFO [Partition __consumer_offsets-39 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-39 (kafka.cluster.Partition)
[2021-08-25 01:23:14,506] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,506] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,506] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,506] INFO [Partition __consumer_offsets-39 broker=2] __consumer_offsets-39 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,509] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-39 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,524] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,527] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:23:14,528] INFO Created log for partition __consumer_offsets-36 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,528] INFO [Partition __consumer_offsets-36 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-36 (kafka.cluster.Partition)
[2021-08-25 01:23:14,528] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,528] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,528] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,529] INFO [Partition __consumer_offsets-36 broker=2] __consumer_offsets-36 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,531] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-36 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,543] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,546] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-08-25 01:23:14,547] INFO Created log for partition __consumer_offsets-33 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,548] INFO [Partition __consumer_offsets-33 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-33 (kafka.cluster.Partition)
[2021-08-25 01:23:14,548] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,548] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,548] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,548] INFO [Partition __consumer_offsets-33 broker=2] __consumer_offsets-33 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,550] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-33 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,564] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,567] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:23:14,568] INFO Created log for partition __consumer_offsets-30 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,569] INFO [Partition __consumer_offsets-30 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-30 (kafka.cluster.Partition)
[2021-08-25 01:23:14,569] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,569] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,569] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,569] INFO [Partition __consumer_offsets-30 broker=2] __consumer_offsets-30 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,572] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-30 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,589] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,593] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 17 ms (kafka.log.Log)
[2021-08-25 01:23:14,594] INFO Created log for partition __consumer_offsets-27 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,595] INFO [Partition __consumer_offsets-27 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-27 (kafka.cluster.Partition)
[2021-08-25 01:23:14,595] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,595] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,595] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,595] INFO [Partition __consumer_offsets-27 broker=2] __consumer_offsets-27 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,597] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-27 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,609] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,613] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:23:14,614] INFO Created log for partition __consumer_offsets-24 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,614] INFO [Partition __consumer_offsets-24 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-24 (kafka.cluster.Partition)
[2021-08-25 01:23:14,614] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,615] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,615] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,615] INFO [Partition __consumer_offsets-24 broker=2] __consumer_offsets-24 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,618] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-24 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,633] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,635] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:23:14,636] INFO Created log for partition __consumer_offsets-21 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,637] INFO [Partition __consumer_offsets-21 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-21 (kafka.cluster.Partition)
[2021-08-25 01:23:14,637] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,637] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,637] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,637] INFO [Partition __consumer_offsets-21 broker=2] __consumer_offsets-21 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,640] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-21 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,655] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,659] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:23:14,660] INFO Created log for partition __consumer_offsets-18 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,661] INFO [Partition __consumer_offsets-18 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-18 (kafka.cluster.Partition)
[2021-08-25 01:23:14,661] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,661] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,661] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,662] INFO [Partition __consumer_offsets-18 broker=2] __consumer_offsets-18 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,665] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-18 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,682] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,685] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:14,686] INFO Created log for partition __consumer_offsets-15 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,687] INFO [Partition __consumer_offsets-15 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-15 (kafka.cluster.Partition)
[2021-08-25 01:23:14,687] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,687] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,687] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,687] INFO [Partition __consumer_offsets-15 broker=2] __consumer_offsets-15 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,690] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-15 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,706] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,709] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:23:14,710] INFO Created log for partition __consumer_offsets-12 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,711] INFO [Partition __consumer_offsets-12 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-12 (kafka.cluster.Partition)
[2021-08-25 01:23:14,711] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,711] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,711] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,711] INFO [Partition __consumer_offsets-12 broker=2] __consumer_offsets-12 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,714] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-12 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,732] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,737] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 19 ms (kafka.log.Log)
[2021-08-25 01:23:14,739] INFO Created log for partition __consumer_offsets-9 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,740] INFO [Partition __consumer_offsets-9 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-9 (kafka.cluster.Partition)
[2021-08-25 01:23:14,740] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,740] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,740] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,740] INFO [Partition __consumer_offsets-9 broker=2] __consumer_offsets-9 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,743] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-9 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,758] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,761] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:23:14,762] INFO Created log for partition __consumer_offsets-6 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,763] INFO [Partition __consumer_offsets-6 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-6 (kafka.cluster.Partition)
[2021-08-25 01:23:14,763] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,763] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,763] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,763] INFO [Partition __consumer_offsets-6 broker=2] __consumer_offsets-6 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,766] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-6 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,780] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,783] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:23:14,784] INFO Created log for partition __consumer_offsets-3 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,785] INFO [Partition __consumer_offsets-3 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-3 (kafka.cluster.Partition)
[2021-08-25 01:23:14,785] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,785] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,785] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,785] INFO [Partition __consumer_offsets-3 broker=2] __consumer_offsets-3 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-3 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-0 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-48 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-45 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-42 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-39 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-36 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-33 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-30 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-27 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-24 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-21 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-18 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-15 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-12 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-9 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-6 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-3 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-10 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-7 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-4 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-1 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,787] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-49 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-46 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-43 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-40 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-37 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-34 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-31 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-19 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-28 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-25 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-16 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-22 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-08-25 01:23:14,788] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 3 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-13 with leader 0 (state.change.logger)
[2021-08-25 01:23:14,788] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,788] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,800] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,802] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 11 ms (kafka.log.Log)
[2021-08-25 01:23:14,803] INFO Created log for partition __consumer_offsets-29 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,803] INFO [Partition __consumer_offsets-29 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-29 (kafka.cluster.Partition)
[2021-08-25 01:23:14,803] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,803] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,814] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,817] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 11 ms (kafka.log.Log)
[2021-08-25 01:23:14,817] INFO Created log for partition __consumer_offsets-10 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,818] INFO [Partition __consumer_offsets-10 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-10 (kafka.cluster.Partition)
[2021-08-25 01:23:14,818] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,818] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,818] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,830] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,833] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-08-25 01:23:14,834] INFO Created log for partition __consumer_offsets-26 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,834] INFO [Partition __consumer_offsets-26 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-26 (kafka.cluster.Partition)
[2021-08-25 01:23:14,834] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,834] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,834] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,834] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,848] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,852] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:23:14,853] INFO Created log for partition __consumer_offsets-7 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,854] INFO [Partition __consumer_offsets-7 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-7 (kafka.cluster.Partition)
[2021-08-25 01:23:14,854] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,854] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,870] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,874] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:14,875] INFO Created log for partition __consumer_offsets-4 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,876] INFO [Partition __consumer_offsets-4 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-4 (kafka.cluster.Partition)
[2021-08-25 01:23:14,876] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,876] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,876] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,876] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,898] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,901] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:14,902] INFO Created log for partition __consumer_offsets-23 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,903] INFO [Partition __consumer_offsets-23 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-23 (kafka.cluster.Partition)
[2021-08-25 01:23:14,903] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,904] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,904] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,919] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,922] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:23:14,923] INFO Created log for partition __consumer_offsets-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,924] INFO [Partition __consumer_offsets-1 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-1 (kafka.cluster.Partition)
[2021-08-25 01:23:14,924] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,924] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,938] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,942] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:23:14,943] INFO Created log for partition __consumer_offsets-20 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,943] INFO [Partition __consumer_offsets-20 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-20 (kafka.cluster.Partition)
[2021-08-25 01:23:14,943] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,943] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,944] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,944] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,965] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,969] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 22 ms (kafka.log.Log)
[2021-08-25 01:23:14,969] INFO Created log for partition __consumer_offsets-17 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,970] INFO [Partition __consumer_offsets-17 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-17 (kafka.cluster.Partition)
[2021-08-25 01:23:14,970] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,971] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,992] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:14,995] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 20 ms (kafka.log.Log)
[2021-08-25 01:23:14,996] INFO Created log for partition __consumer_offsets-14 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:14,996] INFO [Partition __consumer_offsets-14 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-14 (kafka.cluster.Partition)
[2021-08-25 01:23:14,996] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,996] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,997] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:14,997] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,011] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,014] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:23:15,015] INFO Created log for partition __consumer_offsets-49 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,015] INFO [Partition __consumer_offsets-49 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-49 (kafka.cluster.Partition)
[2021-08-25 01:23:15,015] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,016] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,016] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,030] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,034] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:23:15,035] INFO Created log for partition __consumer_offsets-11 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,035] INFO [Partition __consumer_offsets-11 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-11 (kafka.cluster.Partition)
[2021-08-25 01:23:15,035] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,036] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,052] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,056] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:15,058] INFO Created log for partition __consumer_offsets-46 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,059] INFO [Partition __consumer_offsets-46 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-46 (kafka.cluster.Partition)
[2021-08-25 01:23:15,059] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,059] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,059] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,076] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,080] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 17 ms (kafka.log.Log)
[2021-08-25 01:23:15,081] INFO Created log for partition __consumer_offsets-8 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,086] INFO [Partition __consumer_offsets-8 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-8 (kafka.cluster.Partition)
[2021-08-25 01:23:15,087] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,087] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,087] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,087] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,103] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,106] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:23:15,107] INFO Created log for partition __consumer_offsets-43 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,107] INFO [Partition __consumer_offsets-43 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-43 (kafka.cluster.Partition)
[2021-08-25 01:23:15,107] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,107] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,108] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,123] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,127] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:15,128] INFO Created log for partition __consumer_offsets-5 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,128] INFO [Partition __consumer_offsets-5 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-5 (kafka.cluster.Partition)
[2021-08-25 01:23:15,128] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,129] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,142] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,145] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:23:15,146] INFO Created log for partition __consumer_offsets-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,147] INFO [Partition __consumer_offsets-2 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-2 (kafka.cluster.Partition)
[2021-08-25 01:23:15,147] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,147] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,147] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,163] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,166] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:15,167] INFO Created log for partition __consumer_offsets-40 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,167] INFO [Partition __consumer_offsets-40 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-40 (kafka.cluster.Partition)
[2021-08-25 01:23:15,168] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,168] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,168] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,168] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,183] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,186] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:23:15,187] INFO Created log for partition __consumer_offsets-37 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,188] INFO [Partition __consumer_offsets-37 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-37 (kafka.cluster.Partition)
[2021-08-25 01:23:15,188] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,188] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,206] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,210] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:15,211] INFO Created log for partition __consumer_offsets-34 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,212] INFO [Partition __consumer_offsets-34 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-34 (kafka.cluster.Partition)
[2021-08-25 01:23:15,212] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,212] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,212] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,213] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,230] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,234] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 18 ms (kafka.log.Log)
[2021-08-25 01:23:15,235] INFO Created log for partition __consumer_offsets-31 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,236] INFO [Partition __consumer_offsets-31 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-31 (kafka.cluster.Partition)
[2021-08-25 01:23:15,236] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,236] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,236] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,256] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,260] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 19 ms (kafka.log.Log)
[2021-08-25 01:23:15,261] INFO Created log for partition __consumer_offsets-47 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,261] INFO [Partition __consumer_offsets-47 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-47 (kafka.cluster.Partition)
[2021-08-25 01:23:15,262] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,262] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,262] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,287] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,291] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 24 ms (kafka.log.Log)
[2021-08-25 01:23:15,292] INFO Created log for partition __consumer_offsets-19 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,293] INFO [Partition __consumer_offsets-19 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-19 (kafka.cluster.Partition)
[2021-08-25 01:23:15,293] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,293] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,306] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,309] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-08-25 01:23:15,310] INFO Created log for partition __consumer_offsets-28 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,310] INFO [Partition __consumer_offsets-28 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-28 (kafka.cluster.Partition)
[2021-08-25 01:23:15,310] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,310] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,311] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,323] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,326] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:23:15,327] INFO Created log for partition __consumer_offsets-38 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,327] INFO [Partition __consumer_offsets-38 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-38 (kafka.cluster.Partition)
[2021-08-25 01:23:15,327] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,327] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,327] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,327] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,341] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,345] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:23:15,345] INFO Created log for partition __consumer_offsets-35 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,346] INFO [Partition __consumer_offsets-35 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-35 (kafka.cluster.Partition)
[2021-08-25 01:23:15,346] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,346] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,362] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,366] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:15,367] INFO Created log for partition __consumer_offsets-44 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,367] INFO [Partition __consumer_offsets-44 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-44 (kafka.cluster.Partition)
[2021-08-25 01:23:15,367] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,367] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,368] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,368] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,387] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,391] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 20 ms (kafka.log.Log)
[2021-08-25 01:23:15,392] INFO Created log for partition __consumer_offsets-25 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,392] INFO [Partition __consumer_offsets-25 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-25 (kafka.cluster.Partition)
[2021-08-25 01:23:15,393] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,393] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,408] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,411] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:23:15,412] INFO Created log for partition __consumer_offsets-16 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,412] INFO [Partition __consumer_offsets-16 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-16 (kafka.cluster.Partition)
[2021-08-25 01:23:15,413] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,413] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,413] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,430] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,434] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 17 ms (kafka.log.Log)
[2021-08-25 01:23:15,435] INFO Created log for partition __consumer_offsets-22 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,435] INFO [Partition __consumer_offsets-22 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-22 (kafka.cluster.Partition)
[2021-08-25 01:23:15,435] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,435] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,436] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,436] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,453] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,457] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 17 ms (kafka.log.Log)
[2021-08-25 01:23:15,458] INFO Created log for partition __consumer_offsets-41 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,459] INFO [Partition __consumer_offsets-41 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-41 (kafka.cluster.Partition)
[2021-08-25 01:23:15,459] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,459] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,475] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,479] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:15,479] INFO Created log for partition __consumer_offsets-32 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,480] INFO [Partition __consumer_offsets-32 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-32 (kafka.cluster.Partition)
[2021-08-25 01:23:15,480] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,480] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,480] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,480] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:15,496] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:15,500] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:23:15,500] INFO Created log for partition __consumer_offsets-13 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:15,501] INFO [Partition __consumer_offsets-13 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-13 (kafka.cluster.Partition)
[2021-08-25 01:23:15,501] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
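The "Created log" entries above record the effective topic-level configuration that the LogManager applied to each __consumer_offsets partition (cleanup.policy -> compact, segment.bytes -> 104857600, retention.ms -> 604800000, and so on). As a minimal sketch of reading that same effective configuration back at runtime with the Kafka AdminClient, assuming the internal listener address shown in the BrokerEndPoint lines below and omitting the SASL client properties this SASL_PLAINTEXT deployment would also require:

    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.Config;
    import org.apache.kafka.common.config.ConfigResource;

    public class DescribeOffsetsTopicConfig {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Bootstrap address taken from the BrokerEndPoint lines in this log; the
            // INTERNAL_SASL_PLAINTEXT listener would additionally need SASL settings.
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                    "onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092");
            try (AdminClient admin = AdminClient.create(props)) {
                ConfigResource topic =
                        new ConfigResource(ConfigResource.Type.TOPIC, "__consumer_offsets");
                Config config = admin.describeConfigs(Collections.singleton(topic))
                        .all().get().get(topic);
                // Prints entries such as cleanup.policy=compact and
                // segment.bytes=104857600, matching the LogManager lines above.
                config.entries().forEach(e ->
                        System.out.println(e.name() + "=" + e.value()));
            }
        }
    }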
[2021-08-25 01:23:15,502] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(__consumer_offsets-28, __consumer_offsets-10, __consumer_offsets-32, __consumer_offsets-14, __consumer_offsets-40, __consumer_offsets-37, __consumer_offsets-22, __consumer_offsets-41, __consumer_offsets-4, __consumer_offsets-23, __consumer_offsets-26, __consumer_offsets-8, __consumer_offsets-49, __consumer_offsets-31, __consumer_offsets-13, __consumer_offsets-35, __consumer_offsets-17, __consumer_offsets-43, __consumer_offsets-25, __consumer_offsets-44, __consumer_offsets-47, __consumer_offsets-7, __consumer_offsets-29, __consumer_offsets-11, __consumer_offsets-34, __consumer_offsets-19, __consumer_offsets-16, __consumer_offsets-38, __consumer_offsets-1, __consumer_offsets-20, __consumer_offsets-5, __consumer_offsets-46, __consumer_offsets-2) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-22 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-25 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-28 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-31 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-34 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-37 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-40 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-43 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-46 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-49 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-1 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,502] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-4 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-7 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-10 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-13 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-16 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-19 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-22 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-25 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-28 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-31 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-34 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-37 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-40 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-43 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-46 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-49 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-41 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-44 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-47 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,503] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-1 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-4 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-7 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-10 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-13 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-16 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-19 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-2 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-5 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-8 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-11 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-14 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-17 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-20 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-23 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-26 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-29 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-32 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-35 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,504] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-38 as part of become-follower request with correlation id 3 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,508] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-8 -> (offset=0, leaderEpoch=0), __consumer_offsets-35 -> (offset=0, leaderEpoch=0), __consumer_offsets-41 -> (offset=0, leaderEpoch=0), __consumer_offsets-23 -> (offset=0, leaderEpoch=0), __consumer_offsets-47 -> (offset=0, leaderEpoch=0), __consumer_offsets-38 -> (offset=0, leaderEpoch=0), __consumer_offsets-17 -> (offset=0, leaderEpoch=0), __consumer_offsets-11 -> (offset=0, leaderEpoch=0), __consumer_offsets-2 -> (offset=0, leaderEpoch=0), __consumer_offsets-14 -> (offset=0, leaderEpoch=0), __consumer_offsets-20 -> (offset=0, leaderEpoch=0), __consumer_offsets-44 -> (offset=0, leaderEpoch=0), __consumer_offsets-5 -> (offset=0, leaderEpoch=0), __consumer_offsets-26 -> (offset=0, leaderEpoch=0), __consumer_offsets-29 -> (offset=0, leaderEpoch=0), __consumer_offsets-32 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:23:15,508] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-22 -> (offset=0, leaderEpoch=0), __consumer_offsets-4 -> (offset=0, leaderEpoch=0), __consumer_offsets-7 -> (offset=0, leaderEpoch=0), __consumer_offsets-46 -> (offset=0, leaderEpoch=0), __consumer_offsets-25 -> (offset=0, leaderEpoch=0), __consumer_offsets-49 -> (offset=0, leaderEpoch=0), __consumer_offsets-16 -> (offset=0, leaderEpoch=0), __consumer_offsets-28 -> (offset=0, leaderEpoch=0), __consumer_offsets-31 -> (offset=0, leaderEpoch=0), __consumer_offsets-37 -> (offset=0, leaderEpoch=0), __consumer_offsets-19 -> (offset=0, leaderEpoch=0), __consumer_offsets-13 -> (offset=0, leaderEpoch=0), __consumer_offsets-43 -> (offset=0, leaderEpoch=0), __consumer_offsets-1 -> (offset=0, leaderEpoch=0), __consumer_offsets-34 -> (offset=0, leaderEpoch=0), __consumer_offsets-10 -> (offset=0, leaderEpoch=0), __consumer_offsets-40 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-22 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-25 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-28 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-31 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-34 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-37 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-40 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-43 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-46 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-49 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,509] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-1 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-4 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-7 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-10 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-13 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-16 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-19 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 3 for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-10 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-7 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-4 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-1 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-49 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-46 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-43 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,536] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-40 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-37 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-34 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-31 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-19 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-28 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-25 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-16 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-22 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-08-25 01:23:15,537] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 3 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-13 with leader 0 (state.change.logger)
[2021-08-25 01:23:15,538] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,539] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,540] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager)
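Editor's note: the loading/unloading pattern above is the group coordinator moving with partition leadership. Broker 2 loads offsets for the __consumer_offsets partitions it just became leader of (0, 3, 6, ..., 48) and unloads the ones it now merely replicates. Which partition a consumer group lands on, and therefore which broker coordinates it, is a fixed hash of the group id; a minimal sketch of that mapping, assuming the 50-partition __consumer_offsets topic seen in this log and a hypothetical group id:

    public class GroupCoordinatorPartition {
        // Same shape as the broker's formula: non-negative hash of the group id
        // modulo offsets.topic.num.partitions (50 in this deployment).
        static int partitionFor(String groupId, int offsetsTopicPartitions) {
            return (groupId.hashCode() & 0x7fffffff) % offsetsTopicPartitions;
        }

        public static void main(String[] args) {
            System.out.println(partitionFor("example-group", 50)); // hypothetical group id
        }
    }

The leader of the resulting partition serves offset commits/fetches and group membership for that group, which is why the offset cache is loaded and dropped on every leadership change.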
[2021-08-25 01:23:15,546] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-13 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-17 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-5 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-38 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-34 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,547] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-20 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-49 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-25 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-8 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-4 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-15 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-11 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-44 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-23 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-19 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-32 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-28 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-40 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-36 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-47 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-14 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-43 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-10 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-22 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-31 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,548] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,549] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,549] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,549] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-35 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
[2021-08-25 01:23:15,549] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 4 (state.change.logger)
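Editor's note: each "Cached leader info" line is broker 2 refreshing its local metadata cache from the controller's UpdateMetadata request; the same leader/ISR/replica view is what clients observe. A small sketch that reads it back with the Kafka AdminClient, assuming a reachable listener at a hypothetical localhost:9092:

    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.TopicDescription;
    import org.apache.kafka.common.TopicPartitionInfo;

    public class ShowLeaderAndIsr {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical address
            try (AdminClient admin = AdminClient.create(props)) {
                TopicDescription desc = admin
                        .describeTopics(Collections.singleton("__consumer_offsets"))
                        .all().get().get("__consumer_offsets");
                for (TopicPartitionInfo p : desc.partitions()) {
                    // Mirrors the leader/isr/replicas fields cached in the log above.
                    System.out.printf("partition %d leader=%s isr=%s replicas=%s%n",
                            p.partition(), p.leader(), p.isr(), p.replicas());
                }
            }
        }
    }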
[2021-08-25 01:23:15,551] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-0 in 12 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,552] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-3 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,552] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-6 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,552] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-9 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,552] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-12 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,552] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-15 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,552] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-18 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,552] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-21 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,552] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-24 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,552] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-27 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,553] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-30 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,553] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-33 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,553] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-36 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,553] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-39 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,553] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-42 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,553] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-45 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,553] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-48 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,554] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-22. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-25. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-28. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-31. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-34. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-37. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-40. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-43. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-46. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-49. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-41. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-44. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-47. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-1. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-4. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-7. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-10. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-13. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-16. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-19. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-2. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-5. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-8. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-11. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-14. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-17. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-20. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-23. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-26. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-29. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-32. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-35. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,555] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-38. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:23:15,637] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-17 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,637] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,637] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-32 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,638] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,638] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-47 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,638] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,638] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-14 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,638] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,638] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-44 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,638] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,638] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-29 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,638] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,638] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-11 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,638] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,638] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-41 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,638] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,638] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-26 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,638] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,638] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-23 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,639] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,639] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-8 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,639] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,639] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-38 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,639] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,639] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-20 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,639] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,639] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-5 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,639] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,639] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-35 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,639] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,639] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,639] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,794] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-28 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,795] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,795] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-43 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,795] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,795] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-10 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,795] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,795] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-25 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,795] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,796] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-7 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,796] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,796] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-37 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,796] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,796] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-40 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,796] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,796] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-22 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,796] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,796] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-4 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,796] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,796] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-34 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,796] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,796] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-19 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,797] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,797] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-49 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,797] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,797] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-16 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,797] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,797] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,797] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,797] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-31 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,797] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,797] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-46 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,797] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:23:15,797] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-13 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:23:15,797] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
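Editor's note: before fetching from a new leader, each follower truncates its local log to its high watermark so the replicas cannot diverge; here every partition is still empty (largest offset -1), so truncating to 0 is a no-op. With a recent client the log start/end offsets can be checked externally; a sketch against a hypothetical localhost:9092 listener:

    import java.util.Map;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.OffsetSpec;
    import org.apache.kafka.common.TopicPartition;

    public class ShowLogEndOffset {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical address
            TopicPartition tp = new TopicPartition("__consumer_offsets", 17);
            try (AdminClient admin = AdminClient.create(props)) {
                long earliest = admin.listOffsets(Map.of(tp, OffsetSpec.earliest()))
                        .partitionResult(tp).get().offset();
                long latest = admin.listOffsets(Map.of(tp, OffsetSpec.latest()))
                        .partitionResult(tp).get().offset();
                // For the empty partitions above both values are 0, matching the
                // "largest offset in the log is -1" no-op truncation.
                System.out.printf("earliest=%d latest=%d%n", earliest, latest);
            }
        }
    }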
[2021-08-25 01:23:16,154] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-8 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,158] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-35 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,159] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-41 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,159] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-23 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,159] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-47 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,159] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-38 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,160] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-17 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,160] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-11 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,160] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-2 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,160] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-14 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,160] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-20 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,160] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-44 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,160] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-5 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,160] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-26 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,160] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-29 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-08-25 01:23:16,161] ERROR [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Error for partition __consumer_offsets-32 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
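Editor's note: this burst of UnknownTopicOrPartitionException errors is expected during a rolling start. Broker 2's replica fetchers reach the new leader before that broker has applied the same LeaderAndIsr/UpdateMetadata state; the exception is retriable and the fetchers simply try again. Client code can wait out the same propagation window; a sketch with hypothetical connection details:

    import java.util.Collections;
    import java.util.Properties;
    import java.util.concurrent.ExecutionException;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;

    public class WaitForTopic {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical address
            try (AdminClient admin = AdminClient.create(props)) {
                while (true) {
                    try {
                        admin.describeTopics(Collections.singleton("__consumer_offsets")).all().get();
                        break; // metadata now visible on the contacted broker
                    } catch (ExecutionException e) {
                        if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) throw e;
                        Thread.sleep(1000); // transient while metadata propagates
                    }
                }
            }
        }
    }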
[2021-08-25 01:23:19,035] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:19,035] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:19,041] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:19,041] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:22,252] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:22,252] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:26,726] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:26,726] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
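Editor's note: the Cadi3AAFProvider lines come from the ONAP DMaaP authorization plugin on the broker's SASL listeners; it recognizes the configured admin user and skips the AAF lookup for it (hence the "by passes the authentication" message, quoted verbatim from the plugin). On the client side this corresponds to an ordinary SASL/PLAIN login; a generic sketch with hypothetical address and placeholder credentials, not the deployment's real ones:

    import java.util.Properties;
    import org.apache.kafka.clients.CommonClientConfigs;
    import org.apache.kafka.clients.admin.AdminClient;

    public class SaslPlainClient {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical address
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
            props.put("sasl.mechanism", "PLAIN");
            props.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                    + "username=\"admin\" password=\"<secret>\";"); // placeholder credentials
            try (AdminClient admin = AdminClient.create(props)) {
                // Every request on this connection is authenticated as "admin",
                // the username the authorizer events above were logged for.
            }
        }
    }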
[2021-08-25 01:23:27,446] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2, zkVersion=0, replicas=2, isNew=true) correlation id 5 from controller 1 epoch 8 for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-08-25 01:23:27,448] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 5 from controller 1 epoch 8 starting the become-leader transition for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-08-25 01:23:27,448] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(SDC-DISTR-NOTIF-TOPIC-AUTO-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:23:27,473] INFO [Log partition=SDC-DISTR-NOTIF-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:27,477] INFO [Log partition=SDC-DISTR-NOTIF-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 23 ms (kafka.log.Log)
[2021-08-25 01:23:27,478] INFO Created log for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:27,483] INFO [Partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 broker=2] No checkpointed highwatermark is found for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (kafka.cluster.Partition)
[2021-08-25 01:23:27,484] INFO Replica loaded for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:27,484] INFO [Partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 broker=2] SDC-DISTR-NOTIF-TOPIC-AUTO-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:27,489] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 5 for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:27,489] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 5 from controller 1 epoch 8 for the become-leader transition for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-08-25 01:23:27,492] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 6 (state.change.logger)
[2021-08-25 01:23:27,565] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2, zkVersion=0, replicas=2, isNew=true) correlation id 7 from controller 1 epoch 8 for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2021-08-25 01:23:27,566] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 7 from controller 1 epoch 8 starting the become-leader transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2021-08-25 01:23:27,566] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(SDC-DISTR-STATUS-TOPIC-AUTO-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:23:27,589] INFO [Log partition=SDC-DISTR-STATUS-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:23:27,593] INFO [Log partition=SDC-DISTR-STATUS-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 21 ms (kafka.log.Log)
[2021-08-25 01:23:27,594] INFO Created log for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:23:27,597] INFO [Partition SDC-DISTR-STATUS-TOPIC-AUTO-0 broker=2] No checkpointed highwatermark is found for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (kafka.cluster.Partition)
[2021-08-25 01:23:27,597] INFO Replica loaded for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:23:27,598] INFO [Partition SDC-DISTR-STATUS-TOPIC-AUTO-0 broker=2] SDC-DISTR-STATUS-TOPIC-AUTO-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:23:27,601] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 7 for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:23:27,601] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 7 from controller 1 epoch 8 for the become-leader transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2021-08-25 01:23:27,605] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 8 (state.change.logger)
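The property dump in each "Created log for partition ..." line above is the topic's effective configuration at creation: retention.ms 604800000 (7 days), segment.bytes 1073741824 (1 GiB), min.insync.replicas 1, unclean.leader.election.enable false, and so on, all inherited from broker defaults (the "Creating topic ... with configuration {}" lines later in this log likewise set no per-topic overrides). message.format.version 2.3-IV1 marks this as a Kafka 2.3-era broker, where per-topic configs are still read through ZooKeeper; a sketch of checking for overrides, with the ZooKeeper service name assumed:

# Only overrides are listed; an empty result means pure broker defaults.
kafka-configs --zookeeper onap-message-router-zookeeper:2181 \
  --entity-type topics --entity-name SDC-DISTR-NOTIF-TOPIC-AUTO --describe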
[2021-08-25 01:23:28,200] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:28,200] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:29,881] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:29,881] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:31,415] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:31,415] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:45,563] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:45,564] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:47,287] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:47,287] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:50,438] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:50,439] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:53,228] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:53,228] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:54,346] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:54,346] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:54,375] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-0) (reason: Adding new member SO-COpenSource-Env11-343967ee-ce2a-4f2c-b1d1-9660bc136c7c with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:23:57,388] INFO [GroupCoordinator 2]: Stabilized group SO-OpenSource-Env11--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-0) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:23:57,399] INFO [GroupCoordinator 2]: Assignment received from leader for group SO-OpenSource-Env11--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
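The three GroupCoordinator lines above are the normal dynamic-membership handshake: a new member joins (PreparingRebalance, old generation 0), the group stabilizes at generation 1, and the elected group leader sends back the partition assignment. To inspect the resulting member-to-partition mapping and lag, a sketch reusing the assumed client properties file from earlier:

# Shows members, their assigned partitions, current offsets, and lag.
kafka-consumer-groups --bootstrap-server onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092 \
  --command-config /tmp/client.properties \
  --describe --group SO-OpenSource-Env11--SDC-DISTR-NOTIF-TOPIC-AUTO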
[2021-08-25 01:23:57,462] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:57,462] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:57,554] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:57,554] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:58,267] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:58,267] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:23:58,272] INFO [GroupCoordinator 2]: Preparing to rebalance group aai-ml-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-27) (reason: Adding new member aai-ml-a9322ec2-0f6f-4843-819e-c73e63e63169 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:24:01,274] INFO [GroupCoordinator 2]: Stabilized group aai-ml-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-27) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:24:01,277] INFO [GroupCoordinator 2]: Assignment received from leader for group aai-ml-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:24:01,403] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:24:01,403] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:24:20,737] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2, zkVersion=0, replicas=2, isNew=true) correlation id 9 from controller 1 epoch 8 for partition org.onap.dmaap.mr.PNF_READY-1 (state.change.logger)
[2021-08-25 01:24:20,739] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 9 from controller 1 epoch 8 starting the become-leader transition for partition org.onap.dmaap.mr.PNF_READY-1 (state.change.logger)
[2021-08-25 01:24:20,739] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(org.onap.dmaap.mr.PNF_READY-1) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:24:20,761] INFO [Log partition=org.onap.dmaap.mr.PNF_READY-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:24:20,765] INFO [Log partition=org.onap.dmaap.mr.PNF_READY-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 21 ms (kafka.log.Log)
[2021-08-25 01:24:20,766] INFO Created log for partition org.onap.dmaap.mr.PNF_READY-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:24:20,771] INFO [Partition org.onap.dmaap.mr.PNF_READY-1 broker=2] No checkpointed highwatermark is found for partition org.onap.dmaap.mr.PNF_READY-1 (kafka.cluster.Partition)
[2021-08-25 01:24:20,771] INFO Replica loaded for partition org.onap.dmaap.mr.PNF_READY-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:20,771] INFO [Partition org.onap.dmaap.mr.PNF_READY-1 broker=2] org.onap.dmaap.mr.PNF_READY-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:24:20,774] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 9 for partition org.onap.dmaap.mr.PNF_READY-1 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:24:20,774] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 9 from controller 1 epoch 8 for the become-leader transition for partition org.onap.dmaap.mr.PNF_READY-1 (state.change.logger)
[2021-08-25 01:24:20,777] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 10 (state.change.logger)
[2021-08-25 01:24:20,778] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 10 (state.change.logger)
[2021-08-25 01:24:21,834] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2, zkVersion=0, replicas=2, isNew=true) correlation id 11 from controller 1 epoch 8 for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 (state.change.logger)
[2021-08-25 01:24:21,836] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 11 from controller 1 epoch 8 starting the become-leader transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 (state.change.logger)
[2021-08-25 01:24:21,836] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(org.onap.dmaap.mr.PNF_REGISTRATION-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:24:21,855] INFO [Log partition=org.onap.dmaap.mr.PNF_REGISTRATION-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:24:21,860] INFO [Log partition=org.onap.dmaap.mr.PNF_REGISTRATION-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 20 ms (kafka.log.Log)
[2021-08-25 01:24:21,861] INFO Created log for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:24:21,866] INFO [Partition org.onap.dmaap.mr.PNF_REGISTRATION-0 broker=2] No checkpointed highwatermark is found for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 (kafka.cluster.Partition)
[2021-08-25 01:24:21,866] INFO Replica loaded for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:21,866] INFO [Partition org.onap.dmaap.mr.PNF_REGISTRATION-0 broker=2] org.onap.dmaap.mr.PNF_REGISTRATION-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:24:21,869] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 11 for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:24:21,869] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 11 from controller 1 epoch 8 for the become-leader transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 (state.change.logger)
[2021-08-25 01:24:21,872] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 12 (state.change.logger)
[2021-08-25 01:24:21,873] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 12 (state.change.logger)
[2021-08-25 01:24:23,161] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.mirrormakeragent-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 13 (state.change.logger)
[2021-08-25 01:24:26,190] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:24:26,190] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:24:31,335] INFO Creating topic POLICY-NOTIFICATION with configuration {} and initial partition assignment Map(2 -> ArrayBuffer(1, 2, 0), 1 -> ArrayBuffer(0, 1, 2), 0 -> ArrayBuffer(2, 0, 1)) (kafka.zk.AdminZkClient)
[2021-08-25 01:24:31,350] INFO [KafkaApi-2] Auto creation of topic POLICY-NOTIFICATION with 3 partitions and replication factor 3 is successful (kafka.server.KafkaApis)
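POLICY-NOTIFICATION was created implicitly: the first metadata request for the topic triggered auto-creation with the broker defaults of 3 partitions and replication factor 3, as the two lines above record. Deployments that prefer not to depend on auto.create.topics.enable usually pre-create topics; an equivalent sketch, with the topic name, partition count, and replication factor taken from the log and everything else assumed:

# Explicit creation equivalent to the auto-creation logged above.
kafka-topics --bootstrap-server onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092 \
  --command-config /tmp/client.properties \
  --create --topic POLICY-NOTIFICATION --partitions 3 --replication-factor 3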
[2021-08-25 01:24:31,373] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 14 from controller 1 epoch 8 for partition POLICY-NOTIFICATION-0 (state.change.logger)
[2021-08-25 01:24:31,373] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 14 from controller 1 epoch 8 for partition POLICY-NOTIFICATION-1 (state.change.logger)
[2021-08-25 01:24:31,373] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 14 from controller 1 epoch 8 for partition POLICY-NOTIFICATION-2 (state.change.logger)
[2021-08-25 01:24:31,376] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 14 from controller 1 epoch 8 starting the become-leader transition for partition POLICY-NOTIFICATION-0 (state.change.logger)
[2021-08-25 01:24:31,376] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(POLICY-NOTIFICATION-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:24:31,392] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:24:31,395] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:24:31,396] INFO Created log for partition POLICY-NOTIFICATION-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:24:31,399] INFO [Partition POLICY-NOTIFICATION-0 broker=2] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-0 (kafka.cluster.Partition)
[2021-08-25 01:24:31,399] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:31,399] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:31,399] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:31,399] INFO [Partition POLICY-NOTIFICATION-0 broker=2] POLICY-NOTIFICATION-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:24:31,401] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 14 for partition POLICY-NOTIFICATION-0 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:24:31,401] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 14 from controller 1 epoch 8 for the become-leader transition for partition POLICY-NOTIFICATION-0 (state.change.logger)
[2021-08-25 01:24:31,401] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 14 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-NOTIFICATION-1 with leader 0 (state.change.logger)
[2021-08-25 01:24:31,401] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 14 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-NOTIFICATION-2 with leader 1 (state.change.logger)
[2021-08-25 01:24:31,401] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:31,401] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:31,415] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:24:31,418] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:24:31,418] INFO Created log for partition POLICY-NOTIFICATION-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:24:31,420] INFO [Partition POLICY-NOTIFICATION-1 broker=2] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-1 (kafka.cluster.Partition)
[2021-08-25 01:24:31,420] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:31,420] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:31,433] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:24:31,436] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:24:31,437] INFO Created log for partition POLICY-NOTIFICATION-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:24:31,438] INFO [Partition POLICY-NOTIFICATION-2 broker=2] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-2 (kafka.cluster.Partition)
[2021-08-25 01:24:31,438] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:31,438] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:24:31,438] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(POLICY-NOTIFICATION-2, POLICY-NOTIFICATION-1) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:24:31,438] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 14 for partition POLICY-NOTIFICATION-2 with leader 1 (state.change.logger)
[2021-08-25 01:24:31,438] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 14 for partition POLICY-NOTIFICATION-1 with leader 0 (state.change.logger)
[2021-08-25 01:24:31,438] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-NOTIFICATION-2 as part of become-follower request with correlation id 14 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:24:31,438] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-NOTIFICATION-1 as part of become-follower request with correlation id 14 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:24:31,438] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-NOTIFICATION-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:24:31,439] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-NOTIFICATION-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:24:31,439] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 14 for partition POLICY-NOTIFICATION-2 with leader 1 (state.change.logger)
[2021-08-25 01:24:31,439] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 14 for partition POLICY-NOTIFICATION-1 with leader 0 (state.change.logger)
[2021-08-25 01:24:31,439] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 14 from controller 1 epoch 8 for the become-follower transition for partition POLICY-NOTIFICATION-1 with leader 0 (state.change.logger)
[2021-08-25 01:24:31,439] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 14 from controller 1 epoch 8 for the become-follower transition for partition POLICY-NOTIFICATION-2 with leader 1 (state.change.logger)
[2021-08-25 01:24:31,442] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-NOTIFICATION-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 15 (state.change.logger)
[2021-08-25 01:24:31,443] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-NOTIFICATION-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 15 (state.change.logger)
[2021-08-25 01:24:31,443] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-NOTIFICATION-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 15 (state.change.logger)
[2021-08-25 01:24:31,826] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition POLICY-NOTIFICATION-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:24:31,827] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:24:31,829] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition POLICY-NOTIFICATION-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:24:31,829] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:25:10,939] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 3 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:25:14,963] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:25:14,963] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:25:18,117] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:25:18,117] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:25:40,059] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:25:40,060] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:25:40,066] INFO [GroupCoordinator 2]: Preparing to rebalance group dcae--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-3) (reason: Adding new member dcae-sch-2a301e03-0dc4-41ed-bd36-51bb3e9e013c with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:25:43,068] INFO [GroupCoordinator 2]: Stabilized group dcae--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-3) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:25:43,071] INFO [GroupCoordinator 2]: Assignment received from leader for group dcae--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:25:43,198] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:25:43,198] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:29:12,786] INFO Creating topic AAI-EVENT with configuration {} and initial partition assignment Map(2 -> ArrayBuffer(2, 0, 1), 1 -> ArrayBuffer(1, 2, 0), 0 -> ArrayBuffer(0, 1, 2)) (kafka.zk.AdminZkClient)
[2021-08-25 01:29:12,797] INFO [KafkaApi-2] Auto creation of topic AAI-EVENT with 3 partitions and replication factor 3 is successful (kafka.server.KafkaApis)
[2021-08-25 01:29:12,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 16 from controller 1 epoch 8 for partition AAI-EVENT-1 (state.change.logger)
[2021-08-25 01:29:12,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 16 from controller 1 epoch 8 for partition AAI-EVENT-0 (state.change.logger)
[2021-08-25 01:29:12,820] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 16 from controller 1 epoch 8 for partition AAI-EVENT-2 (state.change.logger)
[2021-08-25 01:29:12,821] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 16 from controller 1 epoch 8 starting the become-leader transition for partition AAI-EVENT-2 (state.change.logger)
[2021-08-25 01:29:12,821] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(AAI-EVENT-2) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:29:12,832] INFO [Log partition=AAI-EVENT-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:29:12,835] INFO [Log partition=AAI-EVENT-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 11 ms (kafka.log.Log)
[2021-08-25 01:29:12,835] INFO Created log for partition AAI-EVENT-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:29:12,837] INFO [Partition AAI-EVENT-2 broker=2] No checkpointed highwatermark is found for partition AAI-EVENT-2 (kafka.cluster.Partition)
[2021-08-25 01:29:12,837] INFO Replica loaded for partition AAI-EVENT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:29:12,837] INFO Replica loaded for partition AAI-EVENT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:29:12,837] INFO Replica loaded for partition AAI-EVENT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:29:12,837] INFO [Partition AAI-EVENT-2 broker=2] AAI-EVENT-2 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:29:12,840] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 16 for partition AAI-EVENT-2 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:29:12,840] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 16 from controller 1 epoch 8 for the become-leader transition for partition AAI-EVENT-2 (state.change.logger)
[2021-08-25 01:29:12,840] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 16 from controller 1 epoch 8 starting the become-follower transition for partition AAI-EVENT-1 with leader 1 (state.change.logger)
[2021-08-25 01:29:12,840] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 16 from controller 1 epoch 8 starting the become-follower transition for partition AAI-EVENT-0 with leader 0 (state.change.logger)
[2021-08-25 01:29:12,840] INFO Replica loaded for partition AAI-EVENT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:29:12,852] INFO [Log partition=AAI-EVENT-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:29:12,854] INFO [Log partition=AAI-EVENT-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-08-25 01:29:12,855] INFO Created log for partition AAI-EVENT-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:29:12,856] INFO [Partition AAI-EVENT-1 broker=2] No checkpointed highwatermark is found for partition AAI-EVENT-1 (kafka.cluster.Partition)
[2021-08-25 01:29:12,856] INFO Replica loaded for partition AAI-EVENT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:29:12,856] INFO Replica loaded for partition AAI-EVENT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:29:12,856] INFO Replica loaded for partition AAI-EVENT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:29:12,856] INFO Replica loaded for partition AAI-EVENT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:29:12,868] INFO [Log partition=AAI-EVENT-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:29:12,871] INFO [Log partition=AAI-EVENT-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:29:12,871] INFO Created log for partition AAI-EVENT-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:29:12,872] INFO [Partition AAI-EVENT-0 broker=2] No checkpointed highwatermark is found for partition AAI-EVENT-0 (kafka.cluster.Partition)
[2021-08-25 01:29:12,872] INFO Replica loaded for partition AAI-EVENT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:29:12,872] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(AAI-EVENT-1, AAI-EVENT-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:29:12,872] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 16 for partition AAI-EVENT-0 with leader 0 (state.change.logger)
[2021-08-25 01:29:12,872] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 16 for partition AAI-EVENT-1 with leader 1 (state.change.logger)
[2021-08-25 01:29:12,872] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition AAI-EVENT-0 as part of become-follower request with correlation id 16 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:29:12,872] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition AAI-EVENT-1 as part of become-follower request with correlation id 16 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:29:12,872] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(AAI-EVENT-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:29:12,872] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(AAI-EVENT-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:29:12,872] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 16 for partition AAI-EVENT-0 with leader 0 (state.change.logger)
[2021-08-25 01:29:12,872] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 16 for partition AAI-EVENT-1 with leader 1 (state.change.logger)
[2021-08-25 01:29:12,872] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 16 from controller 1 epoch 8 for the become-follower transition for partition AAI-EVENT-1 with leader 1 (state.change.logger)
[2021-08-25 01:29:12,872] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 16 from controller 1 epoch 8 for the become-follower transition for partition AAI-EVENT-0 with leader 0 (state.change.logger)
[2021-08-25 01:29:12,875] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition AAI-EVENT-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 17 (state.change.logger)
[2021-08-25 01:29:12,875] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition AAI-EVENT-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 17 (state.change.logger)
[2021-08-25 01:29:12,875] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition AAI-EVENT-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 17 (state.change.logger)
[2021-08-25 01:29:12,956] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition AAI-EVENT-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:29:12,956] INFO [Log partition=AAI-EVENT-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:29:12,966] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition AAI-EVENT-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:29:12,966] INFO [Log partition=AAI-EVENT-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:29:14,706] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:29:14,707] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:29:14,713] INFO [GroupCoordinator 2]: Preparing to rebalance group NBICG1--AAI-EVENT in state PreparingRebalance with old generation 0 (__consumer_offsets-6) (reason: Adding new member NBIC1-fdc3fe17-9d1b-4c63-bcf3-71ea59634493 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:29:17,715] INFO [GroupCoordinator 2]: Stabilized group NBICG1--AAI-EVENT generation 1 (__consumer_offsets-6) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:29:17,717] INFO [GroupCoordinator 2]: Assignment received from leader for group NBICG1--AAI-EVENT for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:29:17,935] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:29:17,935] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:29:18,477] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:29:18,477] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:29:21,627] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:29:21,628] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:30:37,893] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:30:37,894] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:30:37,898] INFO [GroupCoordinator 2]: Preparing to rebalance group cds--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-18) (reason: Adding new member cds-311e51f8-4bd5-4bc1-b8c2-b8f6cc4fd8e6 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:30:40,899] INFO [GroupCoordinator 2]: Stabilized group cds--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-18) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:30:40,902] INFO [GroupCoordinator 2]: Assignment received from leader for group cds--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:30:41,022] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:30:41,023] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:30:50,013] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:30:50,013] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:31:19,299] INFO [GroupCoordinator 2]: Member NBIC1-fdc3fe17-9d1b-4c63-bcf3-71ea59634493 in group NBICG1--AAI-EVENT has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:31:19,302] INFO [GroupCoordinator 2]: Preparing to rebalance group NBICG1--AAI-EVENT in state PreparingRebalance with old generation 1 (__consumer_offsets-6) (reason: removing member NBIC1-fdc3fe17-9d1b-4c63-bcf3-71ea59634493 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:31:19,306] INFO [GroupCoordinator 2]: Group NBICG1--AAI-EVENT with generation 2 is now empty (__consumer_offsets-6) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:31:29,873] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:31:29,873] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:31:29,879] INFO [GroupCoordinator 2]: Preparing to rebalance group NBICG1--AAI-EVENT in state PreparingRebalance with old generation 2 (__consumer_offsets-6) (reason: Adding new member NBIC1-03a43ce6-280a-4368-a3db-4774d87d5f04 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:31:32,880] INFO [GroupCoordinator 2]: Stabilized group NBICG1--AAI-EVENT generation 3 (__consumer_offsets-6) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:31:32,883] INFO [GroupCoordinator 2]: Assignment received from leader for group NBICG1--AAI-EVENT for generation 3 (kafka.coordinator.group.GroupCoordinator)
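The NBICG1--AAI-EVENT group above goes through a full leave/rejoin cycle: the NBIC1 member leaves (generation 1, then an empty generation 2), a new incarnation joins, and the group restabilizes at generation 3. Each join is logged "with group instanceid None", i.e. dynamic membership, so every client restart costs a rebalance. Kafka 2.3, this broker's version, already supports static membership (KIP-345), which keeps an assignment across quick restarts; a hedged illustration with the console consumer, all names assumed:

# group.instance.id enables static membership: a restart within
# session.timeout.ms reclaims the old assignment without a rebalance.
kafka-console-consumer --bootstrap-server onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092 \
  --consumer.config /tmp/client.properties \
  --topic AAI-EVENT --group NBICG1--AAI-EVENT \
  --consumer-property group.instance.id=nbic-static-1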
[2021-08-25 01:31:32,903] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:31:32,903] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:33:37,292] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:33:37,292] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:35:10,936] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:45:10,936] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:52:08,955] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:52:08,955] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:52:08,988] INFO Creating topic HV_VES_PERF3GPP_SSL with configuration {} and initial partition assignment Map(2 -> ArrayBuffer(0, 1, 2), 1 -> ArrayBuffer(2, 0, 1), 0 -> ArrayBuffer(1, 2, 0)) (kafka.zk.AdminZkClient)
[2021-08-25 01:52:08,999] INFO [KafkaApi-2] Auto creation of topic HV_VES_PERF3GPP_SSL with 3 partitions and replication factor 3 is successful (kafka.server.KafkaApis)
[2021-08-25 01:52:09,040] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:52:09,040] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:52:09,041] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 18 from controller 1 epoch 8 for partition HV_VES_PERF3GPP_SSL-2 (state.change.logger)
[2021-08-25 01:52:09,042] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 18 from controller 1 epoch 8 for partition HV_VES_PERF3GPP_SSL-0 (state.change.logger)
[2021-08-25 01:52:09,042] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 18 from controller 1 epoch 8 for partition HV_VES_PERF3GPP_SSL-1 (state.change.logger)
[2021-08-25 01:52:09,044] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 18 from controller 1 epoch 8 starting the become-leader transition for partition HV_VES_PERF3GPP_SSL-1 (state.change.logger)
[2021-08-25 01:52:09,044] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(HV_VES_PERF3GPP_SSL-1) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:52:09,060] INFO [Log partition=HV_VES_PERF3GPP_SSL-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:52:09,063] INFO [Log partition=HV_VES_PERF3GPP_SSL-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:52:09,064] INFO Created log for partition HV_VES_PERF3GPP_SSL-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:52:09,066] INFO [Partition HV_VES_PERF3GPP_SSL-1 broker=2] No checkpointed highwatermark is found for partition HV_VES_PERF3GPP_SSL-1 (kafka.cluster.Partition)
[2021-08-25 01:52:09,067] INFO Replica loaded for partition HV_VES_PERF3GPP_SSL-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:52:09,067] INFO Replica loaded for partition HV_VES_PERF3GPP_SSL-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:52:09,067] INFO Replica loaded for partition HV_VES_PERF3GPP_SSL-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:52:09,067] INFO [Partition HV_VES_PERF3GPP_SSL-1 broker=2] HV_VES_PERF3GPP_SSL-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:52:09,070] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 18 for partition HV_VES_PERF3GPP_SSL-1 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:52:09,070] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 18 from controller 1 epoch 8 for the become-leader transition for partition HV_VES_PERF3GPP_SSL-1 (state.change.logger)
[2021-08-25 01:52:09,070] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 18 from controller 1 epoch 8 starting the become-follower transition for partition HV_VES_PERF3GPP_SSL-2 with leader 0 (state.change.logger)
[2021-08-25 01:52:09,070] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 18 from controller 1 epoch 8 starting the become-follower transition for partition HV_VES_PERF3GPP_SSL-0 with leader 1 (state.change.logger)
[2021-08-25 01:52:09,070] INFO Replica loaded for partition HV_VES_PERF3GPP_SSL-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:52:09,070] INFO Replica loaded for partition HV_VES_PERF3GPP_SSL-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:52:09,083] INFO [Log partition=HV_VES_PERF3GPP_SSL-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:52:09,086] INFO [Log partition=HV_VES_PERF3GPP_SSL-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-08-25 01:52:09,087] INFO Created log for partition HV_VES_PERF3GPP_SSL-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:52:09,088] INFO [Partition HV_VES_PERF3GPP_SSL-2 broker=2] No checkpointed highwatermark is found for partition HV_VES_PERF3GPP_SSL-2 (kafka.cluster.Partition)
[2021-08-25 01:52:09,088] INFO Replica loaded for partition HV_VES_PERF3GPP_SSL-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:52:09,088] INFO Replica loaded for partition HV_VES_PERF3GPP_SSL-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:52:09,101] INFO [Log partition=HV_VES_PERF3GPP_SSL-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:52:09,104] INFO [Log partition=HV_VES_PERF3GPP_SSL-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-08-25 01:52:09,104] INFO Created log for partition HV_VES_PERF3GPP_SSL-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:52:09,105] INFO [Partition HV_VES_PERF3GPP_SSL-0 broker=2] No checkpointed highwatermark is found for partition HV_VES_PERF3GPP_SSL-0 (kafka.cluster.Partition)
[2021-08-25 01:52:09,105] INFO Replica loaded for partition HV_VES_PERF3GPP_SSL-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:52:09,105] INFO Replica loaded for partition HV_VES_PERF3GPP_SSL-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:52:09,105] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(HV_VES_PERF3GPP_SSL-2, HV_VES_PERF3GPP_SSL-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:52:09,105] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 18 for partition HV_VES_PERF3GPP_SSL-2 with leader 0 (state.change.logger)
[2021-08-25 01:52:09,105] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 18 for partition HV_VES_PERF3GPP_SSL-0 with leader 1 (state.change.logger)
[2021-08-25 01:52:09,105] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition HV_VES_PERF3GPP_SSL-2 as part of become-follower request with correlation id 18 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:52:09,105] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition HV_VES_PERF3GPP_SSL-0 as part of become-follower request with correlation id 18 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:52:09,106] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(HV_VES_PERF3GPP_SSL-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:52:09,106] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(HV_VES_PERF3GPP_SSL-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:52:09,106] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 18 for partition HV_VES_PERF3GPP_SSL-2 with leader 0 (state.change.logger)
[2021-08-25 01:52:09,106] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 18 for partition HV_VES_PERF3GPP_SSL-0 with leader 1 (state.change.logger)
[2021-08-25 01:52:09,106] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 18 from controller 1 epoch 8 for the become-follower transition for partition HV_VES_PERF3GPP_SSL-2 with leader 0 (state.change.logger)
[2021-08-25 01:52:09,106] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 18 from controller 1 epoch 8 for the become-follower transition for partition HV_VES_PERF3GPP_SSL-0 with leader 1 (state.change.logger)
[2021-08-25 01:52:09,109] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:52:09,110] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:52:09,113] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition HV_VES_PERF3GPP_SSL-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 19 (state.change.logger)
[2021-08-25 01:52:09,113] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition HV_VES_PERF3GPP_SSL-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 19 (state.change.logger)
[2021-08-25 01:52:09,113] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition HV_VES_PERF3GPP_SSL-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 19 (state.change.logger)
[2021-08-25 01:52:09,122] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition HV_VES_PERF3GPP_SSL-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:52:09,122] INFO [Log partition=HV_VES_PERF3GPP_SSL-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:52:09,265] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition HV_VES_PERF3GPP_SSL-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:52:09,265] INFO [Log partition=HV_VES_PERF3GPP_SSL-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:52:09,626] ERROR [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Error for partition HV_VES_PERF3GPP_SSL-2 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
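Note: this ERROR is transient. Broker 2's replica fetcher began fetching HV_VES_PERF3GPP_SSL-2 from broker 0 about half a second after topic creation, evidently before broker 0 had completed its own become-leader transition for that partition, so broker 0 answered UnknownTopicOrPartitionException. The fetcher retries automatically and the error does not recur for this partition. Were it to persist, leadership and ISR state could be checked with the topics tool (sketch; the host and client-properties path are assumptions, and the SASL_PLAINTEXT listener requires matching SASL settings in the --command-config file):

    kafka-topics --bootstrap-server onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092 \
      --command-config /path/to/client-sasl.properties \
      --describe --topic HV_VES_PERF3GPP_SSL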
[2021-08-25 01:52:13,998] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:52:13,998] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:52:20,887] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:52:20,887] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:53:22,614] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 20 from controller 1 epoch 8 for partition TEST_TOPIC-2 (state.change.logger)
[2021-08-25 01:53:22,614] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 20 from controller 1 epoch 8 for partition TEST_TOPIC-0 (state.change.logger)
[2021-08-25 01:53:22,614] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 20 from controller 1 epoch 8 for partition TEST_TOPIC-1 (state.change.logger)
[2021-08-25 01:53:22,616] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 20 from controller 1 epoch 8 starting the become-leader transition for partition TEST_TOPIC-1 (state.change.logger)
[2021-08-25 01:53:22,616] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(TEST_TOPIC-1) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:53:22,633] INFO [Log partition=TEST_TOPIC-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:53:22,636] INFO [Log partition=TEST_TOPIC-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:53:22,637] INFO Created log for partition TEST_TOPIC-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:53:22,640] INFO [Partition TEST_TOPIC-1 broker=2] No checkpointed highwatermark is found for partition TEST_TOPIC-1 (kafka.cluster.Partition)
[2021-08-25 01:53:22,640] INFO Replica loaded for partition TEST_TOPIC-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:22,640] INFO Replica loaded for partition TEST_TOPIC-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:22,640] INFO Replica loaded for partition TEST_TOPIC-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:22,640] INFO [Partition TEST_TOPIC-1 broker=2] TEST_TOPIC-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:53:22,643] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 20 for partition TEST_TOPIC-1 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:53:22,643] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 20 from controller 1 epoch 8 for the become-leader transition for partition TEST_TOPIC-1 (state.change.logger)
[2021-08-25 01:53:22,643] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 20 from controller 1 epoch 8 starting the become-follower transition for partition TEST_TOPIC-0 with leader 1 (state.change.logger)
[2021-08-25 01:53:22,643] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 20 from controller 1 epoch 8 starting the become-follower transition for partition TEST_TOPIC-2 with leader 0 (state.change.logger)
[2021-08-25 01:53:22,643] INFO Replica loaded for partition TEST_TOPIC-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:22,643] INFO Replica loaded for partition TEST_TOPIC-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:22,657] INFO [Log partition=TEST_TOPIC-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:53:22,660] INFO [Log partition=TEST_TOPIC-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:53:22,660] INFO Created log for partition TEST_TOPIC-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:53:22,662] INFO [Partition TEST_TOPIC-0 broker=2] No checkpointed highwatermark is found for partition TEST_TOPIC-0 (kafka.cluster.Partition)
[2021-08-25 01:53:22,662] INFO Replica loaded for partition TEST_TOPIC-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:22,662] INFO Replica loaded for partition TEST_TOPIC-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:22,676] INFO [Log partition=TEST_TOPIC-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:53:22,680] INFO [Log partition=TEST_TOPIC-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:53:22,680] INFO Created log for partition TEST_TOPIC-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:53:22,681] INFO [Partition TEST_TOPIC-2 broker=2] No checkpointed highwatermark is found for partition TEST_TOPIC-2 (kafka.cluster.Partition)
[2021-08-25 01:53:22,681] INFO Replica loaded for partition TEST_TOPIC-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:22,682] INFO Replica loaded for partition TEST_TOPIC-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:22,682] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(TEST_TOPIC-0, TEST_TOPIC-2) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:53:22,682] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 20 for partition TEST_TOPIC-0 with leader 1 (state.change.logger)
[2021-08-25 01:53:22,682] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 20 for partition TEST_TOPIC-2 with leader 0 (state.change.logger)
[2021-08-25 01:53:22,682] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition TEST_TOPIC-0 as part of become-follower request with correlation id 20 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:53:22,682] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition TEST_TOPIC-2 as part of become-follower request with correlation id 20 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:53:22,682] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(TEST_TOPIC-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:53:22,683] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(TEST_TOPIC-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:53:22,683] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 20 for partition TEST_TOPIC-0 with leader 1 (state.change.logger)
[2021-08-25 01:53:22,683] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 20 for partition TEST_TOPIC-2 with leader 0 (state.change.logger)
[2021-08-25 01:53:22,683] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 20 from controller 1 epoch 8 for the become-follower transition for partition TEST_TOPIC-0 with leader 1 (state.change.logger)
[2021-08-25 01:53:22,683] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 20 from controller 1 epoch 8 for the become-follower transition for partition TEST_TOPIC-2 with leader 0 (state.change.logger)
[2021-08-25 01:53:22,686] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition TEST_TOPIC-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 21 (state.change.logger)
[2021-08-25 01:53:22,686] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition TEST_TOPIC-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 21 (state.change.logger)
[2021-08-25 01:53:22,686] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition TEST_TOPIC-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 21 (state.change.logger)
[2021-08-25 01:53:22,818] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition TEST_TOPIC-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:53:22,819] INFO [Log partition=TEST_TOPIC-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:53:22,820] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition TEST_TOPIC-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:53:22,820] INFO [Log partition=TEST_TOPIC-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:53:23,422] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:53:23,422] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:53:23,429] INFO [GroupCoordinator 2]: Preparing to rebalance group g1--TEST_TOPIC in state PreparingRebalance with old generation 0 (__consumer_offsets-24) (reason: Adding new member c4-89e1678a-b9d0-4a0a-9f8c-be33b56cbba5 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:53:26,431] INFO [GroupCoordinator 2]: Stabilized group g1--TEST_TOPIC generation 1 (__consumer_offsets-24) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:53:26,434] INFO [GroupCoordinator 2]: Assignment received from leader for group g1--TEST_TOPIC for generation 1 (kafka.coordinator.group.GroupCoordinator)
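Note: the three GroupCoordinator lines above are one complete consumer-group join for g1--TEST_TOPIC: a first member joining moves the group from generation 0 into PreparingRebalance, the group stabilizes at generation 1 once all join requests are in, and the elected group leader then submits the partition assignment. The matching LeaveGroup and empty-group transition appear further down at 01:55:34. A join like this can be reproduced against the cluster with the console consumer (sketch; the host and client-properties path are assumptions):

    kafka-console-consumer --bootstrap-server onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092 \
      --consumer.config /path/to/client-sasl.properties \
      --group g1--TEST_TOPIC --topic TEST_TOPIC --from-beginning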
[2021-08-25 01:53:26,558] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:53:26,558] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:53:27,398] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 22 from controller 1 epoch 8 for partition unauthenticated.SEC_FAULT_OUTPUT-2 (state.change.logger)
[2021-08-25 01:53:27,398] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 22 from controller 1 epoch 8 for partition unauthenticated.SEC_FAULT_OUTPUT-1 (state.change.logger)
[2021-08-25 01:53:27,398] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 22 from controller 1 epoch 8 for partition unauthenticated.SEC_FAULT_OUTPUT-0 (state.change.logger)
[2021-08-25 01:53:27,401] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 22 from controller 1 epoch 8 starting the become-leader transition for partition unauthenticated.SEC_FAULT_OUTPUT-0 (state.change.logger)
[2021-08-25 01:53:27,401] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(unauthenticated.SEC_FAULT_OUTPUT-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:53:27,420] INFO [Log partition=unauthenticated.SEC_FAULT_OUTPUT-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:53:27,423] INFO [Log partition=unauthenticated.SEC_FAULT_OUTPUT-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 17 ms (kafka.log.Log)
[2021-08-25 01:53:27,424] INFO Created log for partition unauthenticated.SEC_FAULT_OUTPUT-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:53:27,428] INFO [Partition unauthenticated.SEC_FAULT_OUTPUT-0 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_FAULT_OUTPUT-0 (kafka.cluster.Partition)
[2021-08-25 01:53:27,428] INFO Replica loaded for partition unauthenticated.SEC_FAULT_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:27,428] INFO Replica loaded for partition unauthenticated.SEC_FAULT_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:27,428] INFO Replica loaded for partition unauthenticated.SEC_FAULT_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:27,428] INFO [Partition unauthenticated.SEC_FAULT_OUTPUT-0 broker=2] unauthenticated.SEC_FAULT_OUTPUT-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:53:27,433] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 22 for partition unauthenticated.SEC_FAULT_OUTPUT-0 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:53:27,433] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 22 from controller 1 epoch 8 for the become-leader transition for partition unauthenticated.SEC_FAULT_OUTPUT-0 (state.change.logger)
[2021-08-25 01:53:27,433] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 22 from controller 1 epoch 8 starting the become-follower transition for partition unauthenticated.SEC_FAULT_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 01:53:27,433] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 22 from controller 1 epoch 8 starting the become-follower transition for partition unauthenticated.SEC_FAULT_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 01:53:27,433] INFO Replica loaded for partition unauthenticated.SEC_FAULT_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:27,433] INFO Replica loaded for partition unauthenticated.SEC_FAULT_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:27,453] INFO [Log partition=unauthenticated.SEC_FAULT_OUTPUT-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:53:27,456] INFO [Log partition=unauthenticated.SEC_FAULT_OUTPUT-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 19 ms (kafka.log.Log)
[2021-08-25 01:53:27,457] INFO Created log for partition unauthenticated.SEC_FAULT_OUTPUT-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:53:27,459] INFO [Partition unauthenticated.SEC_FAULT_OUTPUT-1 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_FAULT_OUTPUT-1 (kafka.cluster.Partition)
[2021-08-25 01:53:27,459] INFO Replica loaded for partition unauthenticated.SEC_FAULT_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:27,459] INFO Replica loaded for partition unauthenticated.SEC_FAULT_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:27,475] INFO [Log partition=unauthenticated.SEC_FAULT_OUTPUT-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:53:27,479] INFO [Log partition=unauthenticated.SEC_FAULT_OUTPUT-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:53:27,480] INFO Created log for partition unauthenticated.SEC_FAULT_OUTPUT-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:53:27,481] INFO [Partition unauthenticated.SEC_FAULT_OUTPUT-2 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_FAULT_OUTPUT-2 (kafka.cluster.Partition)
[2021-08-25 01:53:27,481] INFO Replica loaded for partition unauthenticated.SEC_FAULT_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:27,481] INFO Replica loaded for partition unauthenticated.SEC_FAULT_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:53:27,482] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(unauthenticated.SEC_FAULT_OUTPUT-1, unauthenticated.SEC_FAULT_OUTPUT-2) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:53:27,482] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 22 for partition unauthenticated.SEC_FAULT_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 01:53:27,482] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 22 for partition unauthenticated.SEC_FAULT_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 01:53:27,482] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition unauthenticated.SEC_FAULT_OUTPUT-2 as part of become-follower request with correlation id 22 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:53:27,482] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition unauthenticated.SEC_FAULT_OUTPUT-1 as part of become-follower request with correlation id 22 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:53:27,482] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(unauthenticated.SEC_FAULT_OUTPUT-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:53:27,483] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(unauthenticated.SEC_FAULT_OUTPUT-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:53:27,483] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 22 for partition unauthenticated.SEC_FAULT_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 01:53:27,483] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 22 for partition unauthenticated.SEC_FAULT_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 01:53:27,483] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 22 from controller 1 epoch 8 for the become-follower transition for partition unauthenticated.SEC_FAULT_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 01:53:27,483] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 22 from controller 1 epoch 8 for the become-follower transition for partition unauthenticated.SEC_FAULT_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 01:53:27,485] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition unauthenticated.SEC_FAULT_OUTPUT-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 23 (state.change.logger)
[2021-08-25 01:53:27,486] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition unauthenticated.SEC_FAULT_OUTPUT-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 23 (state.change.logger)
[2021-08-25 01:53:27,486] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition unauthenticated.SEC_FAULT_OUTPUT-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 23 (state.change.logger)
[2021-08-25 01:53:27,852] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition unauthenticated.SEC_FAULT_OUTPUT-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:53:27,852] INFO [Log partition=unauthenticated.SEC_FAULT_OUTPUT-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:53:27,970] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition unauthenticated.SEC_FAULT_OUTPUT-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:53:27,971] INFO [Log partition=unauthenticated.SEC_FAULT_OUTPUT-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:53:31,388] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:53:31,389] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:54:56,037] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:54:56,038] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:54:59,178] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:54:59,178] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:55:10,572] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 24 from controller 1 epoch 8 for partition TEST_TOPIC_ACL-0 (state.change.logger)
[2021-08-25 01:55:10,574] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 24 from controller 1 epoch 8 starting the become-follower transition for partition TEST_TOPIC_ACL-0 with leader 0 (state.change.logger)
[2021-08-25 01:55:10,575] INFO Replica loaded for partition TEST_TOPIC_ACL-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:10,575] INFO Replica loaded for partition TEST_TOPIC_ACL-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:10,595] INFO [Log partition=TEST_TOPIC_ACL-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:55:10,599] INFO [Log partition=TEST_TOPIC_ACL-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 19 ms (kafka.log.Log)
[2021-08-25 01:55:10,600] INFO Created log for partition TEST_TOPIC_ACL-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:55:10,603] INFO [Partition TEST_TOPIC_ACL-0 broker=2] No checkpointed highwatermark is found for partition TEST_TOPIC_ACL-0 (kafka.cluster.Partition)
[2021-08-25 01:55:10,603] INFO Replica loaded for partition TEST_TOPIC_ACL-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:10,604] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(TEST_TOPIC_ACL-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:55:10,604] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 24 for partition TEST_TOPIC_ACL-0 with leader 0 (state.change.logger)
[2021-08-25 01:55:10,604] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition TEST_TOPIC_ACL-0 as part of become-follower request with correlation id 24 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:55:10,604] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(TEST_TOPIC_ACL-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:55:10,604] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 24 for partition TEST_TOPIC_ACL-0 with leader 0 (state.change.logger)
[2021-08-25 01:55:10,604] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 24 from controller 1 epoch 8 for the become-follower transition for partition TEST_TOPIC_ACL-0 with leader 0 (state.change.logger)
[2021-08-25 01:55:10,607] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition TEST_TOPIC_ACL-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 25 (state.change.logger)
[2021-08-25 01:55:10,769] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition TEST_TOPIC_ACL-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:55:10,770] INFO [Log partition=TEST_TOPIC_ACL-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:55:10,936] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-08-25 01:55:33,534] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:55:33,534] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:55:34,654] INFO [GroupCoordinator 2]: Member c4-89e1678a-b9d0-4a0a-9f8c-be33b56cbba5 in group g1--TEST_TOPIC has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:55:34,655] INFO [GroupCoordinator 2]: Preparing to rebalance group g1--TEST_TOPIC in state PreparingRebalance with old generation 1 (__consumer_offsets-24) (reason: removing member c4-89e1678a-b9d0-4a0a-9f8c-be33b56cbba5 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:55:34,655] INFO [GroupCoordinator 2]: Group g1--TEST_TOPIC with generation 2 is now empty (__consumer_offsets-24) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:55:36,671] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:55:36,671] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:55:37,659] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 26 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 (state.change.logger)
[2021-08-25 01:55:37,660] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 26 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 (state.change.logger)
[2021-08-25 01:55:37,660] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 26 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 (state.change.logger)
[2021-08-25 01:55:37,662] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 26 from controller 1 epoch 8 starting the become-leader transition for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 (state.change.logger)
[2021-08-25 01:55:37,662] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:55:37,679] INFO [Log partition=unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:55:37,683] INFO [Log partition=unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 17 ms (kafka.log.Log)
[2021-08-25 01:55:37,684] INFO Created log for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:55:37,687] INFO [Partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 (kafka.cluster.Partition)
[2021-08-25 01:55:37,687] INFO Replica loaded for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:37,687] INFO Replica loaded for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:37,687] INFO Replica loaded for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:37,687] INFO [Partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 broker=2] unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:55:37,690] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 26 for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:55:37,690] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 26 from controller 1 epoch 8 for the become-leader transition for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 (state.change.logger)
[2021-08-25 01:55:37,690] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 26 from controller 1 epoch 8 starting the become-follower transition for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 with leader 0 (state.change.logger)
[2021-08-25 01:55:37,690] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 26 from controller 1 epoch 8 starting the become-follower transition for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 with leader 1 (state.change.logger)
[2021-08-25 01:55:37,690] INFO Replica loaded for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:37,703] INFO [Log partition=unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:55:37,706] INFO [Log partition=unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 01:55:37,707] INFO Created log for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:55:37,708] INFO [Partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 (kafka.cluster.Partition)
[2021-08-25 01:55:37,708] INFO Replica loaded for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:37,708] INFO Replica loaded for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:37,708] INFO Replica loaded for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:37,708] INFO Replica loaded for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:37,723] INFO [Log partition=unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:55:37,726] INFO [Log partition=unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:55:37,727] INFO Created log for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:55:37,728] INFO [Partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 (kafka.cluster.Partition)
[2021-08-25 01:55:37,728] INFO Replica loaded for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:55:37,728] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1, unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:55:37,728] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 26 for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 with leader 1 (state.change.logger)
[2021-08-25 01:55:37,728] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 26 for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 with leader 0 (state.change.logger)
[2021-08-25 01:55:37,728] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 as part of become-follower request with correlation id 26 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:55:37,728] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 as part of become-follower request with correlation id 26 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:55:37,729] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:55:37,729] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:55:37,729] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 26 for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 with leader 1 (state.change.logger)
[2021-08-25 01:55:37,729] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 26 for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 with leader 0 (state.change.logger)
[2021-08-25 01:55:37,729] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 26 from controller 1 epoch 8 for the become-follower transition for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 with leader 0 (state.change.logger)
[2021-08-25 01:55:37,729] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 26 from controller 1 epoch 8 for the become-follower transition for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 with leader 1 (state.change.logger)
[2021-08-25 01:55:37,732] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 27 (state.change.logger)
[2021-08-25 01:55:37,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 27 (state.change.logger)
[2021-08-25 01:55:37,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 27 (state.change.logger)
[2021-08-25 01:55:37,784] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:55:37,784] INFO [Log partition=unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:55:38,094] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:55:38,094] INFO [Log partition=unauthenticated.SEC_3GPP_FAULTSUPERVISION_OUTPUT-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:55:41,571] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:55:41,571] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:57:43,703] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:57:43,704] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:57:46,841] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:57:46,841] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:57:47,737] INFO Creating topic unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT with configuration {} and initial partition assignment Map(2 -> ArrayBuffer(1, 0, 2), 1 -> ArrayBuffer(0, 2, 1), 0 -> ArrayBuffer(2, 1, 0)) (kafka.zk.AdminZkClient)
[2021-08-25 01:57:47,749] INFO [KafkaApi-2] Auto creation of topic unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT with 3 partitions and replication factor 3 is successful (kafka.server.KafkaApis)
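Note: unlike the topics above, unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT was not created explicitly; the broker auto-created it on first metadata request (auto.create.topics.enable), using the broker defaults of 3 partitions and replication factor 3 and an empty per-topic configuration ("configuration {}"). The explicit equivalent would be (sketch; the host and client-properties path are assumptions):

    kafka-topics --bootstrap-server onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092 \
      --command-config /path/to/client-sasl.properties \
      --create --topic unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT \
      --partitions 3 --replication-factor 3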
[2021-08-25 01:57:47,771] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 28 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 (state.change.logger)
[2021-08-25 01:57:47,772] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 28 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 (state.change.logger)
[2021-08-25 01:57:47,772] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 28 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 (state.change.logger)
[2021-08-25 01:57:47,774] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 28 from controller 1 epoch 8 starting the become-leader transition for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 (state.change.logger)
[2021-08-25 01:57:47,774] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:57:47,792] INFO [Log partition=unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:57:47,795] INFO [Log partition=unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 17 ms (kafka.log.Log)
[2021-08-25 01:57:47,796] INFO Created log for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:57:47,799] INFO [Partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 (kafka.cluster.Partition)
[2021-08-25 01:57:47,800] INFO Replica loaded for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:57:47,800] INFO Replica loaded for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:57:47,800] INFO Replica loaded for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:57:47,800] INFO [Partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 broker=2] unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:57:47,803] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 28 for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:57:47,803] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 28 from controller 1 epoch 8 for the become-leader transition for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 (state.change.logger)
[2021-08-25 01:57:47,803] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 28 from controller 1 epoch 8 starting the become-follower transition for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 01:57:47,803] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 28 from controller 1 epoch 8 starting the become-follower transition for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 01:57:47,803] INFO Replica loaded for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:57:47,819] INFO [Log partition=unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:57:47,822] INFO [Log partition=unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:57:47,823] INFO Created log for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:57:47,825] INFO [Partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 (kafka.cluster.Partition)
[2021-08-25 01:57:47,825] INFO Replica loaded for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:57:47,825] INFO Replica loaded for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:57:47,825] INFO Replica loaded for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:57:47,825] INFO Replica loaded for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:57:47,840] INFO [Log partition=unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:57:47,844] INFO [Log partition=unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 01:57:47,845] INFO Created log for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:57:47,845] INFO [Partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 (kafka.cluster.Partition)
[2021-08-25 01:57:47,846] INFO Replica loaded for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:57:47,846] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2, unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:57:47,846] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 28 for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 01:57:47,846] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 28 for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 01:57:47,846] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 as part of become-follower request with correlation id 28 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:57:47,846] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 as part of become-follower request with correlation id 28 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:57:47,846] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:57:47,846] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:57:47,846] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 28 for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 01:57:47,846] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 28 for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 01:57:47,846] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 28 from controller 1 epoch 8 for the become-follower transition for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 01:57:47,846] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 28 from controller 1 epoch 8 for the become-follower transition for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 01:57:47,850] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 29 (state.change.logger)
[2021-08-25 01:57:47,851] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 29 (state.change.logger)
[2021-08-25 01:57:47,851] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 29 (state.change.logger)
[2021-08-25 01:57:47,956] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:57:47,956] INFO [Log partition=unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:57:47,988] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:57:47,988] INFO [Log partition=unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:57:48,558] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:57:48,558] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:57:48,564] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT in state PreparingRebalance with old generation 0 (__consumer_offsets-42) (reason: Adding new member C1-44d82216-58f8-472c-a62f-a4b7c18184e9 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:57:51,566] INFO [GroupCoordinator 2]: Stabilized group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT generation 1 (__consumer_offsets-42) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:57:51,569] INFO [GroupCoordinator 2]: Assignment received from leader for group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:57:51,696] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:57:51,696] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:58:04,655] INFO [GroupCoordinator 2]: Member SO-COpenSource-Env11-343967ee-ce2a-4f2c-b1d1-9660bc136c7c in group SO-OpenSource-Env11--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:58:04,655] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-0) (reason: removing member SO-COpenSource-Env11-343967ee-ce2a-4f2c-b1d1-9660bc136c7c on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:58:04,655] INFO [GroupCoordinator 2]: Group SO-OpenSource-Env11--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 2 is now empty (__consumer_offsets-0) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:59:45,185] INFO [GroupCoordinator 2]: Member C1-44d82216-58f8-472c-a62f-a4b7c18184e9 in group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:59:45,186] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT in state PreparingRebalance with old generation 1 (__consumer_offsets-42) (reason: removing member C1-44d82216-58f8-472c-a62f-a4b7c18184e9 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:59:45,186] INFO [GroupCoordinator 2]: Group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT with generation 2 is now empty (__consumer_offsets-42) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:59:53,907] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:59:53,907] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:59:53,913] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT in state PreparingRebalance with old generation 2 (__consumer_offsets-42) (reason: Adding new member C1-d14aa2d1-707a-4566-87c4-c187b14ff658 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:59:56,914] INFO [GroupCoordinator 2]: Stabilized group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT generation 3 (__consumer_offsets-42) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:59:56,917] INFO [GroupCoordinator 2]: Assignment received from leader for group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 01:59:57,036] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:59:57,036] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:59:57,947] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 30 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 (state.change.logger)
[2021-08-25 01:59:57,947] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 30 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 (state.change.logger)
[2021-08-25 01:59:57,947] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 30 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 (state.change.logger)
[2021-08-25 01:59:57,949] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 30 from controller 1 epoch 8 starting the become-leader transition for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 (state.change.logger)
[2021-08-25 01:59:57,950] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:59:57,966] INFO [Log partition=unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:59:57,969] INFO [Log partition=unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:59:57,969] INFO Created log for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:59:57,972] INFO [Partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 (kafka.cluster.Partition)
[2021-08-25 01:59:57,972] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:59:57,972] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:59:57,972] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:59:57,973] INFO [Partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 broker=2] unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 01:59:57,975] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 30 for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 (last update controller epoch 8) (state.change.logger)
[2021-08-25 01:59:57,975] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 30 from controller 1 epoch 8 for the become-leader transition for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 (state.change.logger)
[2021-08-25 01:59:57,975] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 30 from controller 1 epoch 8 starting the become-follower transition for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 with leader 1 (state.change.logger)
[2021-08-25 01:59:57,975] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 30 from controller 1 epoch 8 starting the become-follower transition for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 with leader 0 (state.change.logger)
[2021-08-25 01:59:57,975] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:59:57,975] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:59:57,989] INFO [Log partition=unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:59:57,993] INFO [Log partition=unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:59:57,994] INFO Created log for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:59:57,995] INFO [Partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 (kafka.cluster.Partition)
[2021-08-25 01:59:57,995] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:59:57,996] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:59:58,010] INFO [Log partition=unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 01:59:58,014] INFO [Log partition=unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 01:59:58,015] INFO Created log for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 01:59:58,016] INFO [Partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 (kafka.cluster.Partition)
[2021-08-25 01:59:58,016] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:59:58,020] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 01:59:58,021] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2, unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:59:58,021] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 30 for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 with leader 0 (state.change.logger)
[2021-08-25 01:59:58,021] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 30 for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 with leader 1 (state.change.logger)
[2021-08-25 01:59:58,021] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 as part of become-follower request with correlation id 30 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 01:59:58,021] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 as part of become-follower request with correlation id 30 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 01:59:58,021] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:59:58,022] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 01:59:58,022] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 30 for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 with leader 0 (state.change.logger)
[2021-08-25 01:59:58,022] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 30 for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 with leader 1 (state.change.logger)
[2021-08-25 01:59:58,022] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 30 from controller 1 epoch 8 for the become-follower transition for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 with leader 1 (state.change.logger)
[2021-08-25 01:59:58,022] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 30 from controller 1 epoch 8 for the become-follower transition for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 with leader 0 (state.change.logger)
[2021-08-25 01:59:58,026] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 31 (state.change.logger)
[2021-08-25 01:59:58,027] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 31 (state.change.logger)
[2021-08-25 01:59:58,027] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 31 (state.change.logger)
[2021-08-25 01:59:58,228] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:59:58,229] INFO [Log partition=unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:59:58,305] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 01:59:58,305] INFO [Log partition=unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 01:59:58,731] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:59:58,732] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 01:59:58,738] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT in state PreparingRebalance with old generation 0 (__consumer_offsets-45) (reason: Adding new member C1-cb52c499-e21c-49e6-ac8e-6f19a0d21c5c with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:00:01,739] INFO [GroupCoordinator 2]: Stabilized group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT generation 1 (__consumer_offsets-45) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:00:01,742] INFO [GroupCoordinator 2]: Assignment received from leader for group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:00:01,868] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:00:01,868] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:00:08,354] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:00:08,354] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:00:08,361] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 2 (__consumer_offsets-0) (reason: Adding new member SO-COpenSource-Env11-d9a7c54b-36a4-448c-9f24-a07d9b4a1d2a with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:00:11,362] INFO [GroupCoordinator 2]: Stabilized group SO-OpenSource-Env11--SDC-DISTR-NOTIF-TOPIC-AUTO generation 3 (__consumer_offsets-0) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:00:11,364] INFO [GroupCoordinator 2]: Assignment received from leader for group SO-OpenSource-Env11--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:00:11,489] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:00:11,489] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:00:50,518] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:00:50,519] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:01:55,312] INFO [GroupCoordinator 2]: Member C1-cb52c499-e21c-49e6-ac8e-6f19a0d21c5c in group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:01:55,312] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT in state PreparingRebalance with old generation 1 (__consumer_offsets-45) (reason: removing member C1-cb52c499-e21c-49e6-ac8e-6f19a0d21c5c on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:01:55,312] INFO [GroupCoordinator 2]: Group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT with generation 2 is now empty (__consumer_offsets-45) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:02:04,040] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:02:04,040] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:02:04,046] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT in state PreparingRebalance with old generation 2 (__consumer_offsets-45) (reason: Adding new member C1-987d5b09-7764-4fe8-9cec-d4e5d67de42d with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:02:04,656] INFO [GroupCoordinator 2]: Member C1-d14aa2d1-707a-4566-87c4-c187b14ff658 in group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:02:04,656] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT in state PreparingRebalance with old generation 3 (__consumer_offsets-42) (reason: removing member C1-d14aa2d1-707a-4566-87c4-c187b14ff658 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:02:04,657] INFO [GroupCoordinator 2]: Group CG1--unauthenticated.SEC_3GPP_HEARTBEAT_OUTPUT with generation 4 is now empty (__consumer_offsets-42) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:02:07,047] INFO [GroupCoordinator 2]: Stabilized group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT generation 3 (__consumer_offsets-45) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:02:07,050] INFO [GroupCoordinator 2]: Assignment received from leader for group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:02:07,171] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:02:07,171] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:02:08,079] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 32 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 (state.change.logger)
[2021-08-25 02:02:08,080] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 32 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 (state.change.logger)
[2021-08-25 02:02:08,080] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 32 from controller 1 epoch 8 for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 (state.change.logger)
[2021-08-25 02:02:08,082] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 32 from controller 1 epoch 8 starting the become-leader transition for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 (state.change.logger)
[2021-08-25 02:02:08,082] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0) (kafka.server.ReplicaFetcherManager)
[2021-08-25 02:02:08,100] INFO [Log partition=unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 02:02:08,103] INFO [Log partition=unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log)
[2021-08-25 02:02:08,104] INFO Created log for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 02:02:08,108] INFO [Partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 (kafka.cluster.Partition)
[2021-08-25 02:02:08,108] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 02:02:08,108] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 02:02:08,108] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 02:02:08,109] INFO [Partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 broker=2] unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-08-25 02:02:08,111] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 32 for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 (last update controller epoch 8) (state.change.logger)
[2021-08-25 02:02:08,112] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 32 from controller 1 epoch 8 for the become-leader transition for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 (state.change.logger)
[2021-08-25 02:02:08,112] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 32 from controller 1 epoch 8 starting the become-follower transition for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 02:02:08,112] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 32 from controller 1 epoch 8 starting the become-follower transition for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 02:02:08,112] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 02:02:08,112] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 02:02:08,127] INFO [Log partition=unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 02:02:08,130] INFO [Log partition=unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-08-25 02:02:08,131] INFO Created log for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 02:02:08,133] INFO [Partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 (kafka.cluster.Partition)
[2021-08-25 02:02:08,133] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 02:02:08,134] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 02:02:08,148] INFO [Log partition=unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-08-25 02:02:08,151] INFO [Log partition=unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-08-25 02:02:08,152] INFO Created log for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-08-25 02:02:08,153] INFO [Partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 broker=2] No checkpointed highwatermark is found for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 (kafka.cluster.Partition)
[2021-08-25 02:02:08,153] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 02:02:08,153] INFO Replica loaded for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-08-25 02:02:08,153] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1, unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2) (kafka.server.ReplicaFetcherManager)
[2021-08-25 02:02:08,153] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 32 for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 02:02:08,153] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 32 for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 02:02:08,153] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 as part of become-follower request with correlation id 32 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-08-25 02:02:08,153] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 as part of become-follower request with correlation id 32 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-08-25 02:02:08,153] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 02:02:08,153] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-08-25 02:02:08,153] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 32 for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 02:02:08,153] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 32 for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 02:02:08,153] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 32 from controller 1 epoch 8 for the become-follower transition for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 with leader 0 (state.change.logger)
[2021-08-25 02:02:08,153] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 32 from controller 1 epoch 8 for the become-follower transition for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 with leader 1 (state.change.logger)
[2021-08-25 02:02:08,157] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 33 (state.change.logger)
[2021-08-25 02:02:08,157] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 33 (state.change.logger)
[2021-08-25 02:02:08,157] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 33 (state.change.logger)
[2021-08-25 02:02:08,215] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 02:02:08,215] INFO [Log partition=unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 02:02:08,586] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-08-25 02:02:08,586] INFO [Log partition=unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-08-25 02:02:08,862] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:02:08,862] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:02:08,866] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT in state PreparingRebalance with old generation 0 (__consumer_offsets-39) (reason: Adding new member C1-7bfe3d0b-fffc-41b1-bb7a-53079dcff42e with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:02:11,868] INFO [GroupCoordinator 2]: Stabilized group CG1--unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT generation 1 (__consumer_offsets-39) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:02:11,870] INFO [GroupCoordinator 2]: Assignment received from leader for group CG1--unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:02:11,993] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:02:11,993] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:04:04,655] INFO [GroupCoordinator 2]: Member C1-987d5b09-7764-4fe8-9cec-d4e5d67de42d in group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:04:04,656] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT in state PreparingRebalance with old generation 3 (__consumer_offsets-45) (reason: removing member C1-987d5b09-7764-4fe8-9cec-d4e5d67de42d on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:04:04,656] INFO [GroupCoordinator 2]: Group CG1--unauthenticated.SEC_3GPP_PERFORMANCEASSURANCE_OUTPUT with generation 4 is now empty (__consumer_offsets-45) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:04:05,468] INFO [GroupCoordinator 2]: Member C1-7bfe3d0b-fffc-41b1-bb7a-53079dcff42e in group CG1--unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:04:05,468] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT in state PreparingRebalance with old generation 1 (__consumer_offsets-39) (reason: removing member C1-7bfe3d0b-fffc-41b1-bb7a-53079dcff42e on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:04:05,468] INFO [GroupCoordinator 2]: Group CG1--unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT with generation 2 is now empty (__consumer_offsets-39) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:04:14,185] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:04:14,185] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-08-25 02:04:14,190] INFO [GroupCoordinator 2]: Preparing to rebalance group CG1--unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT in state PreparingRebalance with old generation 2 (__consumer_offsets-39) (reason: Adding new member C1-e98c6625-a727-4f21-9062-6b3269dc6028 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-08-25 02:04:17,191] INFO [GroupCoordinator 2]: Stabilized group CG1--unauthenticated.SEC_3GPP_PROVISIONING_OUTPUT generation 3 (__consumer_offsets-39) (kafka.coordinator.