Results

By type

          + export KAFKA_BROKER_ID=2
+ cp /opt/app/osaaf/local/cadi.properties /etc/kafka/data/cadi.properties
+ export KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://172.16.10.172:30492,INTERNAL_SASL_PLAINTEXT://:9092
+ exec /etc/confluent/docker/run
===> ENV Variables ...
A1POLICYMANAGEMENT_EXTERNAL_PORT=tcp://10.96.161.117:8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP=tcp://10.96.161.117:8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_ADDR=10.96.161.117
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PORT=8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PROTO=tcp
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_HOST=10.96.161.117
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT=8433
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT_HTTPS_API=8433
A1POLICYMANAGEMENT_PORT=tcp://10.96.204.187:8433
A1POLICYMANAGEMENT_PORT_8081_TCP=tcp://10.96.204.187:8081
A1POLICYMANAGEMENT_PORT_8081_TCP_ADDR=10.96.204.187
A1POLICYMANAGEMENT_PORT_8081_TCP_PORT=8081
A1POLICYMANAGEMENT_PORT_8081_TCP_PROTO=tcp
A1POLICYMANAGEMENT_PORT_8433_TCP=tcp://10.96.204.187:8433
A1POLICYMANAGEMENT_PORT_8433_TCP_ADDR=10.96.204.187
A1POLICYMANAGEMENT_PORT_8433_TCP_PORT=8433
A1POLICYMANAGEMENT_PORT_8433_TCP_PROTO=tcp
A1POLICYMANAGEMENT_SERVICE_HOST=10.96.204.187
A1POLICYMANAGEMENT_SERVICE_PORT=8433
A1POLICYMANAGEMENT_SERVICE_PORT_HTTPS_API=8433
A1POLICYMANAGEMENT_SERVICE_PORT_HTTP_API=8081
AAF_CASS_PORT=tcp://10.96.152.242:7000
AAF_CASS_PORT_7000_TCP=tcp://10.96.152.242:7000
AAF_CASS_PORT_7000_TCP_ADDR=10.96.152.242
AAF_CASS_PORT_7000_TCP_PORT=7000
AAF_CASS_PORT_7000_TCP_PROTO=tcp
AAF_CASS_PORT_7001_TCP=tcp://10.96.152.242:7001
AAF_CASS_PORT_7001_TCP_ADDR=10.96.152.242
AAF_CASS_PORT_7001_TCP_PORT=7001
AAF_CASS_PORT_7001_TCP_PROTO=tcp
AAF_CASS_PORT_9042_TCP=tcp://10.96.152.242:9042
AAF_CASS_PORT_9042_TCP_ADDR=10.96.152.242
AAF_CASS_PORT_9042_TCP_PORT=9042
AAF_CASS_PORT_9042_TCP_PROTO=tcp
AAF_CASS_PORT_9160_TCP=tcp://10.96.152.242:9160
AAF_CASS_PORT_9160_TCP_ADDR=10.96.152.242
AAF_CASS_PORT_9160_TCP_PORT=9160
AAF_CASS_PORT_9160_TCP_PROTO=tcp
AAF_CASS_SERVICE_HOST=10.96.152.242
AAF_CASS_SERVICE_PORT=7000
AAF_CASS_SERVICE_PORT_TCP_CQL=9042
AAF_CASS_SERVICE_PORT_TCP_INTRA=7000
AAF_CASS_SERVICE_PORT_TCP_THRIFT=9160
AAF_CASS_SERVICE_PORT_TLS=7001
AAF_CM_PORT=tcp://10.96.31.225:8150
AAF_CM_PORT_8150_TCP=tcp://10.96.31.225:8150
AAF_CM_PORT_8150_TCP_ADDR=10.96.31.225
AAF_CM_PORT_8150_TCP_PORT=8150
AAF_CM_PORT_8150_TCP_PROTO=tcp
AAF_CM_SERVICE_HOST=10.96.31.225
AAF_CM_SERVICE_PORT=8150
AAF_CM_SERVICE_PORT_API=8150
AAF_FS_PORT=tcp://10.96.168.206:8096
AAF_FS_PORT_8096_TCP=tcp://10.96.168.206:8096
AAF_FS_PORT_8096_TCP_ADDR=10.96.168.206
AAF_FS_PORT_8096_TCP_PORT=8096
AAF_FS_PORT_8096_TCP_PROTO=tcp
AAF_FS_SERVICE_HOST=10.96.168.206
AAF_FS_SERVICE_PORT=8096
AAF_FS_SERVICE_PORT_API=8096
AAF_GUI_PORT=tcp://10.96.252.75:8200
AAF_GUI_PORT_8200_TCP=tcp://10.96.252.75:8200
AAF_GUI_PORT_8200_TCP_ADDR=10.96.252.75
AAF_GUI_PORT_8200_TCP_PORT=8200
AAF_GUI_PORT_8200_TCP_PROTO=tcp
AAF_GUI_SERVICE_HOST=10.96.252.75
AAF_GUI_SERVICE_PORT=8200
AAF_GUI_SERVICE_PORT_GUI=8200
AAF_HELLO_PORT=tcp://10.96.165.250:8130
AAF_HELLO_PORT_8130_TCP=tcp://10.96.165.250:8130
AAF_HELLO_PORT_8130_TCP_ADDR=10.96.165.250
AAF_HELLO_PORT_8130_TCP_PORT=8130
AAF_HELLO_PORT_8130_TCP_PROTO=tcp
AAF_HELLO_SERVICE_HOST=10.96.165.250
AAF_HELLO_SERVICE_PORT=8130
AAF_HELLO_SERVICE_PORT_API=8130
AAF_LOCATE_PORT=tcp://10.96.91.37:8095
AAF_LOCATE_PORT_8095_TCP=tcp://10.96.91.37:8095
AAF_LOCATE_PORT_8095_TCP_ADDR=10.96.91.37
AAF_LOCATE_PORT_8095_TCP_PORT=8095
AAF_LOCATE_PORT_8095_TCP_PROTO=tcp
AAF_LOCATE_SERVICE_HOST=10.96.91.37
AAF_LOCATE_SERVICE_PORT=8095
AAF_LOCATE_SERVICE_PORT_API=8095
AAF_OAUTH_PORT=tcp://10.96.33.57:8140
AAF_OAUTH_PORT_8140_TCP=tcp://10.96.33.57:8140
AAF_OAUTH_PORT_8140_TCP_ADDR=10.96.33.57
AAF_OAUTH_PORT_8140_TCP_PORT=8140
AAF_OAUTH_PORT_8140_TCP_PROTO=tcp
AAF_OAUTH_SERVICE_HOST=10.96.33.57
AAF_OAUTH_SERVICE_PORT=8140
AAF_OAUTH_SERVICE_PORT_API=8140
AAF_SERVICE_PORT=tcp://10.96.128.81:8100
AAF_SERVICE_PORT_8100_TCP=tcp://10.96.128.81:8100
AAF_SERVICE_PORT_8100_TCP_ADDR=10.96.128.81
AAF_SERVICE_PORT_8100_TCP_PORT=8100
AAF_SERVICE_PORT_8100_TCP_PROTO=tcp
AAF_SERVICE_SERVICE_HOST=10.96.128.81
AAF_SERVICE_SERVICE_PORT=8100
AAF_SERVICE_SERVICE_PORT_API=8100
AAF_SMS_DB_PORT=tcp://10.96.49.67:8200
AAF_SMS_DB_PORT_8200_TCP=tcp://10.96.49.67:8200
AAF_SMS_DB_PORT_8200_TCP_ADDR=10.96.49.67
AAF_SMS_DB_PORT_8200_TCP_PORT=8200
AAF_SMS_DB_PORT_8200_TCP_PROTO=tcp
AAF_SMS_DB_SERVICE_HOST=10.96.49.67
AAF_SMS_DB_SERVICE_PORT=8200
AAF_SMS_DB_SERVICE_PORT_AAF_SMS_DB=8200
AAF_SMS_PORT=tcp://10.96.50.10:10443
AAF_SMS_PORT_10443_TCP=tcp://10.96.50.10:10443
AAF_SMS_PORT_10443_TCP_ADDR=10.96.50.10
AAF_SMS_PORT_10443_TCP_PORT=10443
AAF_SMS_PORT_10443_TCP_PROTO=tcp
AAF_SMS_SERVICE_HOST=10.96.50.10
AAF_SMS_SERVICE_PORT=10443
AAI_BABEL_PORT=tcp://10.96.140.180:9516
AAI_BABEL_PORT_9516_TCP=tcp://10.96.140.180:9516
AAI_BABEL_PORT_9516_TCP_ADDR=10.96.140.180
AAI_BABEL_PORT_9516_TCP_PORT=9516
AAI_BABEL_PORT_9516_TCP_PROTO=tcp
AAI_BABEL_SERVICE_HOST=10.96.140.180
AAI_BABEL_SERVICE_PORT=9516
AAI_BABEL_SERVICE_PORT_BABEL=9516
AAI_MODELLOADER_PORT=tcp://10.96.96.102:8080
AAI_MODELLOADER_PORT_8080_TCP=tcp://10.96.96.102:8080
AAI_MODELLOADER_PORT_8080_TCP_ADDR=10.96.96.102
AAI_MODELLOADER_PORT_8080_TCP_PORT=8080
AAI_MODELLOADER_PORT_8080_TCP_PROTO=tcp
AAI_MODELLOADER_PORT_8443_TCP=tcp://10.96.96.102:8443
AAI_MODELLOADER_PORT_8443_TCP_ADDR=10.96.96.102
AAI_MODELLOADER_PORT_8443_TCP_PORT=8443
AAI_MODELLOADER_PORT_8443_TCP_PROTO=tcp
AAI_MODELLOADER_SERVICE_HOST=10.96.96.102
AAI_MODELLOADER_SERVICE_PORT=8080
AAI_MODELLOADER_SERVICE_PORT_AAI_MODELLOADER=8080
AAI_MODELLOADER_SERVICE_PORT_AAI_MODELLOADER_SSL=8443
AAI_PORT=tcp://10.96.224.219:8443
AAI_PORT_8443_TCP=tcp://10.96.224.219:8443
AAI_PORT_8443_TCP_ADDR=10.96.224.219
AAI_PORT_8443_TCP_PORT=8443
AAI_PORT_8443_TCP_PROTO=tcp
AAI_RESOURCES_PORT=tcp://10.96.211.174:8447
AAI_RESOURCES_PORT_5005_TCP=tcp://10.96.211.174:5005
AAI_RESOURCES_PORT_5005_TCP_ADDR=10.96.211.174
AAI_RESOURCES_PORT_5005_TCP_PORT=5005
AAI_RESOURCES_PORT_5005_TCP_PROTO=tcp
AAI_RESOURCES_PORT_8447_TCP=tcp://10.96.211.174:8447
AAI_RESOURCES_PORT_8447_TCP_ADDR=10.96.211.174
AAI_RESOURCES_PORT_8447_TCP_PORT=8447
AAI_RESOURCES_PORT_8447_TCP_PROTO=tcp
AAI_RESOURCES_SERVICE_HOST=10.96.211.174
AAI_RESOURCES_SERVICE_PORT=8447
AAI_RESOURCES_SERVICE_PORT_AAI_RESOURCES_5005=5005
AAI_RESOURCES_SERVICE_PORT_AAI_RESOURCES_8447=8447
AAI_SERVICE_HOST=10.96.224.219
AAI_SERVICE_PORT=8443
AAI_SERVICE_PORT_AAI_SSL=8443
AAI_SPARKY_BE_PORT=tcp://10.96.29.216:8000
AAI_SPARKY_BE_PORT_8000_TCP=tcp://10.96.29.216:8000
AAI_SPARKY_BE_PORT_8000_TCP_ADDR=10.96.29.216
AAI_SPARKY_BE_PORT_8000_TCP_PORT=8000
AAI_SPARKY_BE_PORT_8000_TCP_PROTO=tcp
AAI_SPARKY_BE_SERVICE_HOST=10.96.29.216
AAI_SPARKY_BE_SERVICE_PORT=8000
AAI_SPARKY_BE_SERVICE_PORT_AAI_SPARKY_BE=8000
AAI_TRAVERSAL_PORT=tcp://10.96.173.165:8446
AAI_TRAVERSAL_PORT_5005_TCP=tcp://10.96.173.165:5005
AAI_TRAVERSAL_PORT_5005_TCP_ADDR=10.96.173.165
AAI_TRAVERSAL_PORT_5005_TCP_PORT=5005
AAI_TRAVERSAL_PORT_5005_TCP_PROTO=tcp
AAI_TRAVERSAL_PORT_8446_TCP=tcp://10.96.173.165:8446
AAI_TRAVERSAL_PORT_8446_TCP_ADDR=10.96.173.165
AAI_TRAVERSAL_PORT_8446_TCP_PORT=8446
AAI_TRAVERSAL_PORT_8446_TCP_PROTO=tcp
AAI_TRAVERSAL_SERVICE_HOST=10.96.173.165
AAI_TRAVERSAL_SERVICE_PORT=8446
AAI_TRAVERSAL_SERVICE_PORT_AAI_TRAVERSAL_5005=5005
AAI_TRAVERSAL_SERVICE_PORT_AAI_TRAVERSAL_8446=8446
ALLOW_UNSIGNED=false
APPC_ANSIBLE_SERVER_PORT=tcp://10.96.204.69:8000
APPC_ANSIBLE_SERVER_PORT_8000_TCP=tcp://10.96.204.69:8000
APPC_ANSIBLE_SERVER_PORT_8000_TCP_ADDR=10.96.204.69
APPC_ANSIBLE_SERVER_PORT_8000_TCP_PORT=8000
APPC_ANSIBLE_SERVER_PORT_8000_TCP_PROTO=tcp
APPC_ANSIBLE_SERVER_SERVICE_HOST=10.96.204.69
APPC_ANSIBLE_SERVER_SERVICE_PORT=8000
APPC_ANSIBLE_SERVER_SERVICE_PORT_APPC_ANSIBLE_SERVER=8000
APPC_CDT_PORT=tcp://10.96.208.191:18080
APPC_CDT_PORT_18080_TCP=tcp://10.96.208.191:18080
APPC_CDT_PORT_18080_TCP_ADDR=10.96.208.191
APPC_CDT_PORT_18080_TCP_PORT=18080
APPC_CDT_PORT_18080_TCP_PROTO=tcp
APPC_CDT_SERVICE_HOST=10.96.208.191
APPC_CDT_SERVICE_PORT=18080
APPC_CDT_SERVICE_PORT_APPC_CDT=18080
APPC_DB_PORT=tcp://10.96.181.221:3306
APPC_DB_PORT_3306_TCP=tcp://10.96.181.221:3306
APPC_DB_PORT_3306_TCP_ADDR=10.96.181.221
APPC_DB_PORT_3306_TCP_PORT=3306
APPC_DB_PORT_3306_TCP_PROTO=tcp
APPC_DB_SERVICE_HOST=10.96.181.221
APPC_DB_SERVICE_PORT=3306
APPC_DB_SERVICE_PORT_MYSQL=3306
APPC_DGBUILDER_PORT=tcp://10.96.174.163:3000
APPC_DGBUILDER_PORT_3000_TCP=tcp://10.96.174.163:3000
APPC_DGBUILDER_PORT_3000_TCP_ADDR=10.96.174.163
APPC_DGBUILDER_PORT_3000_TCP_PORT=3000
APPC_DGBUILDER_PORT_3000_TCP_PROTO=tcp
APPC_DGBUILDER_SERVICE_HOST=10.96.174.163
APPC_DGBUILDER_SERVICE_PORT=3000
APPC_DGBUILDER_SERVICE_PORT_DGBUILDER=3000
APPC_PORT=tcp://10.96.186.145:8443
APPC_PORT_1830_TCP=tcp://10.96.186.145:1830
APPC_PORT_1830_TCP_ADDR=10.96.186.145
APPC_PORT_1830_TCP_PORT=1830
APPC_PORT_1830_TCP_PROTO=tcp
APPC_PORT_8443_TCP=tcp://10.96.186.145:8443
APPC_PORT_8443_TCP_ADDR=10.96.186.145
APPC_PORT_8443_TCP_PORT=8443
APPC_PORT_8443_TCP_PROTO=tcp
APPC_PORT_9090_TCP=tcp://10.96.186.145:9090
APPC_PORT_9090_TCP_ADDR=10.96.186.145
APPC_PORT_9090_TCP_PORT=9090
APPC_PORT_9090_TCP_PROTO=tcp
APPC_SERVICE_HOST=10.96.186.145
APPC_SERVICE_PORT=8443
APPC_SERVICE_PORT_APPC_1830=1830
APPC_SERVICE_PORT_APPC_8443=8443
APPC_SERVICE_PORT_APPC_9090=9090
AWX_POSTGRESQL_PORT=tcp://10.96.173.14:5432
AWX_POSTGRESQL_PORT_5432_TCP=tcp://10.96.173.14:5432
AWX_POSTGRESQL_PORT_5432_TCP_ADDR=10.96.173.14
AWX_POSTGRESQL_PORT_5432_TCP_PORT=5432
AWX_POSTGRESQL_PORT_5432_TCP_PROTO=tcp
AWX_POSTGRESQL_SERVICE_HOST=10.96.173.14
AWX_POSTGRESQL_SERVICE_PORT=5432
AWX_POSTGRESQL_SERVICE_PORT_AWX_POSTGRESQL=5432
AWX_RABBITMQ_PORT=tcp://10.96.200.40:15672
AWX_RABBITMQ_PORT_15672_TCP=tcp://10.96.200.40:15672
AWX_RABBITMQ_PORT_15672_TCP_ADDR=10.96.200.40
AWX_RABBITMQ_PORT_15672_TCP_PORT=15672
AWX_RABBITMQ_PORT_15672_TCP_PROTO=tcp
AWX_RABBITMQ_PORT_5672_TCP=tcp://10.96.200.40:5672
AWX_RABBITMQ_PORT_5672_TCP_ADDR=10.96.200.40
AWX_RABBITMQ_PORT_5672_TCP_PORT=5672
AWX_RABBITMQ_PORT_5672_TCP_PROTO=tcp
AWX_RABBITMQ_SERVICE_HOST=10.96.200.40
AWX_RABBITMQ_SERVICE_PORT=15672
AWX_RABBITMQ_SERVICE_PORT_AMQP=5672
AWX_RABBITMQ_SERVICE_PORT_HTTP=15672
AWX_RMQ_MGMT_PORT=tcp://10.96.171.49:15672
AWX_RMQ_MGMT_PORT_15672_TCP=tcp://10.96.171.49:15672
AWX_RMQ_MGMT_PORT_15672_TCP_ADDR=10.96.171.49
AWX_RMQ_MGMT_PORT_15672_TCP_PORT=15672
AWX_RMQ_MGMT_PORT_15672_TCP_PROTO=tcp
AWX_RMQ_MGMT_SERVICE_HOST=10.96.171.49
AWX_RMQ_MGMT_SERVICE_PORT=15672
AWX_RMQ_MGMT_SERVICE_PORT_RMQMGMT=15672
AWX_WEB_PORT=tcp://10.96.123.114:8052
AWX_WEB_PORT_8052_TCP=tcp://10.96.123.114:8052
AWX_WEB_PORT_8052_TCP_ADDR=10.96.123.114
AWX_WEB_PORT_8052_TCP_PORT=8052
AWX_WEB_PORT_8052_TCP_PROTO=tcp
AWX_WEB_SERVICE_HOST=10.96.123.114
AWX_WEB_SERVICE_PORT=8052
AWX_WEB_SERVICE_PORT_WEB=8052
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT=tcp://10.96.195.227:5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP=tcp://10.96.195.227:5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_ADDR=10.96.195.227
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PORT=5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_HOST=10.96.195.227
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT=5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT_BLUEPRINTS_PROCESSOR_CLUSTER=5701
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT=tcp://10.96.224.160:9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP=tcp://10.96.224.160:9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_ADDR=10.96.224.160
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PORT=9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_HOST=10.96.224.160
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT=9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT_BLUEPRINTS_PROCESSOR_GRPC=9111
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT=tcp://10.96.30.172:8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP=tcp://10.96.30.172:8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_ADDR=10.96.30.172
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PORT=8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_HOST=10.96.30.172
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT=8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT_BLUEPRINTS_PROCESSOR_HTTP=8080
CDS_COMMAND_EXECUTOR_PORT=tcp://10.96.84.114:50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP=tcp://10.96.84.114:50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_ADDR=10.96.84.114
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PORT=50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PROTO=tcp
CDS_COMMAND_EXECUTOR_SERVICE_HOST=10.96.84.114
CDS_COMMAND_EXECUTOR_SERVICE_PORT=50051
CDS_COMMAND_EXECUTOR_SERVICE_PORT_COMMAND_EXECUTOR_GRPC=50051
CDS_DB_PORT=tcp://10.96.238.76:3306
CDS_DB_PORT_3306_TCP=tcp://10.96.238.76:3306
CDS_DB_PORT_3306_TCP_ADDR=10.96.238.76
CDS_DB_PORT_3306_TCP_PORT=3306
CDS_DB_PORT_3306_TCP_PROTO=tcp
CDS_DB_SERVICE_HOST=10.96.238.76
CDS_DB_SERVICE_PORT=3306
CDS_DB_SERVICE_PORT_MYSQL=3306
CDS_PY_EXECUTOR_PORT=tcp://10.96.75.244:50052
CDS_PY_EXECUTOR_PORT_50052_TCP=tcp://10.96.75.244:50052
CDS_PY_EXECUTOR_PORT_50052_TCP_ADDR=10.96.75.244
CDS_PY_EXECUTOR_PORT_50052_TCP_PORT=50052
CDS_PY_EXECUTOR_PORT_50052_TCP_PROTO=tcp
CDS_PY_EXECUTOR_PORT_50053_TCP=tcp://10.96.75.244:50053
CDS_PY_EXECUTOR_PORT_50053_TCP_ADDR=10.96.75.244
CDS_PY_EXECUTOR_PORT_50053_TCP_PORT=50053
CDS_PY_EXECUTOR_PORT_50053_TCP_PROTO=tcp
CDS_PY_EXECUTOR_SERVICE_HOST=10.96.75.244
CDS_PY_EXECUTOR_SERVICE_PORT=50052
CDS_PY_EXECUTOR_SERVICE_PORT_EXECUTOR_GRPC=50052
CDS_PY_EXECUTOR_SERVICE_PORT_MANAGER_GRPC=50053
CDS_SDC_LISTENER_PORT=tcp://10.96.40.138:8080
CDS_SDC_LISTENER_PORT_8080_TCP=tcp://10.96.40.138:8080
CDS_SDC_LISTENER_PORT_8080_TCP_ADDR=10.96.40.138
CDS_SDC_LISTENER_PORT_8080_TCP_PORT=8080
CDS_SDC_LISTENER_PORT_8080_TCP_PROTO=tcp
CDS_SDC_LISTENER_SERVICE_HOST=10.96.40.138
CDS_SDC_LISTENER_SERVICE_PORT=8080
CDS_SDC_LISTENER_SERVICE_PORT_CDS_SDC_LISTENER_HTTP=8080
CDS_UI_PORT=tcp://10.96.220.72:3000
CDS_UI_PORT_3000_TCP=tcp://10.96.220.72:3000
CDS_UI_PORT_3000_TCP_ADDR=10.96.220.72
CDS_UI_PORT_3000_TCP_PORT=3000
CDS_UI_PORT_3000_TCP_PROTO=tcp
CDS_UI_SERVICE_HOST=10.96.220.72
CDS_UI_SERVICE_PORT=3000
CDS_UI_SERVICE_PORT_CDS_UI_3000=3000
CLI_PORT=tcp://10.96.104.43:443
CLI_PORT_443_TCP=tcp://10.96.104.43:443
CLI_PORT_443_TCP_ADDR=10.96.104.43
CLI_PORT_443_TCP_PORT=443
CLI_PORT_443_TCP_PROTO=tcp
CLI_PORT_9090_TCP=tcp://10.96.104.43:9090
CLI_PORT_9090_TCP_ADDR=10.96.104.43
CLI_PORT_9090_TCP_PORT=9090
CLI_PORT_9090_TCP_PROTO=tcp
CLI_SERVICE_HOST=10.96.104.43
CLI_SERVICE_PORT=443
CLI_SERVICE_PORT_CLI443=443
CLI_SERVICE_PORT_CLI9090=9090
CMSO_DB_PORT=tcp://10.96.227.173:3306
CMSO_DB_PORT_3306_TCP=tcp://10.96.227.173:3306
CMSO_DB_PORT_3306_TCP_ADDR=10.96.227.173
CMSO_DB_PORT_3306_TCP_PORT=3306
CMSO_DB_PORT_3306_TCP_PROTO=tcp
CMSO_DB_SERVICE_HOST=10.96.227.173
CMSO_DB_SERVICE_PORT=3306
CMSO_DB_SERVICE_PORT_MYSQL=3306
COMPONENT=kafka
CONFIG_BINDING_SERVICE_PORT=tcp://10.96.217.19:10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP=tcp://10.96.217.19:10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP_ADDR=10.96.217.19
CONFIG_BINDING_SERVICE_PORT_10000_TCP_PORT=10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP_PROTO=tcp
CONFIG_BINDING_SERVICE_PORT_10443_TCP=tcp://10.96.217.19:10443
CONFIG_BINDING_SERVICE_PORT_10443_TCP_ADDR=10.96.217.19
CONFIG_BINDING_SERVICE_PORT_10443_TCP_PORT=10443
CONFIG_BINDING_SERVICE_PORT_10443_TCP_PROTO=tcp
CONFIG_BINDING_SERVICE_SERVICE_HOST=10.96.217.19
CONFIG_BINDING_SERVICE_SERVICE_PORT=10000
CONFIG_BINDING_SERVICE_SERVICE_PORT_CONFIG_BINDING_SERVICE_INSECURE=10000
CONFIG_BINDING_SERVICE_SERVICE_PORT_CONFIG_BINDING_SERVICE_SECURE=10443
CONFLUENT_DEB_VERSION=1
CONFLUENT_MAJOR_VERSION=5
CONFLUENT_MINOR_VERSION=3
CONFLUENT_MVN_LABEL=
CONFLUENT_PATCH_VERSION=1
CONFLUENT_PLATFORM_LABEL=
CONFLUENT_VERSION=5.3.1
CONSUL_SERVER_UI_PORT=tcp://10.96.65.168:8500
CONSUL_SERVER_UI_PORT_8500_TCP=tcp://10.96.65.168:8500
CONSUL_SERVER_UI_PORT_8500_TCP_ADDR=10.96.65.168
CONSUL_SERVER_UI_PORT_8500_TCP_PORT=8500
CONSUL_SERVER_UI_PORT_8500_TCP_PROTO=tcp
CONSUL_SERVER_UI_SERVICE_HOST=10.96.65.168
CONSUL_SERVER_UI_SERVICE_PORT=8500
CONSUL_SERVER_UI_SERVICE_PORT_CONSUL_UI=8500
CPS_PG_PRIMARY_PORT=tcp://10.96.56.212:5432
CPS_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.56.212:5432
CPS_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.56.212
CPS_PG_PRIMARY_PORT_5432_TCP_PORT=5432
CPS_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
CPS_PG_PRIMARY_SERVICE_HOST=10.96.56.212
CPS_PG_PRIMARY_SERVICE_PORT=5432
CPS_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
CPS_PG_REPLICA_PORT=tcp://10.96.216.252:5432
CPS_PG_REPLICA_PORT_5432_TCP=tcp://10.96.216.252:5432
CPS_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.216.252
CPS_PG_REPLICA_PORT_5432_TCP_PORT=5432
CPS_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
CPS_PG_REPLICA_SERVICE_HOST=10.96.216.252
CPS_PG_REPLICA_SERVICE_PORT=5432
CPS_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
CPS_PORT=tcp://10.96.136.241:8080
CPS_PORT_8080_TCP=tcp://10.96.136.241:8080
CPS_PORT_8080_TCP_ADDR=10.96.136.241
CPS_PORT_8080_TCP_PORT=8080
CPS_PORT_8080_TCP_PROTO=tcp
CPS_POSTGRES_PORT=tcp://10.96.69.60:5432
CPS_POSTGRES_PORT_5432_TCP=tcp://10.96.69.60:5432
CPS_POSTGRES_PORT_5432_TCP_ADDR=10.96.69.60
CPS_POSTGRES_PORT_5432_TCP_PORT=5432
CPS_POSTGRES_PORT_5432_TCP_PROTO=tcp
CPS_POSTGRES_SERVICE_HOST=10.96.69.60
CPS_POSTGRES_SERVICE_PORT=5432
CPS_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
CPS_SERVICE_HOST=10.96.136.241
CPS_SERVICE_PORT=8080
CPS_SERVICE_PORT_HTTP=8080
CUB_CLASSPATH=/etc/confluent/docker/docker-utils.jar
DASHBOARD_PORT=tcp://10.96.179.51:8443
DASHBOARD_PORT_8443_TCP=tcp://10.96.179.51:8443
DASHBOARD_PORT_8443_TCP_ADDR=10.96.179.51
DASHBOARD_PORT_8443_TCP_PORT=8443
DASHBOARD_PORT_8443_TCP_PROTO=tcp
DASHBOARD_SERVICE_HOST=10.96.179.51
DASHBOARD_SERVICE_PORT=8443
DASHBOARD_SERVICE_PORT_DASHBOARD=8443
DBC_PG_PRIMARY_PORT=tcp://10.96.217.250:5432
DBC_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.217.250:5432
DBC_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.217.250
DBC_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DBC_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DBC_PG_PRIMARY_SERVICE_HOST=10.96.217.250
DBC_PG_PRIMARY_SERVICE_PORT=5432
DBC_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DBC_PG_REPLICA_PORT=tcp://10.96.37.142:5432
DBC_PG_REPLICA_PORT_5432_TCP=tcp://10.96.37.142:5432
DBC_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.37.142
DBC_PG_REPLICA_PORT_5432_TCP_PORT=5432
DBC_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DBC_PG_REPLICA_SERVICE_HOST=10.96.37.142
DBC_PG_REPLICA_SERVICE_PORT=5432
DBC_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DBC_POSTGRES_PORT=tcp://10.96.246.19:5432
DBC_POSTGRES_PORT_5432_TCP=tcp://10.96.246.19:5432
DBC_POSTGRES_PORT_5432_TCP_ADDR=10.96.246.19
DBC_POSTGRES_PORT_5432_TCP_PORT=5432
DBC_POSTGRES_PORT_5432_TCP_PROTO=tcp
DBC_POSTGRES_SERVICE_HOST=10.96.246.19
DBC_POSTGRES_SERVICE_PORT=5432
DBC_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_DESIGNTOOL_PORT=tcp://10.96.228.15:8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP=tcp://10.96.228.15:8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_ADDR=10.96.228.15
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_PORT=8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_PROTO=tcp
DCAEMOD_DESIGNTOOL_SERVICE_HOST=10.96.228.15
DCAEMOD_DESIGNTOOL_SERVICE_PORT=8080
DCAEMOD_DESIGNTOOL_SERVICE_PORT_HTTP=8080
DCAEMOD_DISTRIBUTOR_API_PORT=tcp://10.96.1.196:8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP=tcp://10.96.1.196:8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_ADDR=10.96.1.196
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_PORT=8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_PROTO=tcp
DCAEMOD_DISTRIBUTOR_API_SERVICE_HOST=10.96.1.196
DCAEMOD_DISTRIBUTOR_API_SERVICE_PORT=8080
DCAEMOD_DISTRIBUTOR_API_SERVICE_PORT_HTTP=8080
DCAEMOD_GENPROCESSOR_PORT=tcp://10.96.111.144:8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP=tcp://10.96.111.144:8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_ADDR=10.96.111.144
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_PORT=8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_PROTO=tcp
DCAEMOD_GENPROCESSOR_SERVICE_HOST=10.96.111.144
DCAEMOD_GENPROCESSOR_SERVICE_PORT=8080
DCAEMOD_GENPROCESSOR_SERVICE_PORT_HTTP=8080
DCAEMOD_HEALTHCHECK_PORT=tcp://10.96.212.144:8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP=tcp://10.96.212.144:8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_ADDR=10.96.212.144
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_PORT=8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_PROTO=tcp
DCAEMOD_HEALTHCHECK_SERVICE_HOST=10.96.212.144
DCAEMOD_HEALTHCHECK_SERVICE_PORT=8080
DCAEMOD_HEALTHCHECK_SERVICE_PORT_HTTP=8080
DCAEMOD_NIFI_REGISTRY_PORT=tcp://10.96.192.57:18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP=tcp://10.96.192.57:18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_ADDR=10.96.192.57
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_PORT=18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_PROTO=tcp
DCAEMOD_NIFI_REGISTRY_SERVICE_HOST=10.96.192.57
DCAEMOD_NIFI_REGISTRY_SERVICE_PORT=18080
DCAEMOD_NIFI_REGISTRY_SERVICE_PORT_HTTP=18080
DCAEMOD_ONBOARDING_API_PORT=tcp://10.96.244.172:8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP=tcp://10.96.244.172:8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_ADDR=10.96.244.172
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_PORT=8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_PROTO=tcp
DCAEMOD_ONBOARDING_API_SERVICE_HOST=10.96.244.172
DCAEMOD_ONBOARDING_API_SERVICE_PORT=8080
DCAEMOD_ONBOARDING_API_SERVICE_PORT_HTTP=8080
DCAEMOD_PG_PRIMARY_PORT=tcp://10.96.58.160:5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.58.160:5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.58.160
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAEMOD_PG_PRIMARY_SERVICE_HOST=10.96.58.160
DCAEMOD_PG_PRIMARY_SERVICE_PORT=5432
DCAEMOD_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_PG_REPLICA_PORT=tcp://10.96.172.41:5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP=tcp://10.96.172.41:5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.172.41
DCAEMOD_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAEMOD_PG_REPLICA_SERVICE_HOST=10.96.172.41
DCAEMOD_PG_REPLICA_SERVICE_PORT=5432
DCAEMOD_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_POSTGRES_PORT=tcp://10.96.193.122:5432
DCAEMOD_POSTGRES_PORT_5432_TCP=tcp://10.96.193.122:5432
DCAEMOD_POSTGRES_PORT_5432_TCP_ADDR=10.96.193.122
DCAEMOD_POSTGRES_PORT_5432_TCP_PORT=5432
DCAEMOD_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAEMOD_POSTGRES_SERVICE_HOST=10.96.193.122
DCAEMOD_POSTGRES_SERVICE_PORT=5432
DCAEMOD_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_RUNTIME_API_PORT=tcp://10.96.56.211:9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP=tcp://10.96.56.211:9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP_ADDR=10.96.56.211
DCAEMOD_RUNTIME_API_PORT_9090_TCP_PORT=9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP_PROTO=tcp
DCAEMOD_RUNTIME_API_SERVICE_HOST=10.96.56.211
DCAEMOD_RUNTIME_API_SERVICE_PORT=9090
DCAEMOD_RUNTIME_API_SERVICE_PORT_HTTP=9090
DCAE_CLOUDIFY_MANAGER_PORT=tcp://10.96.100.77:443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP=tcp://10.96.100.77:443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_ADDR=10.96.100.77
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_PORT=443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_PROTO=tcp
DCAE_CLOUDIFY_MANAGER_SERVICE_HOST=10.96.100.77
DCAE_CLOUDIFY_MANAGER_SERVICE_PORT=443
DCAE_CLOUDIFY_MANAGER_SERVICE_PORT_DCAE_CLOUDIFY_MANAGER=443
DCAE_DASHBOARD_PG_PRIMARY_PORT=tcp://10.96.106.166:5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.106.166:5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.106.166
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_HOST=10.96.106.166
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_PORT=5432
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_DASHBOARD_PG_REPLICA_PORT=tcp://10.96.37.160:5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP=tcp://10.96.37.160:5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.37.160
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_PG_REPLICA_SERVICE_HOST=10.96.37.160
DCAE_DASHBOARD_PG_REPLICA_SERVICE_PORT=5432
DCAE_DASHBOARD_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_DASHBOARD_POSTGRES_PORT=tcp://10.96.197.162:5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP=tcp://10.96.197.162:5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_ADDR=10.96.197.162
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_POSTGRES_SERVICE_HOST=10.96.197.162
DCAE_DASHBOARD_POSTGRES_SERVICE_PORT=5432
DCAE_DASHBOARD_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_HEALTHCHECK_PORT=tcp://10.96.33.182:80
DCAE_HEALTHCHECK_PORT_80_TCP=tcp://10.96.33.182:80
DCAE_HEALTHCHECK_PORT_80_TCP_ADDR=10.96.33.182
DCAE_HEALTHCHECK_PORT_80_TCP_PORT=80
DCAE_HEALTHCHECK_PORT_80_TCP_PROTO=tcp
DCAE_HEALTHCHECK_SERVICE_HOST=10.96.33.182
DCAE_HEALTHCHECK_SERVICE_PORT=80
DCAE_HEALTHCHECK_SERVICE_PORT_DCAE_HEALTHCHECK=80
DCAE_HV_VES_COLLECTOR_PORT=tcp://10.96.46.19:6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP=tcp://10.96.46.19:6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_ADDR=10.96.46.19
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_PORT=6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_PROTO=tcp
DCAE_HV_VES_COLLECTOR_SERVICE_HOST=10.96.46.19
DCAE_HV_VES_COLLECTOR_SERVICE_PORT=6061
DCAE_HV_VES_COLLECTOR_SERVICE_PORT_HTTPS_HTTP=6061
DCAE_INV_PG_PRIMARY_PORT=tcp://10.96.141.176:5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.141.176:5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.141.176
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_INV_PG_PRIMARY_SERVICE_HOST=10.96.141.176
DCAE_INV_PG_PRIMARY_SERVICE_PORT=5432
DCAE_INV_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_INV_PG_REPLICA_PORT=tcp://10.96.98.197:5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP=tcp://10.96.98.197:5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.98.197
DCAE_INV_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_INV_PG_REPLICA_SERVICE_HOST=10.96.98.197
DCAE_INV_PG_REPLICA_SERVICE_PORT=5432
DCAE_INV_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_INV_POSTGRES_PORT=tcp://10.96.96.163:5432
DCAE_INV_POSTGRES_PORT_5432_TCP=tcp://10.96.96.163:5432
DCAE_INV_POSTGRES_PORT_5432_TCP_ADDR=10.96.96.163
DCAE_INV_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_INV_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_INV_POSTGRES_SERVICE_HOST=10.96.96.163
DCAE_INV_POSTGRES_SERVICE_PORT=5432
DCAE_INV_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_MONGOHOST_READ_PORT=tcp://10.96.193.171:27017
DCAE_MONGOHOST_READ_PORT_27017_TCP=tcp://10.96.193.171:27017
DCAE_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.96.193.171
DCAE_MONGOHOST_READ_PORT_27017_TCP_PORT=27017
DCAE_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp
DCAE_MONGOHOST_READ_SERVICE_HOST=10.96.193.171
DCAE_MONGOHOST_READ_SERVICE_PORT=27017
DCAE_MONGOHOST_READ_SERVICE_PORT_MONGO=27017
DCAE_MS_HEALTHCHECK_PORT=tcp://10.96.119.80:8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP=tcp://10.96.119.80:8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_ADDR=10.96.119.80
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_PORT=8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_PROTO=tcp
DCAE_MS_HEALTHCHECK_SERVICE_HOST=10.96.119.80
DCAE_MS_HEALTHCHECK_SERVICE_PORT=8080
DCAE_MS_HEALTHCHECK_SERVICE_PORT_HTTP=8080
DCAE_PG_PRIMARY_PORT=tcp://10.96.132.51:5432
DCAE_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.132.51:5432
DCAE_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.132.51
DCAE_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_PG_PRIMARY_SERVICE_HOST=10.96.132.51
DCAE_PG_PRIMARY_SERVICE_PORT=5432
DCAE_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_PG_REPLICA_PORT=tcp://10.96.242.30:5432
DCAE_PG_REPLICA_PORT_5432_TCP=tcp://10.96.242.30:5432
DCAE_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.242.30
DCAE_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_PG_REPLICA_SERVICE_HOST=10.96.242.30
DCAE_PG_REPLICA_SERVICE_PORT=5432
DCAE_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_POSTGRES_PORT=tcp://10.96.39.100:5432
DCAE_POSTGRES_PORT_5432_TCP=tcp://10.96.39.100:5432
DCAE_POSTGRES_PORT_5432_TCP_ADDR=10.96.39.100
DCAE_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_POSTGRES_SERVICE_HOST=10.96.39.100
DCAE_POSTGRES_SERVICE_PORT=5432
DCAE_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_PRH_PORT=tcp://10.96.125.28:8100
DCAE_PRH_PORT_8100_TCP=tcp://10.96.125.28:8100
DCAE_PRH_PORT_8100_TCP_ADDR=10.96.125.28
DCAE_PRH_PORT_8100_TCP_PORT=8100
DCAE_PRH_PORT_8100_TCP_PROTO=tcp
DCAE_PRH_SERVICE_HOST=10.96.125.28
DCAE_PRH_SERVICE_PORT=8100
DCAE_PRH_SERVICE_PORT_HTTP=8100
DCAE_TCAGEN2_PORT=tcp://10.96.179.173:9091
DCAE_TCAGEN2_PORT_9091_TCP=tcp://10.96.179.173:9091
DCAE_TCAGEN2_PORT_9091_TCP_ADDR=10.96.179.173
DCAE_TCAGEN2_PORT_9091_TCP_PORT=9091
DCAE_TCAGEN2_PORT_9091_TCP_PROTO=tcp
DCAE_TCAGEN2_SERVICE_HOST=10.96.179.173
DCAE_TCAGEN2_SERVICE_PORT=9091
DCAE_TCAGEN2_SERVICE_PORT_HTTP=9091
DCAE_VES_COLLECTOR_PORT=tcp://10.96.214.137:8443
DCAE_VES_COLLECTOR_PORT_8443_TCP=tcp://10.96.214.137:8443
DCAE_VES_COLLECTOR_PORT_8443_TCP_ADDR=10.96.214.137
DCAE_VES_COLLECTOR_PORT_8443_TCP_PORT=8443
DCAE_VES_COLLECTOR_PORT_8443_TCP_PROTO=tcp
DCAE_VES_COLLECTOR_SERVICE_HOST=10.96.214.137
DCAE_VES_COLLECTOR_SERVICE_PORT=8443
DCAE_VES_COLLECTOR_SERVICE_PORT_HTTPS_HTTP=8443
DEPLOYMENT_HANDLER_PORT=tcp://10.96.63.195:8443
DEPLOYMENT_HANDLER_PORT_8443_TCP=tcp://10.96.63.195:8443
DEPLOYMENT_HANDLER_PORT_8443_TCP_ADDR=10.96.63.195
DEPLOYMENT_HANDLER_PORT_8443_TCP_PORT=8443
DEPLOYMENT_HANDLER_PORT_8443_TCP_PROTO=tcp
DEPLOYMENT_HANDLER_SERVICE_HOST=10.96.63.195
DEPLOYMENT_HANDLER_SERVICE_PORT=8443
DEPLOYMENT_HANDLER_SERVICE_PORT_DEPLOYMENT_HANDLER=8443
DEV_APPC_DB_METRICS_PORT=tcp://10.96.12.49:9104
DEV_APPC_DB_METRICS_PORT_9104_TCP=tcp://10.96.12.49:9104
DEV_APPC_DB_METRICS_PORT_9104_TCP_ADDR=10.96.12.49
DEV_APPC_DB_METRICS_PORT_9104_TCP_PORT=9104
DEV_APPC_DB_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_APPC_DB_METRICS_SERVICE_HOST=10.96.12.49
DEV_APPC_DB_METRICS_SERVICE_PORT=9104
DEV_APPC_DB_METRICS_SERVICE_PORT_METRICS=9104
DEV_CDS_DB_METRICS_PORT=tcp://10.96.173.18:9104
DEV_CDS_DB_METRICS_PORT_9104_TCP=tcp://10.96.173.18:9104
DEV_CDS_DB_METRICS_PORT_9104_TCP_ADDR=10.96.173.18
DEV_CDS_DB_METRICS_PORT_9104_TCP_PORT=9104
DEV_CDS_DB_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_CDS_DB_METRICS_SERVICE_HOST=10.96.173.18
DEV_CDS_DB_METRICS_SERVICE_PORT=9104
DEV_CDS_DB_METRICS_SERVICE_PORT_METRICS=9104
DEV_CMSO_DB_METRICS_PORT=tcp://10.96.48.57:9104
DEV_CMSO_DB_METRICS_PORT_9104_TCP=tcp://10.96.48.57:9104
DEV_CMSO_DB_METRICS_PORT_9104_TCP_ADDR=10.96.48.57
DEV_CMSO_DB_METRICS_PORT_9104_TCP_PORT=9104
DEV_CMSO_DB_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_CMSO_DB_METRICS_SERVICE_HOST=10.96.48.57
DEV_CMSO_DB_METRICS_SERVICE_PORT=9104
DEV_CMSO_DB_METRICS_SERVICE_PORT_METRICS=9104
DEV_DMAAP_DR_DB_METRICS_PORT=tcp://10.96.176.235:9104
DEV_DMAAP_DR_DB_METRICS_PORT_9104_TCP=tcp://10.96.176.235:9104
DEV_DMAAP_DR_DB_METRICS_PORT_9104_TCP_ADDR=10.96.176.235
DEV_DMAAP_DR_DB_METRICS_PORT_9104_TCP_PORT=9104
DEV_DMAAP_DR_DB_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_DMAAP_DR_DB_METRICS_SERVICE_HOST=10.96.176.235
DEV_DMAAP_DR_DB_METRICS_SERVICE_PORT=9104
DEV_DMAAP_DR_DB_METRICS_SERVICE_PORT_METRICS=9104
DEV_MARIADB_GALERA_METRICS_PORT=tcp://10.96.201.132:9104
DEV_MARIADB_GALERA_METRICS_PORT_9104_TCP=tcp://10.96.201.132:9104
DEV_MARIADB_GALERA_METRICS_PORT_9104_TCP_ADDR=10.96.201.132
DEV_MARIADB_GALERA_METRICS_PORT_9104_TCP_PORT=9104
DEV_MARIADB_GALERA_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_MARIADB_GALERA_METRICS_SERVICE_HOST=10.96.201.132
DEV_MARIADB_GALERA_METRICS_SERVICE_PORT=9104
DEV_MARIADB_GALERA_METRICS_SERVICE_PORT_METRICS=9104
DMAAP_BC_PORT=tcp://10.96.69.65:8443
DMAAP_BC_PORT_8443_TCP=tcp://10.96.69.65:8443
DMAAP_BC_PORT_8443_TCP_ADDR=10.96.69.65
DMAAP_BC_PORT_8443_TCP_PORT=8443
DMAAP_BC_PORT_8443_TCP_PROTO=tcp
DMAAP_BC_SERVICE_HOST=10.96.69.65
DMAAP_BC_SERVICE_PORT=8443
DMAAP_BC_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_DB_PORT=tcp://10.96.223.211:3306
DMAAP_DR_DB_PORT_3306_TCP=tcp://10.96.223.211:3306
DMAAP_DR_DB_PORT_3306_TCP_ADDR=10.96.223.211
DMAAP_DR_DB_PORT_3306_TCP_PORT=3306
DMAAP_DR_DB_PORT_3306_TCP_PROTO=tcp
DMAAP_DR_DB_SERVICE_HOST=10.96.223.211
DMAAP_DR_DB_SERVICE_PORT=3306
DMAAP_DR_DB_SERVICE_PORT_MYSQL=3306
DMAAP_DR_NODE_EXTERNAL_PORT=tcp://10.96.144.118:8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP=tcp://10.96.144.118:8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_ADDR=10.96.144.118
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PORT=8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PROTO=tcp
DMAAP_DR_NODE_EXTERNAL_SERVICE_HOST=10.96.144.118
DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT=8443
DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_NODE_PORT=tcp://10.96.111.165:8443
DMAAP_DR_NODE_PORT_8080_TCP=tcp://10.96.111.165:8080
DMAAP_DR_NODE_PORT_8080_TCP_ADDR=10.96.111.165
DMAAP_DR_NODE_PORT_8080_TCP_PORT=8080
DMAAP_DR_NODE_PORT_8080_TCP_PROTO=tcp
DMAAP_DR_NODE_PORT_8443_TCP=tcp://10.96.111.165:8443
DMAAP_DR_NODE_PORT_8443_TCP_ADDR=10.96.111.165
DMAAP_DR_NODE_PORT_8443_TCP_PORT=8443
DMAAP_DR_NODE_PORT_8443_TCP_PROTO=tcp
DMAAP_DR_NODE_SERVICE_HOST=10.96.111.165
DMAAP_DR_NODE_SERVICE_PORT=8443
DMAAP_DR_NODE_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_NODE_SERVICE_PORT_HTTP_API=8080
DMAAP_DR_PROV_PORT=tcp://10.96.41.53:443
DMAAP_DR_PROV_PORT_443_TCP=tcp://10.96.41.53:443
DMAAP_DR_PROV_PORT_443_TCP_ADDR=10.96.41.53
DMAAP_DR_PROV_PORT_443_TCP_PORT=443
DMAAP_DR_PROV_PORT_443_TCP_PROTO=tcp
DMAAP_DR_PROV_SERVICE_HOST=10.96.41.53
DMAAP_DR_PROV_SERVICE_PORT=443
DMAAP_DR_PROV_SERVICE_PORT_DR_PROV_PORT2=443
EJBCA_PORT=tcp://10.96.8.204:8443
EJBCA_PORT_8080_TCP=tcp://10.96.8.204:8080
EJBCA_PORT_8080_TCP_ADDR=10.96.8.204
EJBCA_PORT_8080_TCP_PORT=8080
EJBCA_PORT_8080_TCP_PROTO=tcp
EJBCA_PORT_8443_TCP=tcp://10.96.8.204:8443
EJBCA_PORT_8443_TCP_ADDR=10.96.8.204
EJBCA_PORT_8443_TCP_PORT=8443
EJBCA_PORT_8443_TCP_PROTO=tcp
EJBCA_SERVICE_HOST=10.96.8.204
EJBCA_SERVICE_PORT=8443
EJBCA_SERVICE_PORT_HTTPS_API=8443
EJBCA_SERVICE_PORT_HTTP_API=8080
ESR_GUI_PORT=tcp://10.96.180.125:8080
ESR_GUI_PORT_8080_TCP=tcp://10.96.180.125:8080
ESR_GUI_PORT_8080_TCP_ADDR=10.96.180.125
ESR_GUI_PORT_8080_TCP_PORT=8080
ESR_GUI_PORT_8080_TCP_PROTO=tcp
ESR_GUI_SERVICE_HOST=10.96.180.125
ESR_GUI_SERVICE_PORT=8080
ESR_GUI_SERVICE_PORT_ESR_GUI=8080
ESR_SERVER_PORT=tcp://10.96.67.233:9518
ESR_SERVER_PORT_9518_TCP=tcp://10.96.67.233:9518
ESR_SERVER_PORT_9518_TCP_ADDR=10.96.67.233
ESR_SERVER_PORT_9518_TCP_PORT=9518
ESR_SERVER_PORT_9518_TCP_PROTO=tcp
ESR_SERVER_SERVICE_HOST=10.96.67.233
ESR_SERVER_SERVICE_PORT=9518
ESR_SERVER_SERVICE_PORT_ESR_SERVER=9518
HOLMES_ENGINE_MGMT_PORT=tcp://10.96.153.22:9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP=tcp://10.96.153.22:9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP_ADDR=10.96.153.22
HOLMES_ENGINE_MGMT_PORT_9102_TCP_PORT=9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP_PROTO=tcp
HOLMES_ENGINE_MGMT_SERVICE_HOST=10.96.153.22
HOLMES_ENGINE_MGMT_SERVICE_PORT=9102
HOLMES_ENGINE_MGMT_SERVICE_PORT_HTTPS_REST=9102
HOLMES_POSTGRES_PORT=tcp://10.96.41.33:5432
HOLMES_POSTGRES_PORT_5432_TCP=tcp://10.96.41.33:5432
HOLMES_POSTGRES_PORT_5432_TCP_ADDR=10.96.41.33
HOLMES_POSTGRES_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_PRIMARY_PORT=tcp://10.96.40.131:5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP=tcp://10.96.40.131:5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_ADDR=10.96.40.131
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_PRIMARY_SERVICE_HOST=10.96.40.131
HOLMES_POSTGRES_PRIMARY_SERVICE_PORT=5432
HOLMES_POSTGRES_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_POSTGRES_REPLICA_PORT=tcp://10.96.120.121:5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP=tcp://10.96.120.121:5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_ADDR=10.96.120.121
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_REPLICA_SERVICE_HOST=10.96.120.121
HOLMES_POSTGRES_REPLICA_SERVICE_PORT=5432
HOLMES_POSTGRES_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_POSTGRES_SERVICE_HOST=10.96.41.33
HOLMES_POSTGRES_SERVICE_PORT=5432
HOLMES_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_RULE_MGMT_PORT=tcp://10.96.232.22:9101
HOLMES_RULE_MGMT_PORT_9101_TCP=tcp://10.96.232.22:9101
HOLMES_RULE_MGMT_PORT_9101_TCP_ADDR=10.96.232.22
HOLMES_RULE_MGMT_PORT_9101_TCP_PORT=9101
HOLMES_RULE_MGMT_PORT_9101_TCP_PROTO=tcp
HOLMES_RULE_MGMT_PORT_9104_TCP=tcp://10.96.232.22:9104
HOLMES_RULE_MGMT_PORT_9104_TCP_ADDR=10.96.232.22
HOLMES_RULE_MGMT_PORT_9104_TCP_PORT=9104
HOLMES_RULE_MGMT_PORT_9104_TCP_PROTO=tcp
HOLMES_RULE_MGMT_SERVICE_HOST=10.96.232.22
HOLMES_RULE_MGMT_SERVICE_PORT=9101
HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_REST=9101
HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_UI=9104
HOME=/home/mrkafka
HOSTNAME=dev-message-router-kafka-2
HOST_IP=172.16.10.172
INVENTORY_PORT=tcp://10.96.105.10:8080
INVENTORY_PORT_8080_TCP=tcp://10.96.105.10:8080
INVENTORY_PORT_8080_TCP_ADDR=10.96.105.10
INVENTORY_PORT_8080_TCP_PORT=8080
INVENTORY_PORT_8080_TCP_PROTO=tcp
INVENTORY_SERVICE_HOST=10.96.105.10
INVENTORY_SERVICE_PORT=8080
INVENTORY_SERVICE_PORT_INVENTORY=8080
KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://172.16.10.172:30492,INTERNAL_SASL_PLAINTEXT://:9092
KAFKA_AUTHORIZER_CLASS_NAME=org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
KAFKA_BROKER_ID=2
KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE=false
KAFKA_DEFAULT_REPLICATION_FACTOR=3
KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL_SASL_PLAINTEXT
KAFKA_JMX_PORT=5555
KAFKA_LISTENERS=EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
KAFKA_LOG_DIRS=/var/lib/kafka/data
KAFKA_LOG_RETENTION_HOURS=168
KAFKA_NUM_PARTITIONS=3
KAFKA_NUM_RECOVERY_THREADS_PER_DATA_DIR=5
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=3
KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf
KAFKA_SASL_ENABLED_MECHANISMS=PLAIN
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1
KAFKA_USER=mrkafka
KAFKA_VERSION=5.3.1
KAFKA_ZOOKEEPER_CONNECT=dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=6000
KAFKA_ZOOKEEPER_SET_ACL=true
KUBERNETES_PORT=tcp://10.96.0.1:443
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_SERVICE_HOST=10.96.0.1
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_PORT_HTTPS=443
LANG=C.UTF-8
MARIADB_GALERA_PORT=tcp://10.96.77.241:3306
MARIADB_GALERA_PORT_3306_TCP=tcp://10.96.77.241:3306
MARIADB_GALERA_PORT_3306_TCP_ADDR=10.96.77.241
MARIADB_GALERA_PORT_3306_TCP_PORT=3306
MARIADB_GALERA_PORT_3306_TCP_PROTO=tcp
MARIADB_GALERA_SERVICE_HOST=10.96.77.241
MARIADB_GALERA_SERVICE_PORT=3306
MARIADB_GALERA_SERVICE_PORT_MYSQL=3306
MESSAGE_ROUTER_EXTERNAL_PORT=tcp://10.96.9.166:3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP=tcp://10.96.9.166:3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_ADDR=10.96.9.166
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PORT=3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PROTO=tcp
MESSAGE_ROUTER_EXTERNAL_SERVICE_HOST=10.96.9.166
MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT=3905
MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT_HTTPS_API=3905
MESSAGE_ROUTER_KAFKA_0_PORT=tcp://10.96.166.165:9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP=tcp://10.96.166.165:9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_ADDR=10.96.166.165
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_0_SERVICE_HOST=10.96.166.165
MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_0=9091
MESSAGE_ROUTER_KAFKA_1_PORT=tcp://10.96.230.27:9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP=tcp://10.96.230.27:9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_ADDR=10.96.230.27
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_1_SERVICE_HOST=10.96.230.27
MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_1=9091
MESSAGE_ROUTER_KAFKA_2_PORT=tcp://10.96.243.88:9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP=tcp://10.96.243.88:9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_ADDR=10.96.243.88
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_2_SERVICE_HOST=10.96.243.88
MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_2=9091
MESSAGE_ROUTER_PORT=tcp://10.96.114.131:3905
MESSAGE_ROUTER_PORT_3904_TCP=tcp://10.96.114.131:3904
MESSAGE_ROUTER_PORT_3904_TCP_ADDR=10.96.114.131
MESSAGE_ROUTER_PORT_3904_TCP_PORT=3904
MESSAGE_ROUTER_PORT_3904_TCP_PROTO=tcp
MESSAGE_ROUTER_PORT_3905_TCP=tcp://10.96.114.131:3905
MESSAGE_ROUTER_PORT_3905_TCP_ADDR=10.96.114.131
MESSAGE_ROUTER_PORT_3905_TCP_PORT=3905
MESSAGE_ROUTER_PORT_3905_TCP_PROTO=tcp
MESSAGE_ROUTER_SERVICE_HOST=10.96.114.131
MESSAGE_ROUTER_SERVICE_PORT=3905
MESSAGE_ROUTER_SERVICE_PORT_HTTPS_API=3905
MESSAGE_ROUTER_SERVICE_PORT_HTTP_API=3904
MODELING_ETSICATALOG_PORT=tcp://10.96.68.162:8806
MODELING_ETSICATALOG_PORT_8806_TCP=tcp://10.96.68.162:8806
MODELING_ETSICATALOG_PORT_8806_TCP_ADDR=10.96.68.162
MODELING_ETSICATALOG_PORT_8806_TCP_PORT=8806
MODELING_ETSICATALOG_PORT_8806_TCP_PROTO=tcp
MODELING_ETSICATALOG_SERVICE_HOST=10.96.68.162
MODELING_ETSICATALOG_SERVICE_PORT=8806
MODELING_ETSICATALOG_SERVICE_PORT_MODELING_ETSICATALOG=8806
MSB_CONSUL_PORT=tcp://10.96.17.163:8500
MSB_CONSUL_PORT_8500_TCP=tcp://10.96.17.163:8500
MSB_CONSUL_PORT_8500_TCP_ADDR=10.96.17.163
MSB_CONSUL_PORT_8500_TCP_PORT=8500
MSB_CONSUL_PORT_8500_TCP_PROTO=tcp
MSB_CONSUL_SERVICE_HOST=10.96.17.163
MSB_CONSUL_SERVICE_PORT=8500
MSB_CONSUL_SERVICE_PORT_HTTP_MSB_CONSUL=8500
MSB_DISCOVERY_PORT=tcp://10.96.103.17:10081
MSB_DISCOVERY_PORT_10081_TCP=tcp://10.96.103.17:10081
MSB_DISCOVERY_PORT_10081_TCP_ADDR=10.96.103.17
MSB_DISCOVERY_PORT_10081_TCP_PORT=10081
MSB_DISCOVERY_PORT_10081_TCP_PROTO=tcp
MSB_DISCOVERY_SERVICE_HOST=10.96.103.17
MSB_DISCOVERY_SERVICE_PORT=10081
MSB_DISCOVERY_SERVICE_PORT_HTTP_MSB_DISCOVERY=10081
MSB_EAG_PORT=tcp://10.96.43.155:443
MSB_EAG_PORT_443_TCP=tcp://10.96.43.155:443
MSB_EAG_PORT_443_TCP_ADDR=10.96.43.155
MSB_EAG_PORT_443_TCP_PORT=443
MSB_EAG_PORT_443_TCP_PROTO=tcp
MSB_EAG_SERVICE_HOST=10.96.43.155
MSB_EAG_SERVICE_PORT=443
MSB_EAG_SERVICE_PORT_HTTPS_MSB_EAG=443
MSB_IAG_PORT=tcp://10.96.180.18:443
MSB_IAG_PORT_443_TCP=tcp://10.96.180.18:443
MSB_IAG_PORT_443_TCP_ADDR=10.96.180.18
MSB_IAG_PORT_443_TCP_PORT=443
MSB_IAG_PORT_443_TCP_PROTO=tcp
MSB_IAG_SERVICE_HOST=10.96.180.18
MSB_IAG_SERVICE_PORT=443
MSB_IAG_SERVICE_PORT_HTTPS_MSB_IAG=443
MULTICLOUD_FCAPS_PORT=tcp://10.96.163.89:9011
MULTICLOUD_FCAPS_PORT_9011_TCP=tcp://10.96.163.89:9011
MULTICLOUD_FCAPS_PORT_9011_TCP_ADDR=10.96.163.89
MULTICLOUD_FCAPS_PORT_9011_TCP_PORT=9011
MULTICLOUD_FCAPS_PORT_9011_TCP_PROTO=tcp
MULTICLOUD_FCAPS_SERVICE_HOST=10.96.163.89
MULTICLOUD_FCAPS_SERVICE_PORT=9011
MULTICLOUD_FCAPS_SERVICE_PORT_MULTICLOUD_FCAPS=9011
MULTICLOUD_FRAMEWORK_PORT=tcp://10.96.144.97:9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP=tcp://10.96.144.97:9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_ADDR=10.96.144.97
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PORT=9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PROTO=tcp
MULTICLOUD_FRAMEWORK_SERVICE_HOST=10.96.144.97
MULTICLOUD_FRAMEWORK_SERVICE_PORT=9001
MULTICLOUD_FRAMEWORK_SERVICE_PORT_MULTICLOUD_FRAMEWORK=9001
MULTICLOUD_K8S_MONGO_READ_PORT=tcp://10.96.28.220:27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP=tcp://10.96.28.220:27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_ADDR=10.96.28.220
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PORT=27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PROTO=tcp
MULTICLOUD_K8S_MONGO_READ_SERVICE_HOST=10.96.28.220
MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT=27017
MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT_MONGO=27017
MULTICLOUD_K8S_PORT=tcp://10.96.40.33:9015
MULTICLOUD_K8S_PORT_9015_TCP=tcp://10.96.40.33:9015
MULTICLOUD_K8S_PORT_9015_TCP_ADDR=10.96.40.33
MULTICLOUD_K8S_PORT_9015_TCP_PORT=9015
MULTICLOUD_K8S_PORT_9015_TCP_PROTO=tcp
MULTICLOUD_K8S_SERVICE_HOST=10.96.40.33
MULTICLOUD_K8S_SERVICE_PORT=9015
MULTICLOUD_PIKE_PORT=tcp://10.96.227.167:9007
MULTICLOUD_PIKE_PORT_9007_TCP=tcp://10.96.227.167:9007
MULTICLOUD_PIKE_PORT_9007_TCP_ADDR=10.96.227.167
MULTICLOUD_PIKE_PORT_9007_TCP_PORT=9007
MULTICLOUD_PIKE_PORT_9007_TCP_PROTO=tcp
MULTICLOUD_PIKE_SERVICE_HOST=10.96.227.167
MULTICLOUD_PIKE_SERVICE_PORT=9007
MULTICLOUD_PIKE_SERVICE_PORT_MULTICLOUD_PIKE=9007
MULTICLOUD_STARLINGX_PORT=tcp://10.96.151.62:9009
MULTICLOUD_STARLINGX_PORT_9009_TCP=tcp://10.96.151.62:9009
MULTICLOUD_STARLINGX_PORT_9009_TCP_ADDR=10.96.151.62
MULTICLOUD_STARLINGX_PORT_9009_TCP_PORT=9009
MULTICLOUD_STARLINGX_PORT_9009_TCP_PROTO=tcp
MULTICLOUD_STARLINGX_SERVICE_HOST=10.96.151.62
MULTICLOUD_STARLINGX_SERVICE_PORT=9009
MULTICLOUD_STARLINGX_SERVICE_PORT_MULTICLOUD_STARLINGX=9009
MULTICLOUD_TITANIUMCLOUD_PORT=tcp://10.96.28.14:9005
MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP=tcp://10.96.28.14:9005
MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP_ADDR=10.96.28.14
MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP_PORT=9005
MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP_PROTO=tcp
MULTICLOUD_TITANIUMCLOUD_SERVICE_HOST=10.96.28.14
MULTICLOUD_TITANIUMCLOUD_SERVICE_PORT=9005
MULTICLOUD_TITANIUMCLOUD_SERVICE_PORT_MULTICLOUD_TITANIUMCLOUD=9005
MULTICLOUD_VIO_PORT=tcp://10.96.15.57:9004
MULTICLOUD_VIO_PORT_9004_TCP=tcp://10.96.15.57:9004
MULTICLOUD_VIO_PORT_9004_TCP_ADDR=10.96.15.57
MULTICLOUD_VIO_PORT_9004_TCP_PORT=9004
MULTICLOUD_VIO_PORT_9004_TCP_PROTO=tcp
MULTICLOUD_VIO_SERVICE_HOST=10.96.15.57
MULTICLOUD_VIO_SERVICE_PORT=9004
MULTICLOUD_VIO_SERVICE_PORT_MULTICLOUD_VIO=9004
NBI_MONGOHOST_READ_PORT=tcp://10.96.55.85:27017
NBI_MONGOHOST_READ_PORT_27017_TCP=tcp://10.96.55.85:27017
NBI_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.96.55.85
NBI_MONGOHOST_READ_PORT_27017_TCP_PORT=27017
NBI_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp
NBI_MONGOHOST_READ_SERVICE_HOST=10.96.55.85
NBI_MONGOHOST_READ_SERVICE_PORT=27017
NBI_MONGOHOST_READ_SERVICE_PORT_MONGO=27017
NBI_PORT=tcp://10.96.84.186:8443
NBI_PORT_8443_TCP=tcp://10.96.84.186:8443
NBI_PORT_8443_TCP_ADDR=10.96.84.186
NBI_PORT_8443_TCP_PORT=8443
NBI_PORT_8443_TCP_PROTO=tcp
NBI_SERVICE_HOST=10.96.84.186
NBI_SERVICE_PORT=8443
NBI_SERVICE_PORT_API_8443=8443
NETBOX_APP_PORT=tcp://10.96.196.95:8001
NETBOX_APP_PORT_8001_TCP=tcp://10.96.196.95:8001
NETBOX_APP_PORT_8001_TCP_ADDR=10.96.196.95
NETBOX_APP_PORT_8001_TCP_PORT=8001
NETBOX_APP_PORT_8001_TCP_PROTO=tcp
NETBOX_APP_SERVICE_HOST=10.96.196.95
NETBOX_APP_SERVICE_PORT=8001
NETBOX_APP_SERVICE_PORT_NETBOX_APP=8001
NETBOX_NGINX_PORT=tcp://10.96.176.112:8080
NETBOX_NGINX_PORT_8080_TCP=tcp://10.96.176.112:8080
NETBOX_NGINX_PORT_8080_TCP_ADDR=10.96.176.112
NETBOX_NGINX_PORT_8080_TCP_PORT=8080
NETBOX_NGINX_PORT_8080_TCP_PROTO=tcp
NETBOX_NGINX_SERVICE_HOST=10.96.176.112
NETBOX_NGINX_SERVICE_PORT=8080
NETBOX_POSTGRES_PORT=tcp://10.96.234.43:5432
NETBOX_POSTGRES_PORT_5432_TCP=tcp://10.96.234.43:5432
NETBOX_POSTGRES_PORT_5432_TCP_ADDR=10.96.234.43
NETBOX_POSTGRES_PORT_5432_TCP_PORT=5432
NETBOX_POSTGRES_PORT_5432_TCP_PROTO=tcp
NETBOX_POSTGRES_SERVICE_HOST=10.96.234.43
NETBOX_POSTGRES_SERVICE_PORT=5432
NETBOX_POSTGRES_SERVICE_PORT_NETBOX_POSTGRES=5432
OOF_CMSO_OPTIMIZER_PORT=tcp://10.96.139.6:7997
OOF_CMSO_OPTIMIZER_PORT_7997_TCP=tcp://10.96.139.6:7997
OOF_CMSO_OPTIMIZER_PORT_7997_TCP_ADDR=10.96.139.6
OOF_CMSO_OPTIMIZER_PORT_7997_TCP_PORT=7997
OOF_CMSO_OPTIMIZER_PORT_7997_TCP_PROTO=tcp
OOF_CMSO_OPTIMIZER_SERVICE_HOST=10.96.139.6
OOF_CMSO_OPTIMIZER_SERVICE_PORT=7997
OOF_CMSO_OPTIMIZER_SERVICE_PORT_CMSO=7997
OOF_CMSO_PORT=tcp://10.96.187.99:8080
OOF_CMSO_PORT_8080_TCP=tcp://10.96.187.99:8080
OOF_CMSO_PORT_8080_TCP_ADDR=10.96.187.99
OOF_CMSO_PORT_8080_TCP_PORT=8080
OOF_CMSO_PORT_8080_TCP_PROTO=tcp
OOF_CMSO_SERVICE_HOST=10.96.187.99
OOF_CMSO_SERVICE_PORT=8080
OOF_CMSO_SERVICE_PORT_CMSO=8080
OOF_CMSO_TICKETMGT_PORT=tcp://10.96.20.57:7999
OOF_CMSO_TICKETMGT_PORT_7999_TCP=tcp://10.96.20.57:7999
OOF_CMSO_TICKETMGT_PORT_7999_TCP_ADDR=10.96.20.57
OOF_CMSO_TICKETMGT_PORT_7999_TCP_PORT=7999
OOF_CMSO_TICKETMGT_PORT_7999_TCP_PROTO=tcp
OOF_CMSO_TICKETMGT_SERVICE_HOST=10.96.20.57
OOF_CMSO_TICKETMGT_SERVICE_PORT=7999
OOF_CMSO_TICKETMGT_SERVICE_PORT_CMSO_TICKETMGT=7999
OOF_CMSO_TOPOLOGY_PORT=tcp://10.96.233.25:7998
OOF_CMSO_TOPOLOGY_PORT_7998_TCP=tcp://10.96.233.25:7998
OOF_CMSO_TOPOLOGY_PORT_7998_TCP_ADDR=10.96.233.25
OOF_CMSO_TOPOLOGY_PORT_7998_TCP_PORT=7998
OOF_CMSO_TOPOLOGY_PORT_7998_TCP_PROTO=tcp
OOF_CMSO_TOPOLOGY_SERVICE_HOST=10.96.233.25
OOF_CMSO_TOPOLOGY_SERVICE_PORT=7998
OOF_CMSO_TOPOLOGY_SERVICE_PORT_CMSO_TOPOLOGY=7998
OOF_HAS_API_PORT=tcp://10.96.153.200:8091
OOF_HAS_API_PORT_8091_TCP=tcp://10.96.153.200:8091
OOF_HAS_API_PORT_8091_TCP_ADDR=10.96.153.200
OOF_HAS_API_PORT_8091_TCP_PORT=8091
OOF_HAS_API_PORT_8091_TCP_PROTO=tcp
OOF_HAS_API_SERVICE_HOST=10.96.153.200
OOF_HAS_API_SERVICE_PORT=8091
OOF_HAS_API_SERVICE_PORT_OOF_HAS_API=8091
OOF_OSDF_PORT=tcp://10.96.155.13:8698
OOF_OSDF_PORT_8698_TCP=tcp://10.96.155.13:8698
OOF_OSDF_PORT_8698_TCP_ADDR=10.96.155.13
OOF_OSDF_PORT_8698_TCP_PORT=8698
OOF_OSDF_PORT_8698_TCP_PROTO=tcp
OOF_OSDF_SERVICE_HOST=10.96.155.13
OOF_OSDF_SERVICE_PORT=8698
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_PORT=tcp://10.96.32.172:8443
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_PORT_8443_TCP=tcp://10.96.32.172:8443
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_PORT_8443_TCP_ADDR=10.96.32.172
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_PORT_8443_TCP_PORT=8443
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_PORT_8443_TCP_PROTO=tcp
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_SERVICE_HOST=10.96.32.172
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_SERVICE_PORT=8443
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_SERVICE_PORT_HTTPS=8443
OOM_CERT_SERVICE_PORT=tcp://10.96.58.26:8443
OOM_CERT_SERVICE_PORT_8443_TCP=tcp://10.96.58.26:8443
OOM_CERT_SERVICE_PORT_8443_TCP_ADDR=10.96.58.26
OOM_CERT_SERVICE_PORT_8443_TCP_PORT=8443
OOM_CERT_SERVICE_PORT_8443_TCP_PROTO=tcp
OOM_CERT_SERVICE_SERVICE_HOST=10.96.58.26
OOM_CERT_SERVICE_SERVICE_PORT=8443
OOM_CERT_SERVICE_SERVICE_PORT_HTTPS_HTTP=8443
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
POLICY_HANDLER_PORT=tcp://10.96.248.252:80
POLICY_HANDLER_PORT_80_TCP=tcp://10.96.248.252:80
POLICY_HANDLER_PORT_80_TCP_ADDR=10.96.248.252
POLICY_HANDLER_PORT_80_TCP_PORT=80
POLICY_HANDLER_PORT_80_TCP_PROTO=tcp
POLICY_HANDLER_SERVICE_HOST=10.96.248.252
POLICY_HANDLER_SERVICE_PORT=80
POLICY_HANDLER_SERVICE_PORT_POLICY_HANDLER=80
PWD=/
PYTHON_PIP_VERSION=8.1.2
PYTHON_VERSION=2.7.9-1
SCALA_VERSION=2.12
SHLVL=1
ZULU_OPENJDK_VERSION=8=8.38.0.13
_=/usr/bin/env
aaf_locate_url=https://aaf-locate.onap:8095
enableCadi=true
===> User
uid=1000(mrkafka) gid=0(root) groups=0(root)
===> Configuring ...
SASL is enabled.
===> Running preflight checks ... 
===> Check if /var/lib/kafka/data is writable ...
===> Check if Zookeeper is healthy ...
[main] INFO io.confluent.admin.utils.ClusterStatus - SASL is enabled. java.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:host.name=dev-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.version=1.8.0_212
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.vendor=Azul Systems, Inc.
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.class.path=/etc/confluent/docker/docker-utils.jar
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.io.tmpdir=/tmp
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.compiler=
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.name=Linux
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.arch=amd64
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.version=4.15.0-117-generic
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.name=mrkafka
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.home=/home/mrkafka
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.dir=/
[main] INFO org.apache.zookeeper.ZooKeeper - Initiating client connection, connectString=dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=40000 watcher=io.confluent.admin.utils.ZookeeperConnectionWatcher@30dae81
[main-SendThread(dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.Login - Client successfully logged in.
[main-SendThread(dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.client.ZooKeeperSaslClient - Client will use DIGEST-MD5 as SASL mechanism.
[main-SendThread(dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Opening socket connection to server dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.242.47.9:2181. Will attempt to SASL-authenticate using Login Context section 'Client'
[main-SendThread(dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Socket connection established to dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.242.47.9:2181, initiating session
[main-SendThread(dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Session establishment complete on server dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.242.47.9:2181, sessionid = 0x100001c043a0000, negotiated timeout = 40000
[main] INFO org.apache.zookeeper.ZooKeeper - Session: 0x100001c043a0000 closed
[main-EventThread] INFO org.apache.zookeeper.ClientCnxn - EventThread shut down for session: 0x100001c043a0000
===> Launching ... 
===> Launching kafka ... 
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/share/java/kafka/slf4j-log4j12-1.7.26.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/share/java/kafka/kafka11aaf-jar-with-dependencies.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
[2021-06-03 22:46:34,164] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[2021-06-03 22:46:34,432] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://172.16.10.172:30492,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.id = 2
	broker.id.generation.enable = true
	broker.rack = null
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.3-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.3-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1000012
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 10000
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = [DEFAULT]
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.connect = dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 6000
	zookeeper.set.acl = true
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2021-06-03 22:46:34,499] WARN The package io.confluent.support.metrics.collectors.FullCollector for collecting the full set of support metrics could not be loaded, so we are reverting to anonymous, basic metric collection. If you are a Confluent customer, please refer to the Confluent Platform documentation, section Proactive Support, on how to activate full metrics collection. (io.confluent.support.metrics.KafkaSupportConfig)
[2021-06-03 22:46:34,500] WARN The support metrics collection feature ("Metrics") of Proactive Support is disabled. (io.confluent.support.metrics.SupportedServerStartable)
[2021-06-03 22:46:34,502] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler)
[2021-06-03 22:46:34,502] INFO starting (kafka.server.KafkaServer)
[2021-06-03 22:46:34,503] INFO Connecting to zookeeper on dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 (kafka.server.KafkaServer)
[2021-06-03 22:46:34,524] INFO [ZooKeeperClient Kafka server] Initializing a new session to dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181. (kafka.zookeeper.ZooKeeperClient)
[2021-06-03 22:46:34,531] INFO Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:host.name=dev-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:java.version=1.8.0_212 (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:java.class.path=/usr/bin/../share/java/kafka/audience-annotations-0.5.0.jar:/usr/bin/../share/java/kafka/jetty-util-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/javax.servlet-api-3.1.0.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-2.28.jar:/usr/bin/../share/java/kafka/jersey-media-jaxb-2.28.jar:/usr/bin/../share/java/kafka/lz4-java-1.6.0.jar:/usr/bin/../share/java/kafka/jetty-io-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/commons-compress-1.8.1.jar:/usr/bin/../share/java/kafka/httpmime-4.5.7.jar:/usr/bin/../share/java/kafka/connect-json-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jackson-annotations-2.9.9.jar:/usr/bin/../share/java/kafka/paranamer-2.8.jar:/usr/bin/../share/java/kafka/scala-logging_2.11-3.9.0.jar:/usr/bin/../share/java/kafka/jackson-databind-2.9.9.3.jar:/usr/bin/../share/java/kafka/kafka-streams-examples-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-base-2.9.9.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-core-2.28.jar:/usr/bin/../share/java/kafka/jackson-datatype-jdk8-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-json-provider-2.9.9.jar:/usr/bin/../share/java/kafka/reflections-0.9.11.jar:/usr/bin/../share/java/kafka/jackson-mapper-asl-1.9.13.jar:/usr/bin/../share/java/kafka/paranamer-2.7.jar:/usr/bin/../share/java/kafka/slf4j-api-1.7.26.jar:/usr/bin/../share/java/kafka/zookeeper-3.4.14.jar:/usr/bin/../share/java/kafka/xz-1.5.jar:/usr/bin/../share/java/kafka/httpcore-4.4.11.jar:/usr/bin/../share/java/kafka/metrics-core-2.2.0.jar:/usr/bin/../share/java/kafka/jetty-servlet-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-server-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/commons-lang3-3.8.1.jar:/usr/bin/../share/java/kafka/osgi-resource-locator-1.0.1.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-api-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/log4j-1.2.17.jar:/usr/bin/../share/java/kafka/jersey-common-2.28.jar:/usr/bin/../share/java/kafka/hk2-locator-2.5.0.jar:/usr/bin/../share/java/kafka/kafka-clients-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/commons-logging-1.2.jar:/usr/bin/../share/java/kafka/jackson-core-asl-1.9.13.jar:/usr/bin/../share/java/kafka/jakarta.ws.rs-api-2.1.5.jar:/usr/bin/../share/java/kafka/jopt-simple-5.0.4.jar:/usr/bin/../share/java/kafka/connect-file-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jersey-client-2.28.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-test-sources.jar:/usr/bin/../share/java/kafka/plexus-utils-3.2.0.jar:/usr/bin/../share/java/kafka/kafka-streams-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jetty-client-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jackson-core-2.9.9.jar:/usr/bin/../share/java/kafka/jsr305-3.0.2.jar:/usr/bin/../share/java/kafka/commons-codec-1.11.jar:/usr/bin/../share/java/kafka/javassist-3.22.0-CR2.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-sources.jar:/usr/bin/../share/java/kafka/zstd-jni-1.4.0-1.jar:/usr/bin/../share/java/kafka/connect-runtime-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jersey-server-2.28.jar:/usr/bin/../share/java/kafka/kafka-log4j-appender-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/httpclient-4.5.7.jar:/usr/bin/../share/java/kafka/support-metrics-client-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/maven-artifact-3.6.1.jar:/usr/bin/../share/java/kafka/avro-1.8.1.jar:/usr/bin/../share/java/kafka/aopalliance-repackaged-2.5.0.jar:/usr/bin/../share/java/kafka/jakarta.inject-2.5.0.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-javadoc.jar:/usr/bin/../share/java/kafka/slf4j-log4j12-1.7.26.jar:/usr/bin/../share/java/kafka/jakarta.annotation-api-1.3.4.jar:/usr/bin/../share/java/kafka/spotbugs-annotations-3.1.9.jar:/usr/bin/../share/java/kafka/jetty-servlets-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/snappy-java-1.1.7.3.jar:/usr/bin/../share/java/kafka/jackson-module-scala_2.11-2.9.9.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.1.1.jar:/usr/bin/../share/java/kafka/hk2-api-2.5.0.jar:/usr/bin/../share/java/kafka/hk2-utils-2.5.0.jar:/usr/bin/../share/java/kafka/support-metrics-common-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jetty-continuation-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/kafka-streams-test-utils-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jersey-hk2-2.28.jar:/usr/bin/../share/java/kafka/guava-20.0.jar:/usr/bin/../share/java/kafka/jackson-module-paranamer-2.9.9.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-scaladoc.jar:/usr/bin/../share/java/kafka/activation-1.1.1.jar:/usr/bin/../share/java/kafka/jetty-http-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/kafka-streams-scala_2.11-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jetty-security-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/kafka.jar:/usr/bin/../share/java/kafka/connect-transforms-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jackson-dataformat-csv-2.9.9.jar:/usr/bin/../share/java/kafka/jaxb-api-2.3.0.jar:/usr/bin/../share/java/kafka/scala-library-2.11.12.jar:/usr/bin/../share/java/kafka/validation-api-2.0.1.Final.jar:/usr/bin/../share/java/kafka/kafka-tools-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jackson-module-jaxb-annotations-2.9.9.jar:/usr/bin/../share/java/kafka/rocksdbjni-5.18.3.jar:/usr/bin/../share/java/kafka/connect-basic-auth-extension-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/scala-reflect-2.11.12.jar:/usr/bin/../share/java/kafka/argparse4j-0.7.0.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-test.jar:/usr/bin/../share/java/kafka/zkclient-0.11.jar:/usr/bin/../share/java/kafka/kafka11aaf-jar-with-dependencies.jar:/usr/bin/../support-metrics-client/build/dependant-libs-2.12/*:/usr/bin/../support-metrics-client/build/libs/*:/usr/share/java/support-metrics-client/* (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:os.version=4.15.0-117-generic (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:user.name=mrkafka (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,531] INFO Client environment:user.home=/home/mrkafka (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,532] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,533] INFO Initiating client connection, connectString=dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=6000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@28701274 (org.apache.zookeeper.ZooKeeper)
[2021-06-03 22:46:34,546] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2021-06-03 22:46:34,555] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[2021-06-03 22:46:34,556] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-06-03 22:46:34,588] INFO Opening socket connection to server dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.242.171.9:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-06-03 22:46:34,594] INFO Socket connection established to dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.242.171.9:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-06-03 22:46:34,631] INFO Session establishment complete on server dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.242.171.9:2181, sessionid = 0x300001c2d150000, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)
[2021-06-03 22:46:34,635] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2021-06-03 22:46:35,039] INFO Cluster ID = P3Yzo-7gQzaXtwUehK5FWw (kafka.server.KafkaServer)
[2021-06-03 22:46:35,042] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2021-06-03 22:46:35,093] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://172.16.10.172:30492,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.id = 2
	broker.id.generation.enable = true
	broker.rack = null
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.3-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.3-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1000012
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 10000
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = [DEFAULT]
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.connect = dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 6000
	zookeeper.set.acl = true
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2021-06-03 22:46:35,101] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://172.16.10.172:30492,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.id = 2
	broker.id.generation.enable = true
	broker.rack = null
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.3-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.3-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1000012
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 10000
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = [DEFAULT]
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.connect = dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 6000
	zookeeper.set.acl = true
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2021-06-03 22:46:35,124] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-06-03 22:46:35,124] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-06-03 22:46:35,125] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-06-03 22:46:35,166] INFO Loading logs. (kafka.log.LogManager)
[2021-06-03 22:46:35,177] INFO Logs loading complete in 11 ms. (kafka.log.LogManager)
[2021-06-03 22:46:35,198] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
[2021-06-03 22:46:35,200] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[2021-06-03 22:46:35,202] INFO Starting the log cleaner (kafka.log.LogCleaner)
[2021-06-03 22:46:35,459] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner)
[2021-06-03 22:46:35,769] INFO Awaiting socket connections on 0.0.0.0:9091. (kafka.network.Acceptor)
[2021-06-03 22:46:35,784] INFO Successfully logged in. (org.apache.kafka.common.security.authenticator.AbstractLogin)
[2021-06-03 22:46:35,811] INFO [SocketServer brokerId=2] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9091,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT) (kafka.network.SocketServer)
[2021-06-03 22:46:35,812] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
[2021-06-03 22:46:35,823] INFO [SocketServer brokerId=2] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT) (kafka.network.SocketServer)
[2021-06-03 22:46:35,825] INFO [SocketServer brokerId=2] Started 2 acceptor threads for data-plane (kafka.network.SocketServer)
[2021-06-03 22:46:35,844] INFO [ExpirationReaper-2-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-03 22:46:35,845] INFO [ExpirationReaper-2-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-03 22:46:35,845] INFO [ExpirationReaper-2-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-03 22:46:35,847] INFO [ExpirationReaper-2-ElectPreferredLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-03 22:46:35,863] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2021-06-03 22:46:35,928] INFO Creating /brokers/ids/2 (is it secure? true) (kafka.zk.KafkaZkClient)
[2021-06-03 22:46:35,962] INFO Stat of the created znode at /brokers/ids/2 is: 12884901932,12884901932,1622760395947,1622760395947,1,0,0,216172903129219072,366,0,12884901932
 (kafka.zk.KafkaZkClient)
[2021-06-03 22:46:35,963] INFO Registered broker 2 at path /brokers/ids/2 with addresses: ArrayBuffer(EndPoint(172.16.10.172,30492,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT), EndPoint(dev-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT)), czxid (broker epoch): 12884901932 (kafka.zk.KafkaZkClient)
[2021-06-03 22:46:35,965] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2021-06-03 22:46:36,021] INFO [ControllerEventThread controllerId=2] Starting (kafka.controller.ControllerEventManager$ControllerEventThread)
[2021-06-03 22:46:36,024] INFO [ExpirationReaper-2-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-03 22:46:36,026] INFO [ExpirationReaper-2-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-03 22:46:36,033] INFO [ExpirationReaper-2-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-03 22:46:36,043] DEBUG [Controller id=2] Broker 1 has been elected as the controller, so stopping the election process. (kafka.controller.KafkaController)
[2021-06-03 22:46:36,049] INFO [GroupCoordinator 2]: Starting up. (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:46:36,050] INFO [GroupCoordinator 2]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:46:36,052] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 2 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:46:36,070] INFO [ProducerId Manager 2]: Acquired new producerId block (brokerId:2,blockStartProducerId:10000,blockEndProducerId:10999) by writing to Zk with path version 11 (kafka.coordinator.transaction.ProducerIdManager)
[2021-06-03 22:46:36,092] INFO [TransactionCoordinator id=2] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-06-03 22:46:36,093] INFO [Transaction Marker Channel Manager 2]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2021-06-03 22:46:36,093] INFO [TransactionCoordinator id=2] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-06-03 22:46:36,125] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2021-06-03 22:46:36,140] INFO [SocketServer brokerId=2] Started data-plane processors for 2 acceptors (kafka.network.SocketServer)
[2021-06-03 22:46:36,143] INFO Kafka version: 5.3.1-ccs (org.apache.kafka.common.utils.AppInfoParser)
[2021-06-03 22:46:36,144] INFO Kafka commitId: 03799faf9878a999 (org.apache.kafka.common.utils.AppInfoParser)
[2021-06-03 22:46:36,144] INFO Kafka startTimeMs: 1622760396141 (org.apache.kafka.common.utils.AppInfoParser)
[2021-06-03 22:46:36,145] INFO [KafkaServer id=2] started (kafka.server.KafkaServer)
2021-06-03T22:48:26.764+0000 INIT [cadi] Loading CADI Properties from /opt/app/osaaf/local/org.onap.dmaap.mr.location.props
2021-06-03T22:48:26.766+0000 INIT [cadi] Loading CADI Properties from /opt/app/osaaf/local/org.onap.dmaap.mr.cred.props
2021-06-03T22:48:26.771+0000 INIT [cadi] cadi_keyfile points to /opt/app/osaaf/local/org.onap.dmaap.mr.keyfile
2021-06-03T22:48:27.088+0000 INIT [cadi] cadi_protocols is set to TLSv1.1,TLSv1.2
2021-06-03T22:48:27.237+0000 INIT [cadi] AAFLocator for https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1 could not be created. java.net.URISyntaxException: Malformed escape pair at index 36: https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1
2021-06-03T22:48:27.237+0000 ERROR [cadi] Null Locator passed [Ljava.lang.Object;@46c4dbb0
org.onap.aaf.cadi.LocatorException: Null Locator passed
	at org.onap.aaf.cadi.http.HMangr.(HMangr.java:53)
	at org.onap.aaf.cadi.aaf.v2_0.AAFConHttp.(AAFConHttp.java:54)
	at org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider.setup(Cadi3AAFProvider.java:141)
	at org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider.(Cadi3AAFProvider.java:111)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at java.lang.Class.newInstance(Class.java:442)
	at java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:380)
	at java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
	at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
	at org.onap.dmaap.commonauth.kafka.base.authorization.AuthorizationProviderFactory.(AuthorizationProviderFactory.java:34)
	at org.onap.dmaap.commonauth.kafka.base.authorization.AuthorizationProviderFactory.(AuthorizationProviderFactory.java:29)
	at org.onap.dmaap.kafkaAuthorize.PlainSaslServer1.evaluateResponse(PlainSaslServer1.java:106)
	at org.apache.kafka.common.security.authenticator.SaslServerAuthenticator.handleSaslToken(SaslServerAuthenticator.java:451)
	at org.apache.kafka.common.security.authenticator.SaslServerAuthenticator.authenticate(SaslServerAuthenticator.java:291)
	at org.apache.kafka.common.network.KafkaChannel.prepare(KafkaChannel.java:173)
	at org.apache.kafka.common.network.Selector.pollSelectionKeys(Selector.java:547)
	at org.apache.kafka.common.network.Selector.poll(Selector.java:483)
	at kafka.network.Processor.poll(SocketServer.scala:863)
	at kafka.network.Processor.run(SocketServer.scala:762)
	at java.lang.Thread.run(Thread.java:748)
[2021-06-03 22:48:27,239] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:48:27,239] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:51:25,101] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:51:25,102] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:52:30,258] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 2 from controller 1 epoch 8 for partition POLICY-PDP-PAP-1 (state.change.logger)
[2021-06-03 22:52:30,259] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 2 from controller 1 epoch 8 for partition POLICY-PDP-PAP-2 (state.change.logger)
[2021-06-03 22:52:30,259] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 2 from controller 1 epoch 8 for partition POLICY-PDP-PAP-0 (state.change.logger)
[2021-06-03 22:52:30,271] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 2 from controller 1 epoch 8 starting the become-leader transition for partition POLICY-PDP-PAP-0 (state.change.logger)
[2021-06-03 22:52:30,273] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(POLICY-PDP-PAP-0) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:52:30,370] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:30,379] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 81 ms (kafka.log.Log)
[2021-06-03 22:52:30,383] INFO Created log for partition POLICY-PDP-PAP-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:30,384] INFO [Partition POLICY-PDP-PAP-0 broker=2] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-0 (kafka.cluster.Partition)
[2021-06-03 22:52:30,387] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:30,387] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:30,387] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:30,390] INFO [Partition POLICY-PDP-PAP-0 broker=2] POLICY-PDP-PAP-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:30,411] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 2 for partition POLICY-PDP-PAP-0 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:30,412] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 2 from controller 1 epoch 8 for the become-leader transition for partition POLICY-PDP-PAP-0 (state.change.logger)
[2021-06-03 22:52:30,412] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 2 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger)
[2021-06-03 22:52:30,412] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 2 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-06-03 22:52:30,415] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:30,450] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:30,453] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 33 ms (kafka.log.Log)
[2021-06-03 22:52:30,454] INFO Created log for partition POLICY-PDP-PAP-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:30,455] INFO [Partition POLICY-PDP-PAP-1 broker=2] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-1 (kafka.cluster.Partition)
[2021-06-03 22:52:30,455] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:30,455] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:30,456] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:30,456] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:30,487] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:30,489] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 28 ms (kafka.log.Log)
[2021-06-03 22:52:30,491] INFO Created log for partition POLICY-PDP-PAP-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:30,491] INFO [Partition POLICY-PDP-PAP-2 broker=2] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-2 (kafka.cluster.Partition)
[2021-06-03 22:52:30,491] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:30,493] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(POLICY-PDP-PAP-2, POLICY-PDP-PAP-1) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:52:30,495] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 2 for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-06-03 22:52:30,495] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 2 for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger)
[2021-06-03 22:52:30,498] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-2 as part of become-follower request with correlation id 2 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:30,498] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-1 as part of become-follower request with correlation id 2 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:30,581] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:30,582] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:52:30,582] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:52:30,587] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=dev-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-PDP-PAP-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:52:30,594] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=dev-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-PDP-PAP-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:52:30,594] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:30,598] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 2 for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-06-03 22:52:30,599] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 2 for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger)
[2021-06-03 22:52:30,599] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:52:30,599] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:52:30,600] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 2 from controller 1 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-1 with leader 0 (state.change.logger)
[2021-06-03 22:52:30,600] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 2 from controller 1 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-06-03 22:52:30,600] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition POLICY-PDP-PAP-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:30,607] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:30,623] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 3 (state.change.logger)
[2021-06-03 22:52:30,623] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition POLICY-PDP-PAP-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 3 (state.change.logger)
[2021-06-03 22:52:30,623] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 3 (state.change.logger)
[2021-06-03 22:52:31,591] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition POLICY-PDP-PAP-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:31,591] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:55,887] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-13 (state.change.logger)
[2021-06-03 22:52:55,888] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-46 (state.change.logger)
[2021-06-03 22:52:55,888] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-9 (state.change.logger)
[2021-06-03 22:52:55,888] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-42 (state.change.logger)
[2021-06-03 22:52:55,888] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-21 (state.change.logger)
[2021-06-03 22:52:55,888] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-17 (state.change.logger)
[2021-06-03 22:52:55,888] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-30 (state.change.logger)
[2021-06-03 22:52:55,888] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-26 (state.change.logger)
[2021-06-03 22:52:55,888] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-5 (state.change.logger)
[2021-06-03 22:52:55,888] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-38 (state.change.logger)
[2021-06-03 22:52:55,889] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-1 (state.change.logger)
[2021-06-03 22:52:55,889] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-34 (state.change.logger)
[2021-06-03 22:52:55,889] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-16 (state.change.logger)
[2021-06-03 22:52:55,889] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-45 (state.change.logger)
[2021-06-03 22:52:55,889] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-12 (state.change.logger)
[2021-06-03 22:52:55,889] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-41 (state.change.logger)
[2021-06-03 22:52:55,889] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-24 (state.change.logger)
[2021-06-03 22:52:55,889] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-20 (state.change.logger)
[2021-06-03 22:52:55,889] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-49 (state.change.logger)
[2021-06-03 22:52:55,890] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-0 (state.change.logger)
[2021-06-03 22:52:55,890] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-29 (state.change.logger)
[2021-06-03 22:52:55,890] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-25 (state.change.logger)
[2021-06-03 22:52:55,890] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-8 (state.change.logger)
[2021-06-03 22:52:55,890] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-37 (state.change.logger)
[2021-06-03 22:52:55,890] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-4 (state.change.logger)
[2021-06-03 22:52:55,890] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-33 (state.change.logger)
[2021-06-03 22:52:55,890] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-15 (state.change.logger)
[2021-06-03 22:52:55,890] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-48 (state.change.logger)
[2021-06-03 22:52:55,891] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-11 (state.change.logger)
[2021-06-03 22:52:55,891] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-44 (state.change.logger)
[2021-06-03 22:52:55,891] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-23 (state.change.logger)
[2021-06-03 22:52:55,891] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-19 (state.change.logger)
[2021-06-03 22:52:55,891] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-32 (state.change.logger)
[2021-06-03 22:52:55,891] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-28 (state.change.logger)
[2021-06-03 22:52:55,891] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-7 (state.change.logger)
[2021-06-03 22:52:55,891] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-40 (state.change.logger)
[2021-06-03 22:52:55,891] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-3 (state.change.logger)
[2021-06-03 22:52:55,891] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-36 (state.change.logger)
[2021-06-03 22:52:55,892] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-47 (state.change.logger)
[2021-06-03 22:52:55,892] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-14 (state.change.logger)
[2021-06-03 22:52:55,892] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-43 (state.change.logger)
[2021-06-03 22:52:55,892] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-10 (state.change.logger)
[2021-06-03 22:52:55,892] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-22 (state.change.logger)
[2021-06-03 22:52:55,892] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-18 (state.change.logger)
[2021-06-03 22:52:55,892] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-31 (state.change.logger)
[2021-06-03 22:52:55,893] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-27 (state.change.logger)
[2021-06-03 22:52:55,893] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-39 (state.change.logger)
[2021-06-03 22:52:55,893] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-6 (state.change.logger)
[2021-06-03 22:52:55,893] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-35 (state.change.logger)
[2021-06-03 22:52:55,893] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-2 (state.change.logger)
[2021-06-03 22:52:55,938] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-29 (state.change.logger)
[2021-06-03 22:52:55,938] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-26 (state.change.logger)
[2021-06-03 22:52:55,938] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-23 (state.change.logger)
[2021-06-03 22:52:55,938] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-20 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-17 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-14 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-11 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-8 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-5 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-2 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-47 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-38 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-35 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-44 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-32 (state.change.logger)
[2021-06-03 22:52:55,939] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-41 (state.change.logger)
[2021-06-03 22:52:55,940] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(__consumer_offsets-8, __consumer_offsets-35, __consumer_offsets-41, __consumer_offsets-23, __consumer_offsets-47, __consumer_offsets-38, __consumer_offsets-17, __consumer_offsets-11, __consumer_offsets-2, __consumer_offsets-14, __consumer_offsets-20, __consumer_offsets-44, __consumer_offsets-5, __consumer_offsets-26, __consumer_offsets-29, __consumer_offsets-32) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:52:55,975] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:55,979] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 33 ms (kafka.log.Log)
[2021-06-03 22:52:55,980] INFO Created log for partition __consumer_offsets-29 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:55,982] INFO [Partition __consumer_offsets-29 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-29 (kafka.cluster.Partition)
[2021-06-03 22:52:55,982] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:55,982] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:55,983] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:55,983] INFO [Partition __consumer_offsets-29 broker=2] __consumer_offsets-29 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:55,991] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-29 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,023] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,043] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 47 ms (kafka.log.Log)
[2021-06-03 22:52:56,044] INFO Created log for partition __consumer_offsets-26 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,046] INFO [Partition __consumer_offsets-26 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-26 (kafka.cluster.Partition)
[2021-06-03 22:52:56,046] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,046] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,046] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,047] INFO [Partition __consumer_offsets-26 broker=2] __consumer_offsets-26 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,053] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-26 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,083] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,086] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 27 ms (kafka.log.Log)
[2021-06-03 22:52:56,087] INFO Created log for partition __consumer_offsets-23 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,088] INFO [Partition __consumer_offsets-23 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-23 (kafka.cluster.Partition)
[2021-06-03 22:52:56,088] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,088] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,088] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,088] INFO [Partition __consumer_offsets-23 broker=2] __consumer_offsets-23 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,097] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-23 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,127] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,130] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 29 ms (kafka.log.Log)
[2021-06-03 22:52:56,130] INFO Created log for partition __consumer_offsets-20 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,131] INFO [Partition __consumer_offsets-20 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-20 (kafka.cluster.Partition)
[2021-06-03 22:52:56,131] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,131] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,131] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,131] INFO [Partition __consumer_offsets-20 broker=2] __consumer_offsets-20 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,140] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-20 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,179] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,181] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 29 ms (kafka.log.Log)
[2021-06-03 22:52:56,182] INFO Created log for partition __consumer_offsets-17 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,183] INFO [Partition __consumer_offsets-17 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-17 (kafka.cluster.Partition)
[2021-06-03 22:52:56,183] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,183] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,183] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,183] INFO [Partition __consumer_offsets-17 broker=2] __consumer_offsets-17 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,192] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-17 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,236] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,240] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 38 ms (kafka.log.Log)
[2021-06-03 22:52:56,241] INFO Created log for partition __consumer_offsets-14 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,242] INFO [Partition __consumer_offsets-14 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-14 (kafka.cluster.Partition)
[2021-06-03 22:52:56,242] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,242] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,242] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,242] INFO [Partition __consumer_offsets-14 broker=2] __consumer_offsets-14 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,252] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-14 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,289] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,292] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 34 ms (kafka.log.Log)
[2021-06-03 22:52:56,293] INFO Created log for partition __consumer_offsets-11 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,294] INFO [Partition __consumer_offsets-11 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-11 (kafka.cluster.Partition)
[2021-06-03 22:52:56,294] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,294] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,294] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,294] INFO [Partition __consumer_offsets-11 broker=2] __consumer_offsets-11 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,301] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-11 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,333] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,336] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 30 ms (kafka.log.Log)
[2021-06-03 22:52:56,337] INFO Created log for partition __consumer_offsets-8 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,338] INFO [Partition __consumer_offsets-8 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-8 (kafka.cluster.Partition)
[2021-06-03 22:52:56,338] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,338] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,338] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,338] INFO [Partition __consumer_offsets-8 broker=2] __consumer_offsets-8 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,346] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-8 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,377] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,380] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 29 ms (kafka.log.Log)
[2021-06-03 22:52:56,381] INFO Created log for partition __consumer_offsets-5 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,382] INFO [Partition __consumer_offsets-5 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-5 (kafka.cluster.Partition)
[2021-06-03 22:52:56,382] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,382] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,382] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,382] INFO [Partition __consumer_offsets-5 broker=2] __consumer_offsets-5 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,390] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-5 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,421] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,426] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 31 ms (kafka.log.Log)
[2021-06-03 22:52:56,429] INFO Created log for partition __consumer_offsets-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,430] INFO [Partition __consumer_offsets-2 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-2 (kafka.cluster.Partition)
[2021-06-03 22:52:56,430] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,430] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,430] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,430] INFO [Partition __consumer_offsets-2 broker=2] __consumer_offsets-2 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,444] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-2 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,486] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,489] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 38 ms (kafka.log.Log)
[2021-06-03 22:52:56,490] INFO Created log for partition __consumer_offsets-47 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,492] INFO [Partition __consumer_offsets-47 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-47 (kafka.cluster.Partition)
[2021-06-03 22:52:56,492] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,492] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,492] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,493] INFO [Partition __consumer_offsets-47 broker=2] __consumer_offsets-47 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,502] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-47 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,544] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,547] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 38 ms (kafka.log.Log)
[2021-06-03 22:52:56,549] INFO Created log for partition __consumer_offsets-38 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,550] INFO [Partition __consumer_offsets-38 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-38 (kafka.cluster.Partition)
[2021-06-03 22:52:56,550] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,550] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,550] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,551] INFO [Partition __consumer_offsets-38 broker=2] __consumer_offsets-38 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,559] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-38 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,814] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,817] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 252 ms (kafka.log.Log)
[2021-06-03 22:52:56,819] INFO Created log for partition __consumer_offsets-35 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,819] INFO [Partition __consumer_offsets-35 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-35 (kafka.cluster.Partition)
[2021-06-03 22:52:56,819] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,820] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,820] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,820] INFO [Partition __consumer_offsets-35 broker=2] __consumer_offsets-35 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,830] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-35 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,864] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,867] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 32 ms (kafka.log.Log)
[2021-06-03 22:52:56,868] INFO Created log for partition __consumer_offsets-44 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,869] INFO [Partition __consumer_offsets-44 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-44 (kafka.cluster.Partition)
[2021-06-03 22:52:56,869] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,869] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,870] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,870] INFO [Partition __consumer_offsets-44 broker=2] __consumer_offsets-44 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,877] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-44 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,912] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,916] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 32 ms (kafka.log.Log)
[2021-06-03 22:52:56,917] INFO Created log for partition __consumer_offsets-32 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,918] INFO [Partition __consumer_offsets-32 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-32 (kafka.cluster.Partition)
[2021-06-03 22:52:56,918] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,919] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,919] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,919] INFO [Partition __consumer_offsets-32 broker=2] __consumer_offsets-32 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,926] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-32 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,956] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:56,959] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 28 ms (kafka.log.Log)
[2021-06-03 22:52:56,960] INFO Created log for partition __consumer_offsets-41 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:56,961] INFO [Partition __consumer_offsets-41 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-41 (kafka.cluster.Partition)
[2021-06-03 22:52:56,961] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,961] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,961] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:56,961] INFO [Partition __consumer_offsets-41 broker=2] __consumer_offsets-41 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:52:56,968] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-41 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:52:56,969] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-29 (state.change.logger)
[2021-06-03 22:52:56,969] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-26 (state.change.logger)
[2021-06-03 22:52:56,969] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-23 (state.change.logger)
[2021-06-03 22:52:56,969] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-20 (state.change.logger)
[2021-06-03 22:52:56,969] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-17 (state.change.logger)
[2021-06-03 22:52:56,969] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-14 (state.change.logger)
[2021-06-03 22:52:56,970] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-11 (state.change.logger)
[2021-06-03 22:52:56,970] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-8 (state.change.logger)
[2021-06-03 22:52:56,970] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-5 (state.change.logger)
[2021-06-03 22:52:56,970] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-2 (state.change.logger)
[2021-06-03 22:52:56,970] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-47 (state.change.logger)
[2021-06-03 22:52:56,970] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-38 (state.change.logger)
[2021-06-03 22:52:56,970] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-35 (state.change.logger)
[2021-06-03 22:52:56,970] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-44 (state.change.logger)
[2021-06-03 22:52:56,970] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-32 (state.change.logger)
[2021-06-03 22:52:56,971] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-41 (state.change.logger)
[2021-06-03 22:52:56,971] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-0 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,971] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-48 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,971] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-10 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,971] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-45 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,971] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-7 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,971] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-42 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,972] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-4 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,972] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-1 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,972] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-39 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,972] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-36 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,972] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-33 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,972] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-49 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,972] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-30 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,973] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-46 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,973] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-27 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,973] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-24 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,973] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-43 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,973] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-21 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,974] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-40 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,974] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-37 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,974] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-18 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,974] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-34 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,974] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-15 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,975] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-12 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,975] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-31 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,975] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-9 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,975] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-19 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,975] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-28 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,975] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-6 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,976] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-25 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,976] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-16 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,976] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-22 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,976] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-3 with leader 0 (state.change.logger)
[2021-06-03 22:52:56,976] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-13 with leader 1 (state.change.logger)
[2021-06-03 22:52:56,977] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,015] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,019] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 34 ms (kafka.log.Log)
[2021-06-03 22:52:57,020] INFO Created log for partition __consumer_offsets-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,021] INFO [Partition __consumer_offsets-0 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-0 (kafka.cluster.Partition)
[2021-06-03 22:52:57,021] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,021] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,021] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,060] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,062] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 36 ms (kafka.log.Log)
[2021-06-03 22:52:57,063] INFO Created log for partition __consumer_offsets-48 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,064] INFO [Partition __consumer_offsets-48 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-48 (kafka.cluster.Partition)
[2021-06-03 22:52:57,064] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,064] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,064] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,091] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,094] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 25 ms (kafka.log.Log)
[2021-06-03 22:52:57,095] INFO Created log for partition __consumer_offsets-10 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,096] INFO [Partition __consumer_offsets-10 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-10 (kafka.cluster.Partition)
[2021-06-03 22:52:57,096] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,096] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,096] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,096] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,145] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,148] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 45 ms (kafka.log.Log)
[2021-06-03 22:52:57,149] INFO Created log for partition __consumer_offsets-45 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,150] INFO [Partition __consumer_offsets-45 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-45 (kafka.cluster.Partition)
[2021-06-03 22:52:57,150] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,150] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,150] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,182] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,186] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 30 ms (kafka.log.Log)
[2021-06-03 22:52:57,186] INFO Created log for partition __consumer_offsets-7 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,187] INFO [Partition __consumer_offsets-7 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-7 (kafka.cluster.Partition)
[2021-06-03 22:52:57,187] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,187] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,223] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,225] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 34 ms (kafka.log.Log)
[2021-06-03 22:52:57,226] INFO Created log for partition __consumer_offsets-42 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,227] INFO [Partition __consumer_offsets-42 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-42 (kafka.cluster.Partition)
[2021-06-03 22:52:57,227] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,227] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,227] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,253] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,256] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 25 ms (kafka.log.Log)
[2021-06-03 22:52:57,256] INFO Created log for partition __consumer_offsets-4 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,257] INFO [Partition __consumer_offsets-4 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-4 (kafka.cluster.Partition)
[2021-06-03 22:52:57,257] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,257] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,257] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,257] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,292] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,295] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 34 ms (kafka.log.Log)
[2021-06-03 22:52:57,296] INFO Created log for partition __consumer_offsets-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,296] INFO [Partition __consumer_offsets-1 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-1 (kafka.cluster.Partition)
[2021-06-03 22:52:57,296] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,296] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,296] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,327] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,332] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 31 ms (kafka.log.Log)
[2021-06-03 22:52:57,335] INFO Created log for partition __consumer_offsets-39 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,336] INFO [Partition __consumer_offsets-39 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-39 (kafka.cluster.Partition)
[2021-06-03 22:52:57,336] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,336] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,377] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,379] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 35 ms (kafka.log.Log)
[2021-06-03 22:52:57,380] INFO Created log for partition __consumer_offsets-36 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,381] INFO [Partition __consumer_offsets-36 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-36 (kafka.cluster.Partition)
[2021-06-03 22:52:57,381] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,381] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,381] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,381] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,417] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,424] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 39 ms (kafka.log.Log)
[2021-06-03 22:52:57,425] INFO Created log for partition __consumer_offsets-33 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,426] INFO [Partition __consumer_offsets-33 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-33 (kafka.cluster.Partition)
[2021-06-03 22:52:57,426] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,426] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,426] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,457] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,460] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 31 ms (kafka.log.Log)
[2021-06-03 22:52:57,461] INFO Created log for partition __consumer_offsets-49 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,461] INFO [Partition __consumer_offsets-49 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-49 (kafka.cluster.Partition)
[2021-06-03 22:52:57,462] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,462] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,495] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,497] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 30 ms (kafka.log.Log)
[2021-06-03 22:52:57,498] INFO Created log for partition __consumer_offsets-30 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,499] INFO [Partition __consumer_offsets-30 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-30 (kafka.cluster.Partition)
[2021-06-03 22:52:57,499] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,499] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,499] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,544] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,548] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 42 ms (kafka.log.Log)
[2021-06-03 22:52:57,550] INFO Created log for partition __consumer_offsets-46 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,551] INFO [Partition __consumer_offsets-46 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-46 (kafka.cluster.Partition)
[2021-06-03 22:52:57,551] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,551] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,552] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,552] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,587] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,589] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 33 ms (kafka.log.Log)
[2021-06-03 22:52:57,590] INFO Created log for partition __consumer_offsets-27 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,591] INFO [Partition __consumer_offsets-27 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-27 (kafka.cluster.Partition)
[2021-06-03 22:52:57,591] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,591] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,620] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,622] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 27 ms (kafka.log.Log)
[2021-06-03 22:52:57,623] INFO Created log for partition __consumer_offsets-24 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,623] INFO [Partition __consumer_offsets-24 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-24 (kafka.cluster.Partition)
[2021-06-03 22:52:57,624] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,624] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,624] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,624] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,653] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,655] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 27 ms (kafka.log.Log)
[2021-06-03 22:52:57,656] INFO Created log for partition __consumer_offsets-43 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,656] INFO [Partition __consumer_offsets-43 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-43 (kafka.cluster.Partition)
[2021-06-03 22:52:57,656] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,657] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,657] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,691] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,694] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 33 ms (kafka.log.Log)
[2021-06-03 22:52:57,695] INFO Created log for partition __consumer_offsets-21 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,696] INFO [Partition __consumer_offsets-21 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-21 (kafka.cluster.Partition)
[2021-06-03 22:52:57,696] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,696] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,728] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,733] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 32 ms (kafka.log.Log)
[2021-06-03 22:52:57,734] INFO Created log for partition __consumer_offsets-40 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,735] INFO [Partition __consumer_offsets-40 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-40 (kafka.cluster.Partition)
[2021-06-03 22:52:57,735] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,735] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,735] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,735] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,779] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,782] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 33 ms (kafka.log.Log)
[2021-06-03 22:52:57,783] INFO Created log for partition __consumer_offsets-37 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,783] INFO [Partition __consumer_offsets-37 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-37 (kafka.cluster.Partition)
[2021-06-03 22:52:57,784] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,784] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,814] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,816] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 27 ms (kafka.log.Log)
[2021-06-03 22:52:57,817] INFO Created log for partition __consumer_offsets-18 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,817] INFO [Partition __consumer_offsets-18 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-18 (kafka.cluster.Partition)
[2021-06-03 22:52:57,818] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,818] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,818] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,845] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,847] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 25 ms (kafka.log.Log)
[2021-06-03 22:52:57,848] INFO Created log for partition __consumer_offsets-34 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,848] INFO [Partition __consumer_offsets-34 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-34 (kafka.cluster.Partition)
[2021-06-03 22:52:57,849] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,849] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,849] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,849] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,899] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,901] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 48 ms (kafka.log.Log)
[2021-06-03 22:52:57,902] INFO Created log for partition __consumer_offsets-15 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,902] INFO [Partition __consumer_offsets-15 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-15 (kafka.cluster.Partition)
[2021-06-03 22:52:57,903] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,903] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,948] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,951] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 42 ms (kafka.log.Log)
[2021-06-03 22:52:57,952] INFO Created log for partition __consumer_offsets-12 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,952] INFO [Partition __consumer_offsets-12 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-12 (kafka.cluster.Partition)
[2021-06-03 22:52:57,952] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,952] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,952] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,952] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,983] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:57,986] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 29 ms (kafka.log.Log)
[2021-06-03 22:52:57,987] INFO Created log for partition __consumer_offsets-31 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:57,987] INFO [Partition __consumer_offsets-31 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-31 (kafka.cluster.Partition)
[2021-06-03 22:52:57,987] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,988] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:57,988] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,414] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:58,417] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 425 ms (kafka.log.Log)
[2021-06-03 22:52:58,418] INFO Created log for partition __consumer_offsets-9 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:58,419] INFO [Partition __consumer_offsets-9 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-9 (kafka.cluster.Partition)
[2021-06-03 22:52:58,419] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,419] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,419] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,454] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:58,456] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 29 ms (kafka.log.Log)
[2021-06-03 22:52:58,457] INFO Created log for partition __consumer_offsets-19 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:58,458] INFO [Partition __consumer_offsets-19 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-19 (kafka.cluster.Partition)
[2021-06-03 22:52:58,458] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,458] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,489] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:58,492] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 30 ms (kafka.log.Log)
[2021-06-03 22:52:58,492] INFO Created log for partition __consumer_offsets-28 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:58,493] INFO [Partition __consumer_offsets-28 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-28 (kafka.cluster.Partition)
[2021-06-03 22:52:58,493] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,493] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,493] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,521] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:58,523] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 26 ms (kafka.log.Log)
[2021-06-03 22:52:58,524] INFO Created log for partition __consumer_offsets-6 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:58,524] INFO [Partition __consumer_offsets-6 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-6 (kafka.cluster.Partition)
[2021-06-03 22:52:58,525] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,525] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,525] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,525] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,554] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:58,556] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 25 ms (kafka.log.Log)
[2021-06-03 22:52:58,557] INFO Created log for partition __consumer_offsets-25 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:58,558] INFO [Partition __consumer_offsets-25 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-25 (kafka.cluster.Partition)
[2021-06-03 22:52:58,558] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,558] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,587] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:58,590] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 28 ms (kafka.log.Log)
[2021-06-03 22:52:58,591] INFO Created log for partition __consumer_offsets-16 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:58,591] INFO [Partition __consumer_offsets-16 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-16 (kafka.cluster.Partition)
[2021-06-03 22:52:58,591] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,591] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,592] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,618] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:58,621] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 23 ms (kafka.log.Log)
[2021-06-03 22:52:58,621] INFO Created log for partition __consumer_offsets-22 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:58,622] INFO [Partition __consumer_offsets-22 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-22 (kafka.cluster.Partition)
[2021-06-03 22:52:58,622] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,622] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,622] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,622] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,649] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:58,652] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 26 ms (kafka.log.Log)
[2021-06-03 22:52:58,653] INFO Created log for partition __consumer_offsets-3 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:58,654] INFO [Partition __consumer_offsets-3 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-3 (kafka.cluster.Partition)
[2021-06-03 22:52:58,654] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,654] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,654] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,690] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:52:58,693] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 34 ms (kafka.log.Log)
[2021-06-03 22:52:58,694] INFO Created log for partition __consumer_offsets-13 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:52:58,694] INFO [Partition __consumer_offsets-13 broker=2] No checkpointed highwatermark is found for partition __consumer_offsets-13 (kafka.cluster.Partition)
[2021-06-03 22:52:58,694] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:52:58,695] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(__consumer_offsets-28, __consumer_offsets-6, __consumer_offsets-10, __consumer_offsets-36, __consumer_offsets-40, __consumer_offsets-37, __consumer_offsets-18, __consumer_offsets-22, __consumer_offsets-0, __consumer_offsets-4, __consumer_offsets-45, __consumer_offsets-49, __consumer_offsets-27, __consumer_offsets-12, __consumer_offsets-31, __consumer_offsets-9, __consumer_offsets-13, __consumer_offsets-39, __consumer_offsets-43, __consumer_offsets-21, __consumer_offsets-25, __consumer_offsets-3, __consumer_offsets-7, __consumer_offsets-48, __consumer_offsets-33, __consumer_offsets-30, __consumer_offsets-15, __consumer_offsets-34, __consumer_offsets-19, __consumer_offsets-16, __consumer_offsets-1, __consumer_offsets-42, __consumer_offsets-46, __consumer_offsets-24) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-22 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-25 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-28 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-31 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-34 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-37 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-40 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-43 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-46 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-49 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,695] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-1 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-4 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-7 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-10 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-13 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-16 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-19 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-0 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-3 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-6 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-9 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-12 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,696] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-15 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-18 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-21 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-24 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-27 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-30 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-33 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-36 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-39 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-42 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-45 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,697] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-48 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,698] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-22 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,698] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-25 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,698] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-28 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,698] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-31 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,698] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-34 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-37 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-40 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-43 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-46 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-49 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-1 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-4 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-7 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-10 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-13 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-16 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,699] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-19 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-0 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-3 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-6 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-9 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-12 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-15 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-18 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-21 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-24 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-27 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-30 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,700] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-33 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,701] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-36 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,701] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-39 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,701] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-42 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,701] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-45 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,701] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-48 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,705] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=dev-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-22 -> (offset=0, leaderEpoch=0), __consumer_offsets-4 -> (offset=0, leaderEpoch=0), __consumer_offsets-7 -> (offset=0, leaderEpoch=0), __consumer_offsets-46 -> (offset=0, leaderEpoch=0), __consumer_offsets-25 -> (offset=0, leaderEpoch=0), __consumer_offsets-49 -> (offset=0, leaderEpoch=0), __consumer_offsets-16 -> (offset=0, leaderEpoch=0), __consumer_offsets-28 -> (offset=0, leaderEpoch=0), __consumer_offsets-31 -> (offset=0, leaderEpoch=0), __consumer_offsets-37 -> (offset=0, leaderEpoch=0), __consumer_offsets-19 -> (offset=0, leaderEpoch=0), __consumer_offsets-13 -> (offset=0, leaderEpoch=0), __consumer_offsets-43 -> (offset=0, leaderEpoch=0), __consumer_offsets-1 -> (offset=0, leaderEpoch=0), __consumer_offsets-34 -> (offset=0, leaderEpoch=0), __consumer_offsets-10 -> (offset=0, leaderEpoch=0), __consumer_offsets-40 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:52:58,705] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=dev-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-30 -> (offset=0, leaderEpoch=0), __consumer_offsets-21 -> (offset=0, leaderEpoch=0), __consumer_offsets-27 -> (offset=0, leaderEpoch=0), __consumer_offsets-9 -> (offset=0, leaderEpoch=0), __consumer_offsets-33 -> (offset=0, leaderEpoch=0), __consumer_offsets-36 -> (offset=0, leaderEpoch=0), __consumer_offsets-42 -> (offset=0, leaderEpoch=0), __consumer_offsets-3 -> (offset=0, leaderEpoch=0), __consumer_offsets-18 -> (offset=0, leaderEpoch=0), __consumer_offsets-15 -> (offset=0, leaderEpoch=0), __consumer_offsets-24 -> (offset=0, leaderEpoch=0), __consumer_offsets-48 -> (offset=0, leaderEpoch=0), __consumer_offsets-6 -> (offset=0, leaderEpoch=0), __consumer_offsets-0 -> (offset=0, leaderEpoch=0), __consumer_offsets-39 -> (offset=0, leaderEpoch=0), __consumer_offsets-12 -> (offset=0, leaderEpoch=0), __consumer_offsets-45 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:52:58,705] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-22 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,705] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-25 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,705] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-28 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,706] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-31 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,706] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-34 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,706] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-37 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,706] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-40 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,706] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-43 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,706] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-46 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,706] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-49 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,706] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-1 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,706] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-4 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,706] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-7 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-10 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-13 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-16 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-19 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-0 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-3 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-6 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-9 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-12 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-15 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-18 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-21 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,707] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-24 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-27 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-30 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-33 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-36 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-39 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-42 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-45 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-48 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-0 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-48 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-10 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,708] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-45 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,709] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-7 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,709] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-42 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,709] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-4 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,709] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-1 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,709] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-39 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,709] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-36 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,709] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-33 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,709] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-49 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,709] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-30 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,709] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-46 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-27 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-24 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-43 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-21 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-40 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-37 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-18 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-34 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-15 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-12 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-31 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-9 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,710] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-19 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,711] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-28 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,711] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-6 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,711] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-25 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,711] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-16 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,711] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-22 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,711] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-3 with leader 0 (state.change.logger)
[2021-06-03 22:52:58,711] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-13 with leader 1 (state.change.logger)
[2021-06-03 22:52:58,713] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,714] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,714] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,714] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,714] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,714] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,714] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,714] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,714] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,715] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,715] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,715] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,715] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,715] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,715] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,715] INFO [GroupMetadataManager brokerId=2] Scheduling loading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,716] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,716] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,717] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,717] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,717] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,717] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,717] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,717] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,717] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,717] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,717] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,718] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,718] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,718] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,718] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,718] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,718] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,719] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,719] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,719] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,719] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,720] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,720] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:52:58,726] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,726] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,727] INFO [GroupMetadataManager brokerId=2] Scheduling unloading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,732] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-13 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-17 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-5 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-38 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-34 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,733] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-20 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-49 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-25 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-8 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-4 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-15 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,734] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,735] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-11 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,735] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-44 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,735] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-23 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,735] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-19 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,735] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-32 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,735] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-28 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,735] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,735] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-40 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,735] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,735] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-36 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,736] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-47 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,736] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-14 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,736] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-43 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,736] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-10 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,736] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-22 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,736] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,736] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-31 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,736] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,736] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,737] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,737] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-35 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,737] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-03 22:52:58,741] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-2 in 24 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,742] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-5 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,742] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-8 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,742] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-11 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,742] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-14 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,742] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-17 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,742] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-20 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,742] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-23 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,743] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-26 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,743] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-29 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,743] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-32 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,743] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-35 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,743] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-38 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,743] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-41 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,743] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-44 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,744] INFO [GroupMetadataManager brokerId=2] Finished loading offsets and group metadata from __consumer_offsets-47 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-22. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-25. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-28. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-31. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-34. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-37. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-40. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-43. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-46. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-49. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-1. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-4. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-7. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-10. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-13. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,746] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-16. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-19. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-0. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-3. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-6. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-9. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-12. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-15. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-18. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-21. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-24. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-27. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-30. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-33. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-36. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-39. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-42. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-45. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,747] INFO [GroupMetadataManager brokerId=2] Finished unloading __consumer_offsets-48. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:52:58,828] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-28 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,829] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,829] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-43 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,829] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,829] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-10 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,829] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,829] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-25 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,829] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,829] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-7 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,829] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,829] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-37 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,829] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,829] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-40 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,829] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,830] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-22 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,830] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,830] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-4 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,830] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,830] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-34 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,830] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,830] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-19 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,830] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,830] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-49 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,830] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,830] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-16 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,830] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,830] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,830] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,830] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-31 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,830] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,830] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-46 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,830] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,830] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-13 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:58,830] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:58,869] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-47) (reason: Adding new member dev-policy-drools-pdp-0-34fb2dac-9a1c-4a17-928c-a6be8457fc1c with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:52:59,107] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-6 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,107] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,107] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-39 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,108] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,108] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-21 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,108] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,108] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-36 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,108] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,108] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-3 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,108] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,108] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-18 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,108] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,109] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-48 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,109] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,109] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-33 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,109] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,109] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-30 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,109] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,109] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,110] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,110] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-15 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,110] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,110] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-45 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,110] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,110] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-27 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,110] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,110] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-12 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,110] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,111] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-9 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,111] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,111] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-42 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,111] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:52:59,111] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-24 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:52:59,111] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:53:01,160] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:01,160] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:01,881] INFO [GroupCoordinator 2]: Stabilized group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP generation 1 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:53:01,897] INFO [GroupCoordinator 2]: Assignment received from leader for group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:53:02,125] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:02,126] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:04,386] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:04,386] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:18,650] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:18,650] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:29,538] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:29,539] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:32,813] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:32,813] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:47,396] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:47,397] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:47,408] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:47,408] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:47,413] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-14) (reason: Adding new member dev-policy-apex-pdp-0-77176881-7307-4fea-9e40-9208bfbfcd99 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:53:50,414] INFO [GroupCoordinator 2]: Stabilized group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP generation 1 (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:53:50,416] INFO [GroupCoordinator 2]: Assignment received from leader for group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:53:50,524] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 6 (state.change.logger)
[2021-06-03 22:53:50,524] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 6 (state.change.logger)
[2021-06-03 22:53:50,562] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:50,562] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:53:51,048] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 7 (state.change.logger)
[2021-06-03 22:53:51,048] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 7 (state.change.logger)
[2021-06-03 22:53:51,519] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.mirrormakeragent-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 8 (state.change.logger)
[2021-06-03 22:54:06,167] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 9 from controller 1 epoch 8 for partition POLICY-NOTIFICATION-0 (state.change.logger)
[2021-06-03 22:54:06,167] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 9 from controller 1 epoch 8 for partition POLICY-NOTIFICATION-1 (state.change.logger)
[2021-06-03 22:54:06,167] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 9 from controller 1 epoch 8 for partition POLICY-NOTIFICATION-2 (state.change.logger)
[2021-06-03 22:54:06,170] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 9 from controller 1 epoch 8 starting the become-leader transition for partition POLICY-NOTIFICATION-2 (state.change.logger)
[2021-06-03 22:54:06,171] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(POLICY-NOTIFICATION-2) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:54:06,202] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:54:06,204] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 27 ms (kafka.log.Log)
[2021-06-03 22:54:06,206] INFO Created log for partition POLICY-NOTIFICATION-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:54:06,208] INFO [Partition POLICY-NOTIFICATION-2 broker=2] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-2 (kafka.cluster.Partition)
[2021-06-03 22:54:06,208] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:54:06,208] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:54:06,208] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:54:06,208] INFO [Partition POLICY-NOTIFICATION-2 broker=2] POLICY-NOTIFICATION-2 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:54:06,215] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 9 for partition POLICY-NOTIFICATION-2 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:54:06,215] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 9 from controller 1 epoch 8 for the become-leader transition for partition POLICY-NOTIFICATION-2 (state.change.logger)
[2021-06-03 22:54:06,215] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 9 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-NOTIFICATION-0 with leader 0 (state.change.logger)
[2021-06-03 22:54:06,215] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 9 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-NOTIFICATION-1 with leader 1 (state.change.logger)
[2021-06-03 22:54:06,216] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:54:06,216] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:54:06,249] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:54:06,255] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 33 ms (kafka.log.Log)
[2021-06-03 22:54:06,256] INFO Created log for partition POLICY-NOTIFICATION-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:54:06,258] INFO [Partition POLICY-NOTIFICATION-0 broker=2] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-0 (kafka.cluster.Partition)
[2021-06-03 22:54:06,258] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:54:06,260] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:54:06,292] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:54:06,295] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 30 ms (kafka.log.Log)
[2021-06-03 22:54:06,297] INFO Created log for partition POLICY-NOTIFICATION-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:54:06,300] INFO [Partition POLICY-NOTIFICATION-1 broker=2] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-1 (kafka.cluster.Partition)
[2021-06-03 22:54:06,300] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:54:06,300] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:54:06,301] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(POLICY-NOTIFICATION-1, POLICY-NOTIFICATION-0) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:54:06,301] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 9 for partition POLICY-NOTIFICATION-1 with leader 1 (state.change.logger)
[2021-06-03 22:54:06,301] TRACE [Broker id=2] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 9 for partition POLICY-NOTIFICATION-0 with leader 0 (state.change.logger)
[2021-06-03 22:54:06,301] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-NOTIFICATION-1 as part of become-follower request with correlation id 9 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-03 22:54:06,301] TRACE [Broker id=2] Truncated logs and checkpointed recovery boundaries for partition POLICY-NOTIFICATION-0 as part of become-follower request with correlation id 9 from controller 1 epoch 8 with leader 0 (state.change.logger)
[2021-06-03 22:54:06,303] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=1, host=dev-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-NOTIFICATION-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:54:06,304] INFO [ReplicaFetcherManager on broker 2] Added fetcher to broker BrokerEndPoint(id=0, host=dev-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-NOTIFICATION-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:54:06,304] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 9 for partition POLICY-NOTIFICATION-1 with leader 1 (state.change.logger)
[2021-06-03 22:54:06,304] TRACE [Broker id=2] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 9 for partition POLICY-NOTIFICATION-0 with leader 0 (state.change.logger)
[2021-06-03 22:54:06,304] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 9 from controller 1 epoch 8 for the become-follower transition for partition POLICY-NOTIFICATION-0 with leader 0 (state.change.logger)
[2021-06-03 22:54:06,305] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 9 from controller 1 epoch 8 for the become-follower transition for partition POLICY-NOTIFICATION-1 with leader 1 (state.change.logger)
[2021-06-03 22:54:06,307] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-NOTIFICATION-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 10 (state.change.logger)
[2021-06-03 22:54:06,308] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-NOTIFICATION-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 10 (state.change.logger)
[2021-06-03 22:54:06,308] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-NOTIFICATION-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 10 (state.change.logger)
[2021-06-03 22:54:06,713] INFO [ReplicaFetcher replicaId=2, leaderId=0, fetcherId=0] Truncating partition POLICY-NOTIFICATION-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:54:06,713] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:54:06,748] INFO [ReplicaFetcher replicaId=2, leaderId=1, fetcherId=0] Truncating partition POLICY-NOTIFICATION-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-03 22:54:06,748] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-03 22:54:20,856] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:54:20,856] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:54:20,863] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-11) (reason: Adding new member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-3c1b16c2-8b73-400e-a5d1-81a553ae4423 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:54:23,865] INFO [GroupCoordinator 2]: Stabilized group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP generation 1 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:54:23,868] INFO [GroupCoordinator 2]: Assignment received from leader for group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:54:24,005] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:54:24,005] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:56:36,055] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 5 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 22:58:42,425] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:58:42,425] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:58:42,429] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 0 (__consumer_offsets-23) (reason: Adding new member ph1-1ee1f186-8461-4128-afe0-55c2e3f937e4 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:58:45,438] INFO [GroupCoordinator 2]: Stabilized group policy-handler--POLICY-NOTIFICATION generation 1 (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:58:45,442] INFO [GroupCoordinator 2]: Assignment received from leader for group policy-handler--POLICY-NOTIFICATION for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:58:45,569] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:58:45,569] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:58:48,095] TRACE [Broker id=2] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2, zkVersion=0, replicas=2, isNew=true) correlation id 11 from controller 1 epoch 8 for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-06-03 22:58:48,097] TRACE [Broker id=2] Handling LeaderAndIsr request correlationId 11 from controller 1 epoch 8 starting the become-leader transition for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-06-03 22:58:48,097] INFO [ReplicaFetcherManager on broker 2] Removed fetcher for partitions Set(SDC-DISTR-NOTIF-TOPIC-AUTO-0) (kafka.server.ReplicaFetcherManager)
[2021-06-03 22:58:48,136] INFO [Log partition=SDC-DISTR-NOTIF-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-03 22:58:48,139] INFO [Log partition=SDC-DISTR-NOTIF-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 35 ms (kafka.log.Log)
[2021-06-03 22:58:48,141] INFO Created log for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-03 22:58:48,143] INFO [Partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 broker=2] No checkpointed highwatermark is found for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (kafka.cluster.Partition)
[2021-06-03 22:58:48,143] INFO Replica loaded for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-03 22:58:48,143] INFO [Partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 broker=2] SDC-DISTR-NOTIF-TOPIC-AUTO-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-03 22:58:48,151] TRACE [Broker id=2] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 11 for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (last update controller epoch 8) (state.change.logger)
[2021-06-03 22:58:48,151] TRACE [Broker id=2] Completed LeaderAndIsr request correlationId 11 from controller 1 epoch 8 for the become-leader transition for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-06-03 22:58:48,154] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 12 (state.change.logger)
[2021-06-03 22:58:48,827] TRACE [Broker id=2] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 13 (state.change.logger)
[2021-06-03 22:58:49,577] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:58:49,577] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 22:58:49,581] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-32) (reason: Adding new member sdc-AUTO1-529e95c8-3bb0-4f4f-94e4-6c0cf3eff2bc with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:58:52,582] INFO [GroupCoordinator 2]: Stabilized group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO generation 1 (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 22:58:52,584] INFO [GroupCoordinator 2]: Assignment received from leader for group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:01:16,269] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:16,270] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:18,321] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:18,321] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:21,476] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:21,476] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:31,175] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:31,175] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:34,096] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:34,097] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:37,274] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:37,274] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:59,780] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:01:59,780] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:02,931] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:02,931] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:05,120] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:05,120] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:05,124] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-41) (reason: Adding new member multicloud-windriver-id-14b721a9-924a-4b40-b10a-a1a8ec565d60 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:02:08,129] INFO [GroupCoordinator 2]: Stabilized group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:02:08,133] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:02:08,269] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:08,269] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:18,053] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:18,053] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:18,062] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:18,062] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:18,065] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-11) (reason: Adding new member multicloud-starlingx-id-de003e08-76a6-4248-8c35-ca94d349b47f with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:02:19,270] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:19,271] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:21,066] INFO [GroupCoordinator 2]: Stabilized group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:02:21,068] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:02:21,187] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:21,187] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:21,561] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:21,561] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:21,568] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-47) (reason: Adding new member multicloud-k8s-id-4d97d994-3660-4542-adf4-b5282552e773 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:02:24,570] INFO [GroupCoordinator 2]: Stabilized group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:02:24,659] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:02:24,785] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:24,785] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:26,570] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:02:26,570] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:04:11,488] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:04:11,489] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:04:11,605] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:04:11,605] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:04:11,611] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-44) (reason: Adding new member SO-COpenSource-Env11-00830432-34f4-4f0f-a539-ba82bcb57ad5 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:04:14,611] INFO [GroupCoordinator 2]: Stabilized group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO generation 1 (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:04:14,623] INFO [GroupCoordinator 2]: Assignment received from leader for group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:04:14,757] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:04:14,757] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:06:36,050] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 23:14:02,880] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:14:02,880] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:16:36,050] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 23:18:28,253] INFO [GroupCoordinator 2]: Member ph1-1ee1f186-8461-4128-afe0-55c2e3f937e4 in group policy-handler--POLICY-NOTIFICATION has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:18:28,256] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 1 (__consumer_offsets-23) (reason: removing member ph1-1ee1f186-8461-4128-afe0-55c2e3f937e4 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:18:28,257] INFO [GroupCoordinator 2]: Group policy-handler--POLICY-NOTIFICATION with generation 2 is now empty (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:11,318] INFO [GroupCoordinator 2]: Member multicloud-k8s-id-4d97d994-3660-4542-adf4-b5282552e773 in group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:11,319] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-47) (reason: removing member multicloud-k8s-id-4d97d994-3660-4542-adf4-b5282552e773 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:11,319] INFO [GroupCoordinator 2]: Group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 2 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:11,483] INFO [GroupCoordinator 2]: Member SO-COpenSource-Env11-00830432-34f4-4f0f-a539-ba82bcb57ad5 in group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:11,483] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-44) (reason: removing member SO-COpenSource-Env11-00830432-34f4-4f0f-a539-ba82bcb57ad5 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:11,483] INFO [GroupCoordinator 2]: Group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO with generation 2 is now empty (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:11,987] INFO [GroupCoordinator 2]: Member dev-policy-drools-pdp-0-34fb2dac-9a1c-4a17-928c-a6be8457fc1c in group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:11,987] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-47) (reason: removing member dev-policy-drools-pdp-0-34fb2dac-9a1c-4a17-928c-a6be8457fc1c on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:11,987] INFO [GroupCoordinator 2]: Group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:23,270] INFO [GroupCoordinator 2]: Member multicloud-starlingx-id-de003e08-76a6-4248-8c35-ca94d349b47f in group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:23,271] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-11) (reason: removing member multicloud-starlingx-id-de003e08-76a6-4248-8c35-ca94d349b47f on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:23,271] INFO [GroupCoordinator 2]: Group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 2 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:24,795] INFO [GroupCoordinator 2]: Member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-3c1b16c2-8b73-400e-a5d1-81a553ae4423 in group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:24,796] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-11) (reason: removing member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-3c1b16c2-8b73-400e-a5d1-81a553ae4423 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:24,796] INFO [GroupCoordinator 2]: Group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:24,846] INFO [GroupCoordinator 2]: Member multicloud-windriver-id-14b721a9-924a-4b40-b10a-a1a8ec565d60 in group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:24,846] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-41) (reason: removing member multicloud-windriver-id-14b721a9-924a-4b40-b10a-a1a8ec565d60 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:24,846] INFO [GroupCoordinator 2]: Group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 2 is now empty (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:25,899] INFO [GroupCoordinator 2]: Member dev-policy-apex-pdp-0-77176881-7307-4fea-9e40-9208bfbfcd99 in group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:25,899] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-14) (reason: removing member dev-policy-apex-pdp-0-77176881-7307-4fea-9e40-9208bfbfcd99 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:25,900] INFO [GroupCoordinator 2]: Group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:26,334] INFO [GroupCoordinator 2]: Member sdc-AUTO1-529e95c8-3bb0-4f4f-94e4-6c0cf3eff2bc in group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:26,334] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-32) (reason: removing member sdc-AUTO1-529e95c8-3bb0-4f4f-94e4-6c0cf3eff2bc on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:26,334] INFO [GroupCoordinator 2]: Group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO with generation 2 is now empty (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:39,281] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:19:39,281] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:19:39,297] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:19:39,297] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:19:39,303] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 2 (__consumer_offsets-11) (reason: Adding new member multicloud-starlingx-id-cf66c5e6-5264-49a6-ba15-a2ce1081a8a2 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:39,362] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:19:39,362] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:19:39,365] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 2 (__consumer_offsets-14) (reason: Adding new member dev-policy-apex-pdp-0-d6257b33-2062-4b17-aa6d-e154d9a0df84 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:42,305] INFO [GroupCoordinator 2]: Stabilized group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 3 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:42,367] INFO [GroupCoordinator 2]: Stabilized group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP generation 3 (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:45,925] INFO [GroupCoordinator 2]: Assignment received from leader for group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:46,907] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:46,913] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:19:46,913] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:19:50,986] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:19:50,986] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:19:51,027] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 2 (__consumer_offsets-41) (reason: Adding new member multicloud-windriver-id-f2bb1189-9f5e-467e-9e07-c9e17e6f734e with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:54,034] INFO [GroupCoordinator 2]: Stabilized group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 3 (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:19:54,117] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:20:00,154] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:20:00,154] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:20:08,922] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:20:08,922] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:20:16,898] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:20:16,898] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:20:34,758] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:20:34,758] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:20:34,788] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 2 (__consumer_offsets-11) (reason: Adding new member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-2d4c2609-f27e-46e9-a899-88294d1354e3 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:20:37,789] INFO [GroupCoordinator 2]: Stabilized group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP generation 3 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:20:37,794] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:20:37,794] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:20:37,839] INFO [GroupCoordinator 2]: Assignment received from leader for group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:09,978] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:21:09,978] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:21:10,035] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:21:10,035] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:21:10,063] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 2 (__consumer_offsets-47) (reason: Adding new member dev-policy-drools-pdp-0-3ab837bd-5ccf-49f8-b64c-a67d158216c0 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:13,065] INFO [GroupCoordinator 2]: Stabilized group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP generation 3 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:22,044] INFO [GroupCoordinator 2]: Assignment received from leader for group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:37,889] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:21:37,889] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:21:37,898] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:21:37,898] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:21:37,906] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 2 (__consumer_offsets-44) (reason: Adding new member SO-COpenSource-Env11-f7913f9a-d61f-4a1c-9a55-1b89a63564d5 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:37,913] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:21:37,913] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:21:37,924] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 2 (__consumer_offsets-47) (reason: Adding new member multicloud-k8s-id-f77f92f2-aead-44a2-bcb5-a9b4fdc89e26 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:37,924] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 2 (__consumer_offsets-32) (reason: Adding new member sdc-AUTO1-921d8f83-f61b-4269-9da2-61db598aeb83 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:40,907] INFO [GroupCoordinator 2]: Stabilized group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO generation 3 (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:40,925] INFO [GroupCoordinator 2]: Stabilized group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO generation 3 (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:40,926] INFO [GroupCoordinator 2]: Stabilized group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 3 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:41,692] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:41,692] INFO [GroupCoordinator 2]: Assignment received from leader for group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:21:41,738] INFO [GroupCoordinator 2]: Assignment received from leader for group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:22:26,550] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:22:26,550] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:16,638] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:16,639] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:22,894] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:22,894] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:29,493] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:29,493] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:29,552] WARN Attempting to send response via channel for which there is no open connection, connection id 10.242.47.10:9092-10.242.138.80:55796-85 (kafka.network.Processor)
[2021-06-03 23:23:30,132] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:30,132] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:30,238] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:30,238] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:58,438] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:58,438] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:59,115] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:59,115] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:59,211] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:23:59,212] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:24:26,286] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:24:26,287] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:24:26,334] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:24:26,334] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:24:38,081] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:24:38,081] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:25:02,207] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:25:02,207] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:25:03,387] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:25:03,387] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:25:34,368] INFO [GroupCoordinator 2]: Member dev-policy-drools-pdp-0-3ab837bd-5ccf-49f8-b64c-a67d158216c0 in group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:25:34,369] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 3 (__consumer_offsets-47) (reason: removing member dev-policy-drools-pdp-0-3ab837bd-5ccf-49f8-b64c-a67d158216c0 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:25:34,369] INFO [GroupCoordinator 2]: Group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP with generation 4 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:25:47,208] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:25:47,208] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:26:36,060] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 10 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 23:27:34,432] INFO [GroupCoordinator 2]: Member SO-COpenSource-Env11-f7913f9a-d61f-4a1c-9a55-1b89a63564d5 in group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:27:34,433] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 3 (__consumer_offsets-44) (reason: removing member SO-COpenSource-Env11-f7913f9a-d61f-4a1c-9a55-1b89a63564d5 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:27:34,434] INFO [GroupCoordinator 2]: Group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO with generation 4 is now empty (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:27:40,217] INFO [GroupCoordinator 2]: Member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-2d4c2609-f27e-46e9-a899-88294d1354e3 in group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:27:40,218] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 3 (__consumer_offsets-11) (reason: removing member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-2d4c2609-f27e-46e9-a899-88294d1354e3 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:27:40,218] INFO [GroupCoordinator 2]: Group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP with generation 4 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:27:40,863] INFO [GroupCoordinator 2]: Member sdc-AUTO1-921d8f83-f61b-4269-9da2-61db598aeb83 in group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:27:40,863] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 3 (__consumer_offsets-32) (reason: removing member sdc-AUTO1-921d8f83-f61b-4269-9da2-61db598aeb83 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:27:40,863] INFO [GroupCoordinator 2]: Group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO with generation 4 is now empty (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:04,373] INFO [GroupCoordinator 2]: Member multicloud-starlingx-id-cf66c5e6-5264-49a6-ba15-a2ce1081a8a2 in group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:04,373] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 3 (__consumer_offsets-11) (reason: removing member multicloud-starlingx-id-cf66c5e6-5264-49a6-ba15-a2ce1081a8a2 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:04,373] INFO [GroupCoordinator 2]: Group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 4 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:04,375] INFO [GroupCoordinator 2]: Member multicloud-windriver-id-f2bb1189-9f5e-467e-9e07-c9e17e6f734e in group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:04,375] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 3 (__consumer_offsets-41) (reason: removing member multicloud-windriver-id-f2bb1189-9f5e-467e-9e07-c9e17e6f734e on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:04,375] INFO [GroupCoordinator 2]: Group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 4 is now empty (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:15,778] INFO [GroupCoordinator 2]: Member multicloud-k8s-id-f77f92f2-aead-44a2-bcb5-a9b4fdc89e26 in group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:15,779] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 3 (__consumer_offsets-47) (reason: removing member multicloud-k8s-id-f77f92f2-aead-44a2-bcb5-a9b4fdc89e26 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:15,779] INFO [GroupCoordinator 2]: Group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 4 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:49,922] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 4 (__consumer_offsets-11) (reason: Adding new member multicloud-starlingx-id-47f30daf-4f28-40d4-bd2b-9833f069bf56 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:52,925] INFO [GroupCoordinator 2]: Stabilized group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 5 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:56,952] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:28:57,087] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 4 (__consumer_offsets-41) (reason: Adding new member multicloud-windriver-id-087500a2-d115-4a31-b5dd-5db8b7dbbf30 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:29:00,088] INFO [GroupCoordinator 2]: Stabilized group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 5 (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:29:00,480] INFO [GroupCoordinator 2]: Member multicloud-starlingx-id-47f30daf-4f28-40d4-bd2b-9833f069bf56 in group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:29:00,480] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 5 (__consumer_offsets-11) (reason: removing member multicloud-starlingx-id-47f30daf-4f28-40d4-bd2b-9833f069bf56 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:29:00,480] INFO [GroupCoordinator 2]: Group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 6 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:29:00,520] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:29:35,048] INFO [GroupCoordinator 2]: Member dev-policy-apex-pdp-0-d6257b33-2062-4b17-aa6d-e154d9a0df84 in group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:29:35,049] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 3 (__consumer_offsets-14) (reason: removing member dev-policy-apex-pdp-0-d6257b33-2062-4b17-aa6d-e154d9a0df84 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:29:35,049] INFO [GroupCoordinator 2]: Group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP with generation 4 is now empty (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:30:45,060] INFO [GroupCoordinator 2]: Member multicloud-windriver-id-087500a2-d115-4a31-b5dd-5db8b7dbbf30 in group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:30:45,062] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 5 (__consumer_offsets-41) (reason: removing member multicloud-windriver-id-087500a2-d115-4a31-b5dd-5db8b7dbbf30 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:30:45,062] INFO [GroupCoordinator 2]: Group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 6 is now empty (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:28,502] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,503] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,502] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,503] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,503] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,503] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,542] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,542] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,543] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,543] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,596] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:28,596] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:29,058] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:29,059] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:29,076] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 4 (__consumer_offsets-14) (reason: Adding new member dev-policy-apex-pdp-0-05f520de-d57e-4ee8-a314-ae4c057572e3 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:29,340] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:29,340] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:29,342] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:29,342] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:29,370] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 4 (__consumer_offsets-11) (reason: Adding new member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-ce10dcdd-ebef-44a3-a48f-02340d3bdd94 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:29,371] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 6 (__consumer_offsets-11) (reason: Adding new member multicloud-starlingx-id-9e245830-3c40-44a7-b3e5-e307eafd4181 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:30,056] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:30,056] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:30,074] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 6 (__consumer_offsets-41) (reason: Adding new member multicloud-windriver-id-bb38000e-9bd8-4a19-91d2-2215dbd6f8bd with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:30,356] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:30,356] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:30,367] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 2 (__consumer_offsets-23) (reason: Adding new member ph1-174f056a-f36c-409f-92b8-2f6a020f9951 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:31,366] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:31,367] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:31,415] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 4 (__consumer_offsets-47) (reason: Adding new member dev-policy-drools-pdp-0-22764ca2-e0fb-42ed-90c0-89bd2920853e with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:32,079] INFO [GroupCoordinator 2]: Stabilized group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP generation 5 (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:32,087] INFO [GroupCoordinator 2]: Assignment received from leader for group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:32,371] INFO [GroupCoordinator 2]: Stabilized group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP generation 5 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:32,371] INFO [GroupCoordinator 2]: Stabilized group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 7 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:32,377] INFO [GroupCoordinator 2]: Assignment received from leader for group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:32,386] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:33,074] INFO [GroupCoordinator 2]: Stabilized group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 7 (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:33,079] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:33,368] INFO [GroupCoordinator 2]: Stabilized group policy-handler--POLICY-NOTIFICATION generation 3 (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:33,375] INFO [GroupCoordinator 2]: Assignment received from leader for group policy-handler--POLICY-NOTIFICATION for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:33,597] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:33,597] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:33,605] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 4 (__consumer_offsets-47) (reason: Adding new member multicloud-k8s-id-6c678f4a-a338-46ac-9353-840df0260378 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:34,415] INFO [GroupCoordinator 2]: Stabilized group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP generation 5 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:34,547] INFO [GroupCoordinator 2]: Assignment received from leader for group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:35,649] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:35,650] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:36,606] INFO [GroupCoordinator 2]: Stabilized group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 5 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:50,013] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:50,195] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:50,196] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:50,199] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 4 (__consumer_offsets-44) (reason: Adding new member SO-COpenSource-Env11-9a561906-e2a3-4675-8618-9f93e17fd1f5 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:53,201] INFO [GroupCoordinator 2]: Stabilized group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO generation 5 (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:53,601] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:53,601] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:53,601] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:53,601] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:53,744] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:53,744] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:53,788] INFO [GroupCoordinator 2]: Assignment received from leader for group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:33:55,554] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:55,554] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:55,555] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:55,555] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:56,836] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:56,836] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:56,866] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:33:56,866] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:34:00,642] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:34:00,642] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:34:00,751] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:34:00,751] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:34:00,884] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:34:00,884] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:34:06,283] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:34:06,283] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:34:06,498] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:34:06,498] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:36,051] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 23:36:41,705] INFO [GroupCoordinator 2]: Member multicloud-k8s-id-6c678f4a-a338-46ac-9353-840df0260378 in group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,706] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 5 (__consumer_offsets-47) (reason: removing member multicloud-k8s-id-6c678f4a-a338-46ac-9353-840df0260378 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,707] INFO [GroupCoordinator 2]: Group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 6 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,776] INFO [GroupCoordinator 2]: Member SO-COpenSource-Env11-9a561906-e2a3-4675-8618-9f93e17fd1f5 in group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,776] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 5 (__consumer_offsets-44) (reason: removing member SO-COpenSource-Env11-9a561906-e2a3-4675-8618-9f93e17fd1f5 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,776] INFO [GroupCoordinator 2]: Group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO with generation 6 is now empty (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,809] INFO [GroupCoordinator 2]: Member dev-policy-drools-pdp-0-22764ca2-e0fb-42ed-90c0-89bd2920853e in group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,809] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 5 (__consumer_offsets-47) (reason: removing member dev-policy-drools-pdp-0-22764ca2-e0fb-42ed-90c0-89bd2920853e on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,809] INFO [GroupCoordinator 2]: Group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP with generation 6 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,835] INFO [GroupCoordinator 2]: Member ph1-174f056a-f36c-409f-92b8-2f6a020f9951 in group policy-handler--POLICY-NOTIFICATION has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,835] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 3 (__consumer_offsets-23) (reason: removing member ph1-174f056a-f36c-409f-92b8-2f6a020f9951 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:41,835] INFO [GroupCoordinator 2]: Group policy-handler--POLICY-NOTIFICATION with generation 4 is now empty (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:42,075] INFO [GroupCoordinator 2]: Member multicloud-windriver-id-bb38000e-9bd8-4a19-91d2-2215dbd6f8bd in group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:42,075] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 7 (__consumer_offsets-41) (reason: removing member multicloud-windriver-id-bb38000e-9bd8-4a19-91d2-2215dbd6f8bd on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:42,075] INFO [GroupCoordinator 2]: Group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 8 is now empty (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:42,503] INFO [GroupCoordinator 2]: Member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-ce10dcdd-ebef-44a3-a48f-02340d3bdd94 in group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:42,503] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 5 (__consumer_offsets-11) (reason: removing member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-ce10dcdd-ebef-44a3-a48f-02340d3bdd94 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:42,503] INFO [GroupCoordinator 2]: Group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP with generation 6 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:42,514] INFO [GroupCoordinator 2]: Member dev-policy-apex-pdp-0-05f520de-d57e-4ee8-a314-ae4c057572e3 in group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:42,514] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 5 (__consumer_offsets-14) (reason: removing member dev-policy-apex-pdp-0-05f520de-d57e-4ee8-a314-ae4c057572e3 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:42,515] INFO [GroupCoordinator 2]: Group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP with generation 6 is now empty (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:43,482] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:43,482] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:43,528] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:43,528] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:43,545] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 8 (__consumer_offsets-41) (reason: Adding new member multicloud-windriver-id-2e9fd8bf-fab1-4bf7-a88f-4a1c8a06b196 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:45,407] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:45,407] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:46,546] INFO [GroupCoordinator 2]: Stabilized group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 9 (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:46,556] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:46,631] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:46,631] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:46,634] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 6 (__consumer_offsets-47) (reason: Adding new member multicloud-k8s-id-741969c8-41af-4341-aea6-322597485e30 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:46,695] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:46,695] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:46,723] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:46,723] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:48,820] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:48,820] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:49,306] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:49,306] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:49,635] INFO [GroupCoordinator 2]: Stabilized group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 7 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:49,676] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:49,878] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:49,878] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:49,952] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:49,952] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:50,152] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:50,152] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:50,156] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 4 (__consumer_offsets-32) (reason: Adding new member sdc-AUTO1-24e3f2a1-81b9-4a8a-abfa-82fc18b53cc1 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:53,157] INFO [GroupCoordinator 2]: Stabilized group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO generation 5 (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:53,256] INFO [GroupCoordinator 2]: Assignment received from leader for group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:36:55,437] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:36:55,437] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,093] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,093] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,101] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,102] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,103] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,103] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,105] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 6 (__consumer_offsets-14) (reason: Adding new member dev-policy-apex-pdp-0-6130579b-dc33-41c9-81c8-85bf6ea0b08e with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:01,683] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,683] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,687] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 6 (__consumer_offsets-47) (reason: Adding new member dev-policy-drools-pdp-0-d229cb09-d2de-4a21-a11b-c6d0e596e6fe with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:01,688] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,689] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:01,691] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 6 (__consumer_offsets-11) (reason: Adding new member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-a46a4e1c-9382-4a9e-b2db-8ff2d1bdb9b6 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:02,249] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:02,249] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:02,538] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 4 (__consumer_offsets-23) (reason: Adding new member ph1-35c0ba54-c50d-45fc-8cca-a16b7cacda38 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:03,525] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:03,525] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:03,537] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 7 (__consumer_offsets-11) (reason: Adding new member multicloud-starlingx-id-63ca2491-5dc5-49b3-889c-de7d40106061 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:04,107] INFO [GroupCoordinator 2]: Stabilized group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP generation 7 (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:04,120] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:04,120] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:04,206] INFO [GroupCoordinator 2]: Assignment received from leader for group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:04,688] INFO [GroupCoordinator 2]: Stabilized group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP generation 7 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:04,692] INFO [GroupCoordinator 2]: Stabilized group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP generation 7 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:04,871] INFO [GroupCoordinator 2]: Assignment received from leader for group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:04,871] INFO [GroupCoordinator 2]: Assignment received from leader for group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:05,538] INFO [GroupCoordinator 2]: Stabilized group policy-handler--POLICY-NOTIFICATION generation 5 (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:05,555] INFO [GroupCoordinator 2]: Assignment received from leader for group policy-handler--POLICY-NOTIFICATION for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:07,582] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:07,582] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:09,827] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:09,827] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:09,828] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:09,828] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:09,867] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:09,867] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:12,738] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:12,738] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:13,422] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:13,422] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:13,423] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:13,423] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:15,821] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:15,821] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:15,869] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 6 (__consumer_offsets-44) (reason: Adding new member SO-COpenSource-Env11-ee0cbc33-de8b-45cc-a184-8c5ac9e49961 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:18,870] INFO [GroupCoordinator 2]: Stabilized group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO generation 7 (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:19,200] INFO [GroupCoordinator 2]: Assignment received from leader for group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:37:22,054] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:37:22,054] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:41:45,360] INFO [GroupCoordinator 2]: Member multicloud-starlingx-id-9e245830-3c40-44a7-b3e5-e307eafd4181 in group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:41:45,365] INFO [GroupCoordinator 2]: Stabilized group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 8 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:41:48,288] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 8 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:41:59,420] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:41:59,420] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:42:47,907] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:42:47,908] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:43:12,938] INFO [GroupCoordinator 2]: Member multicloud-k8s-id-741969c8-41af-4341-aea6-322597485e30 in group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:12,938] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 7 (__consumer_offsets-47) (reason: removing member multicloud-k8s-id-741969c8-41af-4341-aea6-322597485e30 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:12,939] INFO [GroupCoordinator 2]: Group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 8 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:24,864] INFO [GroupCoordinator 2]: Member SO-COpenSource-Env11-ee0cbc33-de8b-45cc-a184-8c5ac9e49961 in group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:24,864] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 7 (__consumer_offsets-44) (reason: removing member SO-COpenSource-Env11-ee0cbc33-de8b-45cc-a184-8c5ac9e49961 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:24,864] INFO [GroupCoordinator 2]: Group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO with generation 8 is now empty (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:25,268] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:43:25,268] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:43:49,046] INFO [GroupCoordinator 2]: Member dev-policy-drools-pdp-0-d229cb09-d2de-4a21-a11b-c6d0e596e6fe in group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:49,046] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 7 (__consumer_offsets-47) (reason: removing member dev-policy-drools-pdp-0-d229cb09-d2de-4a21-a11b-c6d0e596e6fe on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:49,046] INFO [GroupCoordinator 2]: Group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP with generation 8 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:55,385] INFO [GroupCoordinator 2]: Member ph1-35c0ba54-c50d-45fc-8cca-a16b7cacda38 in group policy-handler--POLICY-NOTIFICATION has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:55,386] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 5 (__consumer_offsets-23) (reason: removing member ph1-35c0ba54-c50d-45fc-8cca-a16b7cacda38 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:43:55,386] INFO [GroupCoordinator 2]: Group policy-handler--POLICY-NOTIFICATION with generation 6 is now empty (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:12,554] INFO [GroupCoordinator 2]: Member multicloud-starlingx-id-63ca2491-5dc5-49b3-889c-de7d40106061 in group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:12,556] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 8 (__consumer_offsets-11) (reason: removing member multicloud-starlingx-id-63ca2491-5dc5-49b3-889c-de7d40106061 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:12,556] INFO [GroupCoordinator 2]: Group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 9 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:15,597] INFO [GroupCoordinator 2]: Member sdc-AUTO1-24e3f2a1-81b9-4a8a-abfa-82fc18b53cc1 in group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:15,597] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 5 (__consumer_offsets-32) (reason: removing member sdc-AUTO1-24e3f2a1-81b9-4a8a-abfa-82fc18b53cc1 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:15,598] INFO [GroupCoordinator 2]: Group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO with generation 6 is now empty (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:19,110] INFO [GroupCoordinator 2]: Member multicloud-windriver-id-2e9fd8bf-fab1-4bf7-a88f-4a1c8a06b196 in group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:19,110] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 9 (__consumer_offsets-41) (reason: removing member multicloud-windriver-id-2e9fd8bf-fab1-4bf7-a88f-4a1c8a06b196 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:19,110] INFO [GroupCoordinator 2]: Group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 10 is now empty (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:26,215] INFO [GroupCoordinator 2]: Member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-a46a4e1c-9382-4a9e-b2db-8ff2d1bdb9b6 in group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:26,215] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 7 (__consumer_offsets-11) (reason: removing member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-a46a4e1c-9382-4a9e-b2db-8ff2d1bdb9b6 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:26,216] INFO [GroupCoordinator 2]: Group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP with generation 8 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:32,174] INFO [GroupCoordinator 2]: Member dev-policy-apex-pdp-0-6130579b-dc33-41c9-81c8-85bf6ea0b08e in group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:32,174] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 7 (__consumer_offsets-14) (reason: removing member dev-policy-apex-pdp-0-6130579b-dc33-41c9-81c8-85bf6ea0b08e on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:44:32,175] INFO [GroupCoordinator 2]: Group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP with generation 8 is now empty (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:46:36,050] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-03 23:47:09,246] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:09,246] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:09,309] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:09,309] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:10,918] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:10,918] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:10,920] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:10,920] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:10,920] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:10,920] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:11,689] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:11,689] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:11,689] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:11,689] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:16,064] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:16,064] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:16,488] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:16,488] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:16,564] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:16,564] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:16,761] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 8 (__consumer_offsets-14) (reason: Adding new member dev-policy-apex-pdp-0-9afa4dbb-0df8-451a-815e-d04c226f4a3a with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:16,978] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 8 (__consumer_offsets-47) (reason: Adding new member multicloud-k8s-id-9120d3e1-d3da-439f-8172-9439345ef130 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:17,384] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:17,384] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:18,207] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:18,207] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:18,656] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 8 (__consumer_offsets-11) (reason: Adding new member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-e42d1b73-649e-4ea6-92f7-f2b59fb803de with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:19,763] INFO [GroupCoordinator 2]: Stabilized group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP generation 9 (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:19,922] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:19,922] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:19,968] INFO [GroupCoordinator 2]: Assignment received from leader for group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:19,978] INFO [GroupCoordinator 2]: Stabilized group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 9 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:19,980] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:20,025] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 10 (__consumer_offsets-41) (reason: Adding new member multicloud-windriver-id-a5acbaef-3085-447f-887d-c453035b86d9 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:21,658] INFO [GroupCoordinator 2]: Stabilized group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP generation 9 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:23,030] INFO [GroupCoordinator 2]: Stabilized group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 11 (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:23,165] INFO [GroupCoordinator 2]: Assignment received from leader for group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:23,177] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:31,279] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:31,279] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:32,173] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 8 (__consumer_offsets-47) (reason: Adding new member dev-policy-drools-pdp-0-23d0501c-435f-4b5b-9d8d-6a94ac233828 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:34,323] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:34,323] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:35,174] INFO [GroupCoordinator 2]: Stabilized group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP generation 9 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:48,044] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:48,044] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:52,137] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:52,137] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:52,317] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 9 (__consumer_offsets-11) (reason: Adding new member multicloud-starlingx-id-71c70afd-b7d5-4a8f-a876-4d69b898d44c with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:52,431] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:52,431] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:53,387] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:53,387] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:53,592] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 6 (__consumer_offsets-32) (reason: Adding new member sdc-AUTO1-82c90cba-367b-4a10-8511-c20701595dc4 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:54,790] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:54,790] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:54,797] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:54,797] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:55,318] INFO [GroupCoordinator 2]: Stabilized group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 10 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:56,593] INFO [GroupCoordinator 2]: Stabilized group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO generation 7 (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:56,857] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 10 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:47:56,862] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:56,862] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:57,010] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:57,010] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:58,342] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:47:58,342] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:48:01,636] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:48:01,636] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:48:01,650] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:48:01,650] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:48:02,519] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:48:02,520] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:48:13,362] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:48:13,362] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:48:14,081] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:48:14,081] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:49:54,160] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:49:54,160] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:49:54,160] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:49:54,160] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:49:57,065] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:49:57,066] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-03 23:49:58,209] INFO [GroupCoordinator 2]: Assignment received from leader for group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:50:35,326] INFO [GroupCoordinator 2]: Member multicloud-k8s-id-9120d3e1-d3da-439f-8172-9439345ef130 in group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:50:35,327] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 9 (__consumer_offsets-47) (reason: removing member multicloud-k8s-id-9120d3e1-d3da-439f-8172-9439345ef130 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:50:35,327] INFO [GroupCoordinator 2]: Group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 10 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:51:35,181] INFO [GroupCoordinator 2]: Member dev-policy-drools-pdp-0-23d0501c-435f-4b5b-9d8d-6a94ac233828 in group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:51:35,183] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 9 (__consumer_offsets-47) (reason: removing member dev-policy-drools-pdp-0-23d0501c-435f-4b5b-9d8d-6a94ac233828 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:51:35,184] INFO [GroupCoordinator 2]: Group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP with generation 10 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:54,305] INFO [GroupCoordinator 2]: Member dev-policy-apex-pdp-0-9afa4dbb-0df8-451a-815e-d04c226f4a3a in group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:54,306] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 9 (__consumer_offsets-14) (reason: removing member dev-policy-apex-pdp-0-9afa4dbb-0df8-451a-815e-d04c226f4a3a on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:54,307] INFO [GroupCoordinator 2]: Group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP with generation 10 is now empty (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:54,312] INFO [GroupCoordinator 2]: Member multicloud-windriver-id-a5acbaef-3085-447f-887d-c453035b86d9 in group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:54,312] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 11 (__consumer_offsets-41) (reason: removing member multicloud-windriver-id-a5acbaef-3085-447f-887d-c453035b86d9 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:54,312] INFO [GroupCoordinator 2]: Group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 12 is now empty (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:54,313] INFO [GroupCoordinator 2]: Member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-e42d1b73-649e-4ea6-92f7-f2b59fb803de in group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:54,313] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 9 (__consumer_offsets-11) (reason: removing member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-e42d1b73-649e-4ea6-92f7-f2b59fb803de on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:54,313] INFO [GroupCoordinator 2]: Group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP with generation 10 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:58,222] INFO [GroupCoordinator 2]: Member sdc-AUTO1-82c90cba-367b-4a10-8511-c20701595dc4 in group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:58,222] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 7 (__consumer_offsets-32) (reason: removing member sdc-AUTO1-82c90cba-367b-4a10-8511-c20701595dc4 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:53:58,222] INFO [GroupCoordinator 2]: Group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO with generation 8 is now empty (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:54:34,986] INFO [GroupCoordinator 2]: Member multicloud-starlingx-id-71c70afd-b7d5-4a8f-a876-4d69b898d44c in group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:54:34,987] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 10 (__consumer_offsets-11) (reason: removing member multicloud-starlingx-id-71c70afd-b7d5-4a8f-a876-4d69b898d44c on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:54:34,987] INFO [GroupCoordinator 2]: Group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 11 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-03 23:56:36,051] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-04 00:06:36,051] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-04 00:16:36,051] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-04 00:19:18,037] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,037] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,177] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,177] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,338] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,338] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,340] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,340] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,353] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 11 (__consumer_offsets-11) (reason: Adding new member multicloud-starlingx-id-2ae3bc16-5214-4a4d-8e6c-0601b4e723c9 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:18,358] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,358] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,402] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,403] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,410] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,410] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,413] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,413] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,414] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,414] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,486] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,486] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,490] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 12 (__consumer_offsets-41) (reason: Adding new member multicloud-windriver-id-b5ea0ab7-33b6-4336-84c5-f6874f5eccb5 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:18,538] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,538] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,602] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:18,602] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:21,356] INFO [GroupCoordinator 2]: Stabilized group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 12 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:21,357] INFO [GroupCoordinator 2]: Member multicloud-starlingx-id-2ae3bc16-5214-4a4d-8e6c-0601b4e723c9 in group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:21,357] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 12 (__consumer_offsets-11) (reason: removing member multicloud-starlingx-id-2ae3bc16-5214-4a4d-8e6c-0601b4e723c9 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:21,357] INFO [GroupCoordinator 2]: Group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 13 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:21,487] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:21,487] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:21,492] INFO [GroupCoordinator 2]: Stabilized group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 13 (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:21,494] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 13 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:21,507] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:21,507] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:21,576] INFO [GroupCoordinator 2]: Member multicloud-windriver-id-b5ea0ab7-33b6-4336-84c5-f6874f5eccb5 in group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:21,576] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 13 (__consumer_offsets-41) (reason: removing member multicloud-windriver-id-b5ea0ab7-33b6-4336-84c5-f6874f5eccb5 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:21,576] INFO [GroupCoordinator 2]: Group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 14 is now empty (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:21,613] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:21,613] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,230] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,230] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,232] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,232] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,303] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,303] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,307] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 13 (__consumer_offsets-11) (reason: Adding new member multicloud-starlingx-id-02d75252-2c12-4de4-b299-6005cd458c75 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:22,315] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,315] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,318] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 10 (__consumer_offsets-47) (reason: Adding new member multicloud-k8s-id-a2bb4258-6653-42b4-9b05-3a2dc782acf3 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:22,335] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,335] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,345] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,345] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:22,358] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 14 (__consumer_offsets-41) (reason: Adding new member multicloud-windriver-id-ac61cea0-3b69-4509-a89b-7222111be402 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:25,312] INFO [GroupCoordinator 2]: Stabilized group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 14 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:25,314] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 14 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:25,320] INFO [GroupCoordinator 2]: Stabilized group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 11 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:25,326] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:25,359] INFO [GroupCoordinator 2]: Stabilized group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 15 (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:25,361] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 15 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:25,429] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:25,429] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:25,455] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:25,455] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:25,474] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:25,474] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:25,477] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:25,478] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:25,502] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:25,502] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:26,483] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:26,483] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:29,535] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:29,536] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:29,541] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 6 (__consumer_offsets-23) (reason: Adding new member ph1-e2850052-ee4a-4552-87db-b65c7674ae6f with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:30,306] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:30,306] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:30,309] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 10 (__consumer_offsets-47) (reason: Adding new member dev-policy-drools-pdp-0-dafcc599-6634-46fe-89f8-0de19cc1ded1 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:31,285] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:31,285] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:31,289] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 10 (__consumer_offsets-14) (reason: Adding new member dev-policy-apex-pdp-0-696de786-ea7e-4862-aace-24f1399fcedb with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:32,067] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:32,067] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:32,419] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:32,419] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:32,423] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 10 (__consumer_offsets-11) (reason: Adding new member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-4e85278c-2eda-490c-9c10-88558db304c0 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:32,543] INFO [GroupCoordinator 2]: Stabilized group policy-handler--POLICY-NOTIFICATION generation 7 (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:32,544] INFO [GroupCoordinator 2]: Assignment received from leader for group policy-handler--POLICY-NOTIFICATION for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:32,686] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:32,686] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:33,315] INFO [GroupCoordinator 2]: Stabilized group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP generation 11 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:33,317] INFO [GroupCoordinator 2]: Assignment received from leader for group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:33,450] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:33,450] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:34,289] INFO [GroupCoordinator 2]: Stabilized group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP generation 11 (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:34,291] INFO [GroupCoordinator 2]: Assignment received from leader for group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:34,411] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:34,412] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:35,158] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:35,158] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:35,424] INFO [GroupCoordinator 2]: Stabilized group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP generation 11 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:35,438] INFO [GroupCoordinator 2]: Assignment received from leader for group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:35,565] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:35,565] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:36,278] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:36,278] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:45,110] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:45,110] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:46,558] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:46,558] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:46,565] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:46,565] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:46,568] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 8 (__consumer_offsets-44) (reason: Adding new member SO-COpenSource-Env11-914b5c03-9ef2-4f30-86c8-34c4571af9fd with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:48,607] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:48,607] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:48,618] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:48,618] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:19:48,642] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 8 (__consumer_offsets-32) (reason: Adding new member sdc-AUTO1-10f4b05a-da01-43bd-a32a-be627678bf51 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:49,574] INFO [GroupCoordinator 2]: Stabilized group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO generation 9 (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:49,575] INFO [GroupCoordinator 2]: Assignment received from leader for group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:51,642] INFO [GroupCoordinator 2]: Stabilized group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO generation 9 (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:19:51,644] INFO [GroupCoordinator 2]: Assignment received from leader for group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:20:56,519] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:20:56,520] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:20,100] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:20,100] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:21,306] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:21,306] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:21,317] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 14 (__consumer_offsets-11) (reason: Adding new member multicloud-starlingx-id-5ded1536-c6a8-420b-89fe-f7b240a7b6d1 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:21:21,323] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:21,323] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:21,327] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 15 (__consumer_offsets-41) (reason: Adding new member multicloud-windriver-id-0cebe275-7e32-4b7e-b8dd-13c276d4b1bf with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:21:21,801] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:21,801] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:21,906] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:21,906] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:21,918] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 11 (__consumer_offsets-47) (reason: Adding new member multicloud-k8s-id-52d30d41-2b3b-42e4-9ae6-dfeb8f54465b with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:21:24,788] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:24,788] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:28,243] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:28,243] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:28,314] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:28,314] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:29,926] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:29,927] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:30,886] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:30,886] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:30,979] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 11 (__consumer_offsets-11) (reason: Adding new member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-888bd088-8f3a-4369-acd2-d4d6259a4ae6 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:21:30,990] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:30,991] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:31,011] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:31,011] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:31,015] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:31,015] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:31,016] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 11 (__consumer_offsets-47) (reason: Adding new member dev-policy-drools-pdp-0-2fa5208d-bde0-40e1-9ff3-28b98d465803 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:21:31,019] INFO [GroupCoordinator 2]: Preparing to rebalance group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP in state PreparingRebalance with old generation 11 (__consumer_offsets-14) (reason: Adding new member dev-policy-apex-pdp-0-fcbcc83b-d12f-4dc1-9f48-0138153aa108 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:21:31,022] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 7 (__consumer_offsets-23) (reason: Adding new member ph1-3b1da158-b0fa-4011-98e2-08f819b05389 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:21:31,610] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:31,610] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:31,700] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:31,700] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:34,230] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:34,230] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:51,070] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:51,070] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:21:51,082] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 9 (__consumer_offsets-32) (reason: Adding new member sdc-AUTO1-d8336e90-33eb-4fab-9e35-5ed93b6eb985 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:48,305] INFO [GroupCoordinator 2]: Member ph1-e2850052-ee4a-4552-87db-b65c7674ae6f in group policy-handler--POLICY-NOTIFICATION has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:48,311] INFO [GroupCoordinator 2]: Stabilized group policy-handler--POLICY-NOTIFICATION generation 8 (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:51,106] INFO [GroupCoordinator 2]: Member multicloud-starlingx-id-02d75252-2c12-4de4-b299-6005cd458c75 in group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:51,107] INFO [GroupCoordinator 2]: Stabilized group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 15 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:51,651] INFO [GroupCoordinator 2]: Member sdc-AUTO1-10f4b05a-da01-43bd-a32a-be627678bf51 in group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:51,652] INFO [GroupCoordinator 2]: Stabilized group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO generation 10 (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:53,904] INFO [GroupCoordinator 2]: Member dev-policy-apex-pdp-0-696de786-ea7e-4862-aace-24f1399fcedb in group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:53,906] INFO [GroupCoordinator 2]: Stabilized group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP generation 12 (__consumer_offsets-14) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:53,906] INFO [GroupCoordinator 2]: Member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-4e85278c-2eda-490c-9c10-88558db304c0 in group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:53,906] INFO [GroupCoordinator 2]: Stabilized group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP generation 12 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:53,907] INFO [GroupCoordinator 2]: Member dev-policy-drools-pdp-0-dafcc599-6634-46fe-89f8-0de19cc1ded1 in group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:53,908] INFO [GroupCoordinator 2]: Stabilized group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP generation 12 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:55,238] INFO [GroupCoordinator 2]: Member multicloud-windriver-id-ac61cea0-3b69-4509-a89b-7222111be402 in group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:55,239] INFO [GroupCoordinator 2]: Stabilized group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 16 (__consumer_offsets-41) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:55,243] INFO [GroupCoordinator 2]: Member multicloud-k8s-id-a2bb4258-6653-42b4-9b05-3a2dc782acf3 in group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:55,244] INFO [GroupCoordinator 2]: Stabilized group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 12 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:56,218] INFO [GroupCoordinator 2]: Member SO-COpenSource-Env11-914b5c03-9ef2-4f30-86c8-34c4571af9fd in group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:56,218] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 9 (__consumer_offsets-44) (reason: removing member SO-COpenSource-Env11-914b5c03-9ef2-4f30-86c8-34c4571af9fd on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:23:56,218] INFO [GroupCoordinator 2]: Group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO with generation 10 is now empty (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:51,496] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 16 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:51,546] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,546] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,719] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,719] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,745] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,745] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,760] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 15 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:51,762] INFO [GroupCoordinator 2]: Assignment received from leader for group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 12 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:51,771] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,771] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,790] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,791] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,797] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,797] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,803] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,803] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,814] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,815] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,815] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,815] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,832] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,832] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,875] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 10 (__consumer_offsets-44) (reason: Adding new member SO-COpenSource-Env11-21d6f196-6efe-47d6-8db0-4ef5b5975e6a with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:51,992] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:51,992] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:52,006] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:52,006] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:52,342] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:52,343] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:52,344] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:52,344] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:52,355] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:52,355] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:24:52,367] INFO [GroupCoordinator 2]: Assignment received from leader for group c607efa4-70be-48c2-8536-e983ebf4291f--POLICY-PDP-PAP for generation 12 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:52,692] INFO [GroupCoordinator 2]: Member sdc-AUTO1-d8336e90-33eb-4fab-9e35-5ed93b6eb985 in group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:52,692] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 10 (__consumer_offsets-32) (reason: removing member sdc-AUTO1-d8336e90-33eb-4fab-9e35-5ed93b6eb985 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:52,692] INFO [GroupCoordinator 2]: Group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO with generation 11 is now empty (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:53,297] INFO [GroupCoordinator 2]: Member dev-policy-drools-pdp-0-2fa5208d-bde0-40e1-9ff3-28b98d465803 in group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:53,297] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 12 (__consumer_offsets-47) (reason: removing member dev-policy-drools-pdp-0-2fa5208d-bde0-40e1-9ff3-28b98d465803 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:53,297] INFO [GroupCoordinator 2]: Group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP with generation 13 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:53,850] INFO [GroupCoordinator 2]: Member ph1-3b1da158-b0fa-4011-98e2-08f819b05389 in group policy-handler--POLICY-NOTIFICATION has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:53,850] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 8 (__consumer_offsets-23) (reason: removing member ph1-3b1da158-b0fa-4011-98e2-08f819b05389 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:53,850] INFO [GroupCoordinator 2]: Group policy-handler--POLICY-NOTIFICATION with generation 9 is now empty (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:54,875] INFO [GroupCoordinator 2]: Stabilized group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO generation 11 (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:54,878] INFO [GroupCoordinator 2]: Assignment received from leader for group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:55,033] INFO [GroupCoordinator 2]: Member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-888bd088-8f3a-4369-acd2-d4d6259a4ae6 in group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:55,033] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 12 (__consumer_offsets-11) (reason: removing member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-888bd088-8f3a-4369-acd2-d4d6259a4ae6 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:24:55,034] INFO [GroupCoordinator 2]: Group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP with generation 13 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:01,032] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:01,033] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:06,182] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:06,183] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:06,199] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:06,199] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:06,199] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:06,199] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:06,203] INFO [GroupCoordinator 2]: Preparing to rebalance group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP in state PreparingRebalance with old generation 13 (__consumer_offsets-11) (reason: Adding new member dev-policy-xacml-pdp-6d49cb47c8-qzhgf-13df3530-7117-47cd-8614-dcc02486ce20 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:06,203] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 13 (__consumer_offsets-47) (reason: Adding new member dev-policy-drools-pdp-0-ccc50169-a027-4427-b493-b485ca09e32f with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:07,541] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:07,541] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:07,548] INFO [GroupCoordinator 2]: Preparing to rebalance group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 11 (__consumer_offsets-32) (reason: Adding new member sdc-AUTO1-0e0a83be-f1e4-4208-9e84-e6a76c9c75eb with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:09,204] INFO [GroupCoordinator 2]: Stabilized group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP generation 14 (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:09,207] INFO [GroupCoordinator 2]: Stabilized group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP generation 14 (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:09,207] INFO [GroupCoordinator 2]: Assignment received from leader for group 57e761cc-71e8-41e2-a2c8-31297dc42876--POLICY-PDP-PAP for generation 14 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:09,210] INFO [GroupCoordinator 2]: Assignment received from leader for group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP for generation 14 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:09,402] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:09,402] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:09,403] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:09,403] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:10,149] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:10,149] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:10,178] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 9 (__consumer_offsets-23) (reason: Adding new member ph1-bd7be565-07a3-475c-b95c-e6df83abbec3 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:10,189] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:10,189] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:10,550] INFO [GroupCoordinator 2]: Stabilized group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO generation 12 (__consumer_offsets-32) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:10,552] INFO [GroupCoordinator 2]: Assignment received from leader for group sdc-AUTO--SDC-DISTR-STATUS-TOPIC-AUTO for generation 12 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:13,180] INFO [GroupCoordinator 2]: Stabilized group policy-handler--POLICY-NOTIFICATION generation 10 (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:13,182] INFO [GroupCoordinator 2]: Assignment received from leader for group policy-handler--POLICY-NOTIFICATION for generation 10 (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:25:13,510] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:25:13,511] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:26:36,050] INFO [GroupMetadataManager brokerId=2] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-04 00:36:12,391] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:36:12,392] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-04 00:36:20,188] INFO [GroupCoordinator 2]: Member multicloud-k8s-id-52d30d41-2b3b-42e4-9ae6-dfeb8f54465b in group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:20,189] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 12 (__consumer_offsets-47) (reason: removing member multicloud-k8s-id-52d30d41-2b3b-42e4-9ae6-dfeb8f54465b on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:20,189] INFO [GroupCoordinator 2]: Group multicloud-k8s-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 13 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:20,483] INFO [GroupCoordinator 2]: Member SO-COpenSource-Env11-21d6f196-6efe-47d6-8db0-4ef5b5975e6a in group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:20,484] INFO [GroupCoordinator 2]: Preparing to rebalance group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO in state PreparingRebalance with old generation 11 (__consumer_offsets-44) (reason: removing member SO-COpenSource-Env11-21d6f196-6efe-47d6-8db0-4ef5b5975e6a on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:20,484] INFO [GroupCoordinator 2]: Group SO-OpenSource-Env11--SDC-DISTR-STATUS-TOPIC-AUTO with generation 12 is now empty (__consumer_offsets-44) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:20,517] INFO [GroupCoordinator 2]: Member dev-policy-drools-pdp-0-ccc50169-a027-4427-b493-b485ca09e32f in group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:20,517] INFO [GroupCoordinator 2]: Preparing to rebalance group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP in state PreparingRebalance with old generation 14 (__consumer_offsets-47) (reason: removing member dev-policy-drools-pdp-0-ccc50169-a027-4427-b493-b485ca09e32f on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:20,517] INFO [GroupCoordinator 2]: Group 71ec5384-a14b-4f3f-9123-95f1f79182b0--POLICY-PDP-PAP with generation 15 is now empty (__consumer_offsets-47) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:21,710] INFO [GroupCoordinator 2]: Member ph1-bd7be565-07a3-475c-b95c-e6df83abbec3 in group policy-handler--POLICY-NOTIFICATION has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:21,710] INFO [GroupCoordinator 2]: Preparing to rebalance group policy-handler--POLICY-NOTIFICATION in state PreparingRebalance with old generation 10 (__consumer_offsets-23) (reason: removing member ph1-bd7be565-07a3-475c-b95c-e6df83abbec3 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:21,710] INFO [GroupCoordinator 2]: Group policy-handler--POLICY-NOTIFICATION with generation 11 is now empty (__consumer_offsets-23) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:22,979] INFO [GroupCoordinator 2]: Member multicloud-starlingx-id-5ded1536-c6a8-420b-89fe-f7b240a7b6d1 in group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:22,979] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 15 (__consumer_offsets-11) (reason: removing member multicloud-starlingx-id-5ded1536-c6a8-420b-89fe-f7b240a7b6d1 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:22,979] INFO [GroupCoordinator 2]: Group multicloud-starlingx-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 16 is now empty (__consumer_offsets-11) (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:23,249] INFO [GroupCoordinator 2]: Member multicloud-windriver-id-0cebe275-7e32-4b7e-b8dd-13c276d4b1bf in group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-04 00:36:23,249] INFO [GroupCoordinator 2]: Preparing to rebalance group multicloud-windriver-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 16 (__consumer_offsets-41) (reason: removing member multicloud-windriver-id-0cebe275-7e32-4b7e-b8dd-13c276d4b1bf on LeaveGroup