Results

+ export KAFKA_BROKER_ID=0
+ cp /opt/app/osaaf/local/cadi.properties /etc/kafka/data/cadi.properties
+ export KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://172.16.10.103:30490,INTERNAL_SASL_PLAINTEXT://:9092
+ exec /etc/confluent/docker/run
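
The '+'-prefixed lines above are shell trace (set -x) output from the container entrypoint; once /etc/confluent/docker/run takes over, it prints the container environment, which Kubernetes has populated with docker-link-style discovery variables (<SERVICE>_SERVICE_HOST, <SERVICE>_SERVICE_PORT, <SERVICE>_PORT_<N>_TCP_*) for every Service visible to the pod. As a minimal sketch, not part of the log, the snippet below shows how a script in the same namespace could consume two of the variables from the dump that follows; the /topics path is illustrative of the DMaaP Message Router listing endpoint and is an assumption, not taken from this log:

    # Hypothetical consumer of the Kubernetes-injected variables dumped below.
    # MESSAGE_ROUTER_SERVICE_HOST and MESSAGE_ROUTER_SERVICE_PORT_HTTP_API
    # both appear in the environment listing.
    MR_BASE="http://${MESSAGE_ROUTER_SERVICE_HOST}:${MESSAGE_ROUTER_SERVICE_PORT_HTTP_API}"
    curl -s "${MR_BASE}/topics"
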
===> ENV Variables ...
A1POLICYMANAGEMENT_EXTERNAL_PORT=tcp://10.96.165.175:8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP=tcp://10.96.165.175:8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_ADDR=10.96.165.175
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PORT=8433
A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PROTO=tcp
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_HOST=10.96.165.175
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT=8433
A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT_HTTPS_API=8433
A1POLICYMANAGEMENT_PORT=tcp://10.96.78.228:8433
A1POLICYMANAGEMENT_PORT_8081_TCP=tcp://10.96.78.228:8081
A1POLICYMANAGEMENT_PORT_8081_TCP_ADDR=10.96.78.228
A1POLICYMANAGEMENT_PORT_8081_TCP_PORT=8081
A1POLICYMANAGEMENT_PORT_8081_TCP_PROTO=tcp
A1POLICYMANAGEMENT_PORT_8433_TCP=tcp://10.96.78.228:8433
A1POLICYMANAGEMENT_PORT_8433_TCP_ADDR=10.96.78.228
A1POLICYMANAGEMENT_PORT_8433_TCP_PORT=8433
A1POLICYMANAGEMENT_PORT_8433_TCP_PROTO=tcp
A1POLICYMANAGEMENT_SERVICE_HOST=10.96.78.228
A1POLICYMANAGEMENT_SERVICE_PORT=8433
A1POLICYMANAGEMENT_SERVICE_PORT_HTTPS_API=8433
A1POLICYMANAGEMENT_SERVICE_PORT_HTTP_API=8081
AAF_CASS_PORT=tcp://10.96.158.41:7000
AAF_CASS_PORT_7000_TCP=tcp://10.96.158.41:7000
AAF_CASS_PORT_7000_TCP_ADDR=10.96.158.41
AAF_CASS_PORT_7000_TCP_PORT=7000
AAF_CASS_PORT_7000_TCP_PROTO=tcp
AAF_CASS_PORT_7001_TCP=tcp://10.96.158.41:7001
AAF_CASS_PORT_7001_TCP_ADDR=10.96.158.41
AAF_CASS_PORT_7001_TCP_PORT=7001
AAF_CASS_PORT_7001_TCP_PROTO=tcp
AAF_CASS_PORT_9042_TCP=tcp://10.96.158.41:9042
AAF_CASS_PORT_9042_TCP_ADDR=10.96.158.41
AAF_CASS_PORT_9042_TCP_PORT=9042
AAF_CASS_PORT_9042_TCP_PROTO=tcp
AAF_CASS_PORT_9160_TCP=tcp://10.96.158.41:9160
AAF_CASS_PORT_9160_TCP_ADDR=10.96.158.41
AAF_CASS_PORT_9160_TCP_PORT=9160
AAF_CASS_PORT_9160_TCP_PROTO=tcp
AAF_CASS_SERVICE_HOST=10.96.158.41
AAF_CASS_SERVICE_PORT=7000
AAF_CASS_SERVICE_PORT_TCP_CQL=9042
AAF_CASS_SERVICE_PORT_TCP_INTRA=7000
AAF_CASS_SERVICE_PORT_TCP_THRIFT=9160
AAF_CASS_SERVICE_PORT_TLS=7001
AAF_CM_PORT=tcp://10.96.219.13:8150
AAF_CM_PORT_8150_TCP=tcp://10.96.219.13:8150
AAF_CM_PORT_8150_TCP_ADDR=10.96.219.13
AAF_CM_PORT_8150_TCP_PORT=8150
AAF_CM_PORT_8150_TCP_PROTO=tcp
AAF_CM_SERVICE_HOST=10.96.219.13
AAF_CM_SERVICE_PORT=8150
AAF_CM_SERVICE_PORT_API=8150
AAF_FS_PORT=tcp://10.96.117.89:8096
AAF_FS_PORT_8096_TCP=tcp://10.96.117.89:8096
AAF_FS_PORT_8096_TCP_ADDR=10.96.117.89
AAF_FS_PORT_8096_TCP_PORT=8096
AAF_FS_PORT_8096_TCP_PROTO=tcp
AAF_FS_SERVICE_HOST=10.96.117.89
AAF_FS_SERVICE_PORT=8096
AAF_FS_SERVICE_PORT_API=8096
AAF_GUI_PORT=tcp://10.96.215.154:8200
AAF_GUI_PORT_8200_TCP=tcp://10.96.215.154:8200
AAF_GUI_PORT_8200_TCP_ADDR=10.96.215.154
AAF_GUI_PORT_8200_TCP_PORT=8200
AAF_GUI_PORT_8200_TCP_PROTO=tcp
AAF_GUI_SERVICE_HOST=10.96.215.154
AAF_GUI_SERVICE_PORT=8200
AAF_GUI_SERVICE_PORT_GUI=8200
AAF_HELLO_PORT=tcp://10.96.147.90:8130
AAF_HELLO_PORT_8130_TCP=tcp://10.96.147.90:8130
AAF_HELLO_PORT_8130_TCP_ADDR=10.96.147.90
AAF_HELLO_PORT_8130_TCP_PORT=8130
AAF_HELLO_PORT_8130_TCP_PROTO=tcp
AAF_HELLO_SERVICE_HOST=10.96.147.90
AAF_HELLO_SERVICE_PORT=8130
AAF_HELLO_SERVICE_PORT_API=8130
AAF_LOCATE_PORT=tcp://10.96.110.170:8095
AAF_LOCATE_PORT_8095_TCP=tcp://10.96.110.170:8095
AAF_LOCATE_PORT_8095_TCP_ADDR=10.96.110.170
AAF_LOCATE_PORT_8095_TCP_PORT=8095
AAF_LOCATE_PORT_8095_TCP_PROTO=tcp
AAF_LOCATE_SERVICE_HOST=10.96.110.170
AAF_LOCATE_SERVICE_PORT=8095
AAF_LOCATE_SERVICE_PORT_API=8095
AAF_OAUTH_PORT=tcp://10.96.216.15:8140
AAF_OAUTH_PORT_8140_TCP=tcp://10.96.216.15:8140
AAF_OAUTH_PORT_8140_TCP_ADDR=10.96.216.15
AAF_OAUTH_PORT_8140_TCP_PORT=8140
AAF_OAUTH_PORT_8140_TCP_PROTO=tcp
AAF_OAUTH_SERVICE_HOST=10.96.216.15
AAF_OAUTH_SERVICE_PORT=8140
AAF_OAUTH_SERVICE_PORT_API=8140
AAF_SERVICE_PORT=tcp://10.96.217.48:8100
AAF_SERVICE_PORT_8100_TCP=tcp://10.96.217.48:8100
AAF_SERVICE_PORT_8100_TCP_ADDR=10.96.217.48
AAF_SERVICE_PORT_8100_TCP_PORT=8100
AAF_SERVICE_PORT_8100_TCP_PROTO=tcp
AAF_SERVICE_SERVICE_HOST=10.96.217.48
AAF_SERVICE_SERVICE_PORT=8100
AAF_SERVICE_SERVICE_PORT_API=8100
AAF_SMS_DB_PORT=tcp://10.96.38.144:8200
AAF_SMS_DB_PORT_8200_TCP=tcp://10.96.38.144:8200
AAF_SMS_DB_PORT_8200_TCP_ADDR=10.96.38.144
AAF_SMS_DB_PORT_8200_TCP_PORT=8200
AAF_SMS_DB_PORT_8200_TCP_PROTO=tcp
AAF_SMS_DB_SERVICE_HOST=10.96.38.144
AAF_SMS_DB_SERVICE_PORT=8200
AAF_SMS_DB_SERVICE_PORT_AAF_SMS_DB=8200
AAF_SMS_PORT=tcp://10.96.52.97:10443
AAF_SMS_PORT_10443_TCP=tcp://10.96.52.97:10443
AAF_SMS_PORT_10443_TCP_ADDR=10.96.52.97
AAF_SMS_PORT_10443_TCP_PORT=10443
AAF_SMS_PORT_10443_TCP_PROTO=tcp
AAF_SMS_SERVICE_HOST=10.96.52.97
AAF_SMS_SERVICE_PORT=10443
AAI_BABEL_PORT=tcp://10.96.186.187:9516
AAI_BABEL_PORT_9516_TCP=tcp://10.96.186.187:9516
AAI_BABEL_PORT_9516_TCP_ADDR=10.96.186.187
AAI_BABEL_PORT_9516_TCP_PORT=9516
AAI_BABEL_PORT_9516_TCP_PROTO=tcp
AAI_BABEL_SERVICE_HOST=10.96.186.187
AAI_BABEL_SERVICE_PORT=9516
AAI_BABEL_SERVICE_PORT_BABEL=9516
AAI_MODELLOADER_PORT=tcp://10.96.149.129:8080
AAI_MODELLOADER_PORT_8080_TCP=tcp://10.96.149.129:8080
AAI_MODELLOADER_PORT_8080_TCP_ADDR=10.96.149.129
AAI_MODELLOADER_PORT_8080_TCP_PORT=8080
AAI_MODELLOADER_PORT_8080_TCP_PROTO=tcp
AAI_MODELLOADER_PORT_8443_TCP=tcp://10.96.149.129:8443
AAI_MODELLOADER_PORT_8443_TCP_ADDR=10.96.149.129
AAI_MODELLOADER_PORT_8443_TCP_PORT=8443
AAI_MODELLOADER_PORT_8443_TCP_PROTO=tcp
AAI_MODELLOADER_SERVICE_HOST=10.96.149.129
AAI_MODELLOADER_SERVICE_PORT=8080
AAI_MODELLOADER_SERVICE_PORT_AAI_MODELLOADER=8080
AAI_MODELLOADER_SERVICE_PORT_AAI_MODELLOADER_SSL=8443
AAI_PORT=tcp://10.96.26.175:8443
AAI_PORT_8443_TCP=tcp://10.96.26.175:8443
AAI_PORT_8443_TCP_ADDR=10.96.26.175
AAI_PORT_8443_TCP_PORT=8443
AAI_PORT_8443_TCP_PROTO=tcp
AAI_RESOURCES_PORT=tcp://10.96.137.254:8447
AAI_RESOURCES_PORT_5005_TCP=tcp://10.96.137.254:5005
AAI_RESOURCES_PORT_5005_TCP_ADDR=10.96.137.254
AAI_RESOURCES_PORT_5005_TCP_PORT=5005
AAI_RESOURCES_PORT_5005_TCP_PROTO=tcp
AAI_RESOURCES_PORT_8447_TCP=tcp://10.96.137.254:8447
AAI_RESOURCES_PORT_8447_TCP_ADDR=10.96.137.254
AAI_RESOURCES_PORT_8447_TCP_PORT=8447
AAI_RESOURCES_PORT_8447_TCP_PROTO=tcp
AAI_RESOURCES_SERVICE_HOST=10.96.137.254
AAI_RESOURCES_SERVICE_PORT=8447
AAI_RESOURCES_SERVICE_PORT_AAI_RESOURCES_5005=5005
AAI_RESOURCES_SERVICE_PORT_AAI_RESOURCES_8447=8447
AAI_SERVICE_HOST=10.96.26.175
AAI_SERVICE_PORT=8443
AAI_SERVICE_PORT_AAI_SSL=8443
AAI_SPARKY_BE_PORT=tcp://10.96.91.66:8000
AAI_SPARKY_BE_PORT_8000_TCP=tcp://10.96.91.66:8000
AAI_SPARKY_BE_PORT_8000_TCP_ADDR=10.96.91.66
AAI_SPARKY_BE_PORT_8000_TCP_PORT=8000
AAI_SPARKY_BE_PORT_8000_TCP_PROTO=tcp
AAI_SPARKY_BE_SERVICE_HOST=10.96.91.66
AAI_SPARKY_BE_SERVICE_PORT=8000
AAI_SPARKY_BE_SERVICE_PORT_AAI_SPARKY_BE=8000
AAI_TRAVERSAL_PORT=tcp://10.96.174.55:8446
AAI_TRAVERSAL_PORT_5005_TCP=tcp://10.96.174.55:5005
AAI_TRAVERSAL_PORT_5005_TCP_ADDR=10.96.174.55
AAI_TRAVERSAL_PORT_5005_TCP_PORT=5005
AAI_TRAVERSAL_PORT_5005_TCP_PROTO=tcp
AAI_TRAVERSAL_PORT_8446_TCP=tcp://10.96.174.55:8446
AAI_TRAVERSAL_PORT_8446_TCP_ADDR=10.96.174.55
AAI_TRAVERSAL_PORT_8446_TCP_PORT=8446
AAI_TRAVERSAL_PORT_8446_TCP_PROTO=tcp
AAI_TRAVERSAL_SERVICE_HOST=10.96.174.55
AAI_TRAVERSAL_SERVICE_PORT=8446
AAI_TRAVERSAL_SERVICE_PORT_AAI_TRAVERSAL_5005=5005
AAI_TRAVERSAL_SERVICE_PORT_AAI_TRAVERSAL_8446=8446
ALLOW_UNSIGNED=false
APPC_ANSIBLE_SERVER_PORT=tcp://10.96.139.18:8000
APPC_ANSIBLE_SERVER_PORT_8000_TCP=tcp://10.96.139.18:8000
APPC_ANSIBLE_SERVER_PORT_8000_TCP_ADDR=10.96.139.18
APPC_ANSIBLE_SERVER_PORT_8000_TCP_PORT=8000
APPC_ANSIBLE_SERVER_PORT_8000_TCP_PROTO=tcp
APPC_ANSIBLE_SERVER_SERVICE_HOST=10.96.139.18
APPC_ANSIBLE_SERVER_SERVICE_PORT=8000
APPC_ANSIBLE_SERVER_SERVICE_PORT_APPC_ANSIBLE_SERVER=8000
APPC_CDT_PORT=tcp://10.96.148.41:18080
APPC_CDT_PORT_18080_TCP=tcp://10.96.148.41:18080
APPC_CDT_PORT_18080_TCP_ADDR=10.96.148.41
APPC_CDT_PORT_18080_TCP_PORT=18080
APPC_CDT_PORT_18080_TCP_PROTO=tcp
APPC_CDT_SERVICE_HOST=10.96.148.41
APPC_CDT_SERVICE_PORT=18080
APPC_CDT_SERVICE_PORT_APPC_CDT=18080
APPC_DB_PORT=tcp://10.96.159.171:3306
APPC_DB_PORT_3306_TCP=tcp://10.96.159.171:3306
APPC_DB_PORT_3306_TCP_ADDR=10.96.159.171
APPC_DB_PORT_3306_TCP_PORT=3306
APPC_DB_PORT_3306_TCP_PROTO=tcp
APPC_DB_SERVICE_HOST=10.96.159.171
APPC_DB_SERVICE_PORT=3306
APPC_DB_SERVICE_PORT_MYSQL=3306
APPC_DGBUILDER_PORT=tcp://10.96.208.115:3000
APPC_DGBUILDER_PORT_3000_TCP=tcp://10.96.208.115:3000
APPC_DGBUILDER_PORT_3000_TCP_ADDR=10.96.208.115
APPC_DGBUILDER_PORT_3000_TCP_PORT=3000
APPC_DGBUILDER_PORT_3000_TCP_PROTO=tcp
APPC_DGBUILDER_SERVICE_HOST=10.96.208.115
APPC_DGBUILDER_SERVICE_PORT=3000
APPC_DGBUILDER_SERVICE_PORT_DGBUILDER=3000
APPC_PORT=tcp://10.96.2.65:8443
APPC_PORT_1830_TCP=tcp://10.96.2.65:1830
APPC_PORT_1830_TCP_ADDR=10.96.2.65
APPC_PORT_1830_TCP_PORT=1830
APPC_PORT_1830_TCP_PROTO=tcp
APPC_PORT_8443_TCP=tcp://10.96.2.65:8443
APPC_PORT_8443_TCP_ADDR=10.96.2.65
APPC_PORT_8443_TCP_PORT=8443
APPC_PORT_8443_TCP_PROTO=tcp
APPC_PORT_9090_TCP=tcp://10.96.2.65:9090
APPC_PORT_9090_TCP_ADDR=10.96.2.65
APPC_PORT_9090_TCP_PORT=9090
APPC_PORT_9090_TCP_PROTO=tcp
APPC_SERVICE_HOST=10.96.2.65
APPC_SERVICE_PORT=8443
APPC_SERVICE_PORT_APPC_1830=1830
APPC_SERVICE_PORT_APPC_8443=8443
APPC_SERVICE_PORT_APPC_9090=9090
AWX_POSTGRESQL_PORT=tcp://10.96.193.219:5432
AWX_POSTGRESQL_PORT_5432_TCP=tcp://10.96.193.219:5432
AWX_POSTGRESQL_PORT_5432_TCP_ADDR=10.96.193.219
AWX_POSTGRESQL_PORT_5432_TCP_PORT=5432
AWX_POSTGRESQL_PORT_5432_TCP_PROTO=tcp
AWX_POSTGRESQL_SERVICE_HOST=10.96.193.219
AWX_POSTGRESQL_SERVICE_PORT=5432
AWX_POSTGRESQL_SERVICE_PORT_AWX_POSTGRESQL=5432
AWX_RABBITMQ_PORT=tcp://10.96.87.196:15672
AWX_RABBITMQ_PORT_15672_TCP=tcp://10.96.87.196:15672
AWX_RABBITMQ_PORT_15672_TCP_ADDR=10.96.87.196
AWX_RABBITMQ_PORT_15672_TCP_PORT=15672
AWX_RABBITMQ_PORT_15672_TCP_PROTO=tcp
AWX_RABBITMQ_PORT_5672_TCP=tcp://10.96.87.196:5672
AWX_RABBITMQ_PORT_5672_TCP_ADDR=10.96.87.196
AWX_RABBITMQ_PORT_5672_TCP_PORT=5672
AWX_RABBITMQ_PORT_5672_TCP_PROTO=tcp
AWX_RABBITMQ_SERVICE_HOST=10.96.87.196
AWX_RABBITMQ_SERVICE_PORT=15672
AWX_RABBITMQ_SERVICE_PORT_AMQP=5672
AWX_RABBITMQ_SERVICE_PORT_HTTP=15672
AWX_RMQ_MGMT_PORT=tcp://10.96.186.208:15672
AWX_RMQ_MGMT_PORT_15672_TCP=tcp://10.96.186.208:15672
AWX_RMQ_MGMT_PORT_15672_TCP_ADDR=10.96.186.208
AWX_RMQ_MGMT_PORT_15672_TCP_PORT=15672
AWX_RMQ_MGMT_PORT_15672_TCP_PROTO=tcp
AWX_RMQ_MGMT_SERVICE_HOST=10.96.186.208
AWX_RMQ_MGMT_SERVICE_PORT=15672
AWX_RMQ_MGMT_SERVICE_PORT_RMQMGMT=15672
AWX_WEB_PORT=tcp://10.96.197.49:8052
AWX_WEB_PORT_8052_TCP=tcp://10.96.197.49:8052
AWX_WEB_PORT_8052_TCP_ADDR=10.96.197.49
AWX_WEB_PORT_8052_TCP_PORT=8052
AWX_WEB_PORT_8052_TCP_PROTO=tcp
AWX_WEB_SERVICE_HOST=10.96.197.49
AWX_WEB_SERVICE_PORT=8052
AWX_WEB_SERVICE_PORT_WEB=8052
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT=tcp://10.96.137.198:5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP=tcp://10.96.137.198:5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_ADDR=10.96.137.198
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PORT=5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_HOST=10.96.137.198
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT=5701
CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT_BLUEPRINTS_PROCESSOR_CLUSTER=5701
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT=tcp://10.96.190.171:9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP=tcp://10.96.190.171:9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_ADDR=10.96.190.171
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PORT=9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_HOST=10.96.190.171
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT=9111
CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT_BLUEPRINTS_PROCESSOR_GRPC=9111
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT=tcp://10.96.158.136:8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP=tcp://10.96.158.136:8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_ADDR=10.96.158.136
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PORT=8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PROTO=tcp
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_HOST=10.96.158.136
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT=8080
CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT_BLUEPRINTS_PROCESSOR_HTTP=8080
CDS_COMMAND_EXECUTOR_PORT=tcp://10.96.238.157:50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP=tcp://10.96.238.157:50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_ADDR=10.96.238.157
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PORT=50051
CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PROTO=tcp
CDS_COMMAND_EXECUTOR_SERVICE_HOST=10.96.238.157
CDS_COMMAND_EXECUTOR_SERVICE_PORT=50051
CDS_COMMAND_EXECUTOR_SERVICE_PORT_COMMAND_EXECUTOR_GRPC=50051
CDS_DB_PORT=tcp://10.96.170.243:3306
CDS_DB_PORT_3306_TCP=tcp://10.96.170.243:3306
CDS_DB_PORT_3306_TCP_ADDR=10.96.170.243
CDS_DB_PORT_3306_TCP_PORT=3306
CDS_DB_PORT_3306_TCP_PROTO=tcp
CDS_DB_SERVICE_HOST=10.96.170.243
CDS_DB_SERVICE_PORT=3306
CDS_DB_SERVICE_PORT_MYSQL=3306
CDS_PY_EXECUTOR_PORT=tcp://10.96.119.248:50052
CDS_PY_EXECUTOR_PORT_50052_TCP=tcp://10.96.119.248:50052
CDS_PY_EXECUTOR_PORT_50052_TCP_ADDR=10.96.119.248
CDS_PY_EXECUTOR_PORT_50052_TCP_PORT=50052
CDS_PY_EXECUTOR_PORT_50052_TCP_PROTO=tcp
CDS_PY_EXECUTOR_PORT_50053_TCP=tcp://10.96.119.248:50053
CDS_PY_EXECUTOR_PORT_50053_TCP_ADDR=10.96.119.248
CDS_PY_EXECUTOR_PORT_50053_TCP_PORT=50053
CDS_PY_EXECUTOR_PORT_50053_TCP_PROTO=tcp
CDS_PY_EXECUTOR_SERVICE_HOST=10.96.119.248
CDS_PY_EXECUTOR_SERVICE_PORT=50052
CDS_PY_EXECUTOR_SERVICE_PORT_EXECUTOR_GRPC=50052
CDS_PY_EXECUTOR_SERVICE_PORT_MANAGER_GRPC=50053
CDS_SDC_LISTENER_PORT=tcp://10.96.213.156:8080
CDS_SDC_LISTENER_PORT_8080_TCP=tcp://10.96.213.156:8080
CDS_SDC_LISTENER_PORT_8080_TCP_ADDR=10.96.213.156
CDS_SDC_LISTENER_PORT_8080_TCP_PORT=8080
CDS_SDC_LISTENER_PORT_8080_TCP_PROTO=tcp
CDS_SDC_LISTENER_SERVICE_HOST=10.96.213.156
CDS_SDC_LISTENER_SERVICE_PORT=8080
CDS_SDC_LISTENER_SERVICE_PORT_CDS_SDC_LISTENER_HTTP=8080
CDS_UI_PORT=tcp://10.96.84.239:3000
CDS_UI_PORT_3000_TCP=tcp://10.96.84.239:3000
CDS_UI_PORT_3000_TCP_ADDR=10.96.84.239
CDS_UI_PORT_3000_TCP_PORT=3000
CDS_UI_PORT_3000_TCP_PROTO=tcp
CDS_UI_SERVICE_HOST=10.96.84.239
CDS_UI_SERVICE_PORT=3000
CDS_UI_SERVICE_PORT_CDS_UI_3000=3000
CLI_PORT=tcp://10.96.129.5:443
CLI_PORT_443_TCP=tcp://10.96.129.5:443
CLI_PORT_443_TCP_ADDR=10.96.129.5
CLI_PORT_443_TCP_PORT=443
CLI_PORT_443_TCP_PROTO=tcp
CLI_PORT_9090_TCP=tcp://10.96.129.5:9090
CLI_PORT_9090_TCP_ADDR=10.96.129.5
CLI_PORT_9090_TCP_PORT=9090
CLI_PORT_9090_TCP_PROTO=tcp
CLI_SERVICE_HOST=10.96.129.5
CLI_SERVICE_PORT=443
CLI_SERVICE_PORT_CLI443=443
CLI_SERVICE_PORT_CLI9090=9090
CMSO_DB_PORT=tcp://10.96.186.100:3306
CMSO_DB_PORT_3306_TCP=tcp://10.96.186.100:3306
CMSO_DB_PORT_3306_TCP_ADDR=10.96.186.100
CMSO_DB_PORT_3306_TCP_PORT=3306
CMSO_DB_PORT_3306_TCP_PROTO=tcp
CMSO_DB_SERVICE_HOST=10.96.186.100
CMSO_DB_SERVICE_PORT=3306
CMSO_DB_SERVICE_PORT_MYSQL=3306
COMPONENT=kafka
CONFIG_BINDING_SERVICE_PORT=tcp://10.96.169.177:10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP=tcp://10.96.169.177:10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP_ADDR=10.96.169.177
CONFIG_BINDING_SERVICE_PORT_10000_TCP_PORT=10000
CONFIG_BINDING_SERVICE_PORT_10000_TCP_PROTO=tcp
CONFIG_BINDING_SERVICE_PORT_10443_TCP=tcp://10.96.169.177:10443
CONFIG_BINDING_SERVICE_PORT_10443_TCP_ADDR=10.96.169.177
CONFIG_BINDING_SERVICE_PORT_10443_TCP_PORT=10443
CONFIG_BINDING_SERVICE_PORT_10443_TCP_PROTO=tcp
CONFIG_BINDING_SERVICE_SERVICE_HOST=10.96.169.177
CONFIG_BINDING_SERVICE_SERVICE_PORT=10000
CONFIG_BINDING_SERVICE_SERVICE_PORT_CONFIG_BINDING_SERVICE_INSECURE=10000
CONFIG_BINDING_SERVICE_SERVICE_PORT_CONFIG_BINDING_SERVICE_SECURE=10443
CONFLUENT_DEB_VERSION=1
CONFLUENT_MAJOR_VERSION=5
CONFLUENT_MINOR_VERSION=3
CONFLUENT_MVN_LABEL=
CONFLUENT_PATCH_VERSION=1
CONFLUENT_PLATFORM_LABEL=
CONFLUENT_VERSION=5.3.1
CONSUL_SERVER_UI_PORT=tcp://10.96.72.68:8500
CONSUL_SERVER_UI_PORT_8500_TCP=tcp://10.96.72.68:8500
CONSUL_SERVER_UI_PORT_8500_TCP_ADDR=10.96.72.68
CONSUL_SERVER_UI_PORT_8500_TCP_PORT=8500
CONSUL_SERVER_UI_PORT_8500_TCP_PROTO=tcp
CONSUL_SERVER_UI_SERVICE_HOST=10.96.72.68
CONSUL_SERVER_UI_SERVICE_PORT=8500
CONSUL_SERVER_UI_SERVICE_PORT_CONSUL_UI=8500
CPS_PG_PRIMARY_PORT=tcp://10.96.115.171:5432
CPS_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.115.171:5432
CPS_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.115.171
CPS_PG_PRIMARY_PORT_5432_TCP_PORT=5432
CPS_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
CPS_PG_PRIMARY_SERVICE_HOST=10.96.115.171
CPS_PG_PRIMARY_SERVICE_PORT=5432
CPS_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
CPS_PG_REPLICA_PORT=tcp://10.96.83.252:5432
CPS_PG_REPLICA_PORT_5432_TCP=tcp://10.96.83.252:5432
CPS_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.83.252
CPS_PG_REPLICA_PORT_5432_TCP_PORT=5432
CPS_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
CPS_PG_REPLICA_SERVICE_HOST=10.96.83.252
CPS_PG_REPLICA_SERVICE_PORT=5432
CPS_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
CPS_PORT=tcp://10.96.127.217:8080
CPS_PORT_8080_TCP=tcp://10.96.127.217:8080
CPS_PORT_8080_TCP_ADDR=10.96.127.217
CPS_PORT_8080_TCP_PORT=8080
CPS_PORT_8080_TCP_PROTO=tcp
CPS_POSTGRES_PORT=tcp://10.96.228.181:5432
CPS_POSTGRES_PORT_5432_TCP=tcp://10.96.228.181:5432
CPS_POSTGRES_PORT_5432_TCP_ADDR=10.96.228.181
CPS_POSTGRES_PORT_5432_TCP_PORT=5432
CPS_POSTGRES_PORT_5432_TCP_PROTO=tcp
CPS_POSTGRES_SERVICE_HOST=10.96.228.181
CPS_POSTGRES_SERVICE_PORT=5432
CPS_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
CPS_SERVICE_HOST=10.96.127.217
CPS_SERVICE_PORT=8080
CPS_SERVICE_PORT_HTTP=8080
CUB_CLASSPATH=/etc/confluent/docker/docker-utils.jar
DASHBOARD_PORT=tcp://10.96.245.218:8443
DASHBOARD_PORT_8443_TCP=tcp://10.96.245.218:8443
DASHBOARD_PORT_8443_TCP_ADDR=10.96.245.218
DASHBOARD_PORT_8443_TCP_PORT=8443
DASHBOARD_PORT_8443_TCP_PROTO=tcp
DASHBOARD_SERVICE_HOST=10.96.245.218
DASHBOARD_SERVICE_PORT=8443
DASHBOARD_SERVICE_PORT_DASHBOARD=8443
DBC_PG_PRIMARY_PORT=tcp://10.96.130.234:5432
DBC_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.130.234:5432
DBC_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.130.234
DBC_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DBC_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DBC_PG_PRIMARY_SERVICE_HOST=10.96.130.234
DBC_PG_PRIMARY_SERVICE_PORT=5432
DBC_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DBC_PG_REPLICA_PORT=tcp://10.96.182.246:5432
DBC_PG_REPLICA_PORT_5432_TCP=tcp://10.96.182.246:5432
DBC_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.182.246
DBC_PG_REPLICA_PORT_5432_TCP_PORT=5432
DBC_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DBC_PG_REPLICA_SERVICE_HOST=10.96.182.246
DBC_PG_REPLICA_SERVICE_PORT=5432
DBC_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DBC_POSTGRES_PORT=tcp://10.96.234.188:5432
DBC_POSTGRES_PORT_5432_TCP=tcp://10.96.234.188:5432
DBC_POSTGRES_PORT_5432_TCP_ADDR=10.96.234.188
DBC_POSTGRES_PORT_5432_TCP_PORT=5432
DBC_POSTGRES_PORT_5432_TCP_PROTO=tcp
DBC_POSTGRES_SERVICE_HOST=10.96.234.188
DBC_POSTGRES_SERVICE_PORT=5432
DBC_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_DESIGNTOOL_PORT=tcp://10.96.40.85:8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP=tcp://10.96.40.85:8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_ADDR=10.96.40.85
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_PORT=8080
DCAEMOD_DESIGNTOOL_PORT_8080_TCP_PROTO=tcp
DCAEMOD_DESIGNTOOL_SERVICE_HOST=10.96.40.85
DCAEMOD_DESIGNTOOL_SERVICE_PORT=8080
DCAEMOD_DESIGNTOOL_SERVICE_PORT_HTTP=8080
DCAEMOD_DISTRIBUTOR_API_PORT=tcp://10.96.253.33:8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP=tcp://10.96.253.33:8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_ADDR=10.96.253.33
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_PORT=8080
DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_PROTO=tcp
DCAEMOD_DISTRIBUTOR_API_SERVICE_HOST=10.96.253.33
DCAEMOD_DISTRIBUTOR_API_SERVICE_PORT=8080
DCAEMOD_DISTRIBUTOR_API_SERVICE_PORT_HTTP=8080
DCAEMOD_GENPROCESSOR_PORT=tcp://10.96.158.158:8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP=tcp://10.96.158.158:8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_ADDR=10.96.158.158
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_PORT=8080
DCAEMOD_GENPROCESSOR_PORT_8080_TCP_PROTO=tcp
DCAEMOD_GENPROCESSOR_SERVICE_HOST=10.96.158.158
DCAEMOD_GENPROCESSOR_SERVICE_PORT=8080
DCAEMOD_GENPROCESSOR_SERVICE_PORT_HTTP=8080
DCAEMOD_HEALTHCHECK_PORT=tcp://10.96.89.175:8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP=tcp://10.96.89.175:8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_ADDR=10.96.89.175
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_PORT=8080
DCAEMOD_HEALTHCHECK_PORT_8080_TCP_PROTO=tcp
DCAEMOD_HEALTHCHECK_SERVICE_HOST=10.96.89.175
DCAEMOD_HEALTHCHECK_SERVICE_PORT=8080
DCAEMOD_HEALTHCHECK_SERVICE_PORT_HTTP=8080
DCAEMOD_NIFI_REGISTRY_PORT=tcp://10.96.142.128:18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP=tcp://10.96.142.128:18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_ADDR=10.96.142.128
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_PORT=18080
DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_PROTO=tcp
DCAEMOD_NIFI_REGISTRY_SERVICE_HOST=10.96.142.128
DCAEMOD_NIFI_REGISTRY_SERVICE_PORT=18080
DCAEMOD_NIFI_REGISTRY_SERVICE_PORT_HTTP=18080
DCAEMOD_ONBOARDING_API_PORT=tcp://10.96.178.1:8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP=tcp://10.96.178.1:8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_ADDR=10.96.178.1
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_PORT=8080
DCAEMOD_ONBOARDING_API_PORT_8080_TCP_PROTO=tcp
DCAEMOD_ONBOARDING_API_SERVICE_HOST=10.96.178.1
DCAEMOD_ONBOARDING_API_SERVICE_PORT=8080
DCAEMOD_ONBOARDING_API_SERVICE_PORT_HTTP=8080
DCAEMOD_PG_PRIMARY_PORT=tcp://10.96.145.177:5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.145.177:5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.145.177
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAEMOD_PG_PRIMARY_SERVICE_HOST=10.96.145.177
DCAEMOD_PG_PRIMARY_SERVICE_PORT=5432
DCAEMOD_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_PG_REPLICA_PORT=tcp://10.96.149.138:5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP=tcp://10.96.149.138:5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.149.138
DCAEMOD_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAEMOD_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAEMOD_PG_REPLICA_SERVICE_HOST=10.96.149.138
DCAEMOD_PG_REPLICA_SERVICE_PORT=5432
DCAEMOD_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_POSTGRES_PORT=tcp://10.96.241.50:5432
DCAEMOD_POSTGRES_PORT_5432_TCP=tcp://10.96.241.50:5432
DCAEMOD_POSTGRES_PORT_5432_TCP_ADDR=10.96.241.50
DCAEMOD_POSTGRES_PORT_5432_TCP_PORT=5432
DCAEMOD_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAEMOD_POSTGRES_SERVICE_HOST=10.96.241.50
DCAEMOD_POSTGRES_SERVICE_PORT=5432
DCAEMOD_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAEMOD_RUNTIME_API_PORT=tcp://10.96.4.32:9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP=tcp://10.96.4.32:9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP_ADDR=10.96.4.32
DCAEMOD_RUNTIME_API_PORT_9090_TCP_PORT=9090
DCAEMOD_RUNTIME_API_PORT_9090_TCP_PROTO=tcp
DCAEMOD_RUNTIME_API_SERVICE_HOST=10.96.4.32
DCAEMOD_RUNTIME_API_SERVICE_PORT=9090
DCAEMOD_RUNTIME_API_SERVICE_PORT_HTTP=9090
DCAE_CLOUDIFY_MANAGER_PORT=tcp://10.96.95.114:443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP=tcp://10.96.95.114:443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_ADDR=10.96.95.114
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_PORT=443
DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_PROTO=tcp
DCAE_CLOUDIFY_MANAGER_SERVICE_HOST=10.96.95.114
DCAE_CLOUDIFY_MANAGER_SERVICE_PORT=443
DCAE_CLOUDIFY_MANAGER_SERVICE_PORT_DCAE_CLOUDIFY_MANAGER=443
DCAE_DASHBOARD_PG_PRIMARY_PORT=tcp://10.96.140.207:5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.140.207:5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.140.207
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_HOST=10.96.140.207
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_PORT=5432
DCAE_DASHBOARD_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_DASHBOARD_PG_REPLICA_PORT=tcp://10.96.137.46:5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP=tcp://10.96.137.46:5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.137.46
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_PG_REPLICA_SERVICE_HOST=10.96.137.46
DCAE_DASHBOARD_PG_REPLICA_SERVICE_PORT=5432
DCAE_DASHBOARD_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_DASHBOARD_POSTGRES_PORT=tcp://10.96.90.35:5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP=tcp://10.96.90.35:5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_ADDR=10.96.90.35
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_DASHBOARD_POSTGRES_SERVICE_HOST=10.96.90.35
DCAE_DASHBOARD_POSTGRES_SERVICE_PORT=5432
DCAE_DASHBOARD_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_HEALTHCHECK_PORT=tcp://10.96.84.99:80
DCAE_HEALTHCHECK_PORT_80_TCP=tcp://10.96.84.99:80
DCAE_HEALTHCHECK_PORT_80_TCP_ADDR=10.96.84.99
DCAE_HEALTHCHECK_PORT_80_TCP_PORT=80
DCAE_HEALTHCHECK_PORT_80_TCP_PROTO=tcp
DCAE_HEALTHCHECK_SERVICE_HOST=10.96.84.99
DCAE_HEALTHCHECK_SERVICE_PORT=80
DCAE_HEALTHCHECK_SERVICE_PORT_DCAE_HEALTHCHECK=80
DCAE_HV_VES_COLLECTOR_PORT=tcp://10.96.5.147:6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP=tcp://10.96.5.147:6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_ADDR=10.96.5.147
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_PORT=6061
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_PROTO=tcp
DCAE_HV_VES_COLLECTOR_SERVICE_HOST=10.96.5.147
DCAE_HV_VES_COLLECTOR_SERVICE_PORT=6061
DCAE_HV_VES_COLLECTOR_SERVICE_PORT_HTTPS_HTTP=6061
DCAE_INV_PG_PRIMARY_PORT=tcp://10.96.1.152:5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.1.152:5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.1.152
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_INV_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_INV_PG_PRIMARY_SERVICE_HOST=10.96.1.152
DCAE_INV_PG_PRIMARY_SERVICE_PORT=5432
DCAE_INV_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_INV_PG_REPLICA_PORT=tcp://10.96.12.112:5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP=tcp://10.96.12.112:5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.12.112
DCAE_INV_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_INV_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_INV_PG_REPLICA_SERVICE_HOST=10.96.12.112
DCAE_INV_PG_REPLICA_SERVICE_PORT=5432
DCAE_INV_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_INV_POSTGRES_PORT=tcp://10.96.57.120:5432
DCAE_INV_POSTGRES_PORT_5432_TCP=tcp://10.96.57.120:5432
DCAE_INV_POSTGRES_PORT_5432_TCP_ADDR=10.96.57.120
DCAE_INV_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_INV_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_INV_POSTGRES_SERVICE_HOST=10.96.57.120
DCAE_INV_POSTGRES_SERVICE_PORT=5432
DCAE_INV_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_MONGOHOST_READ_PORT=tcp://10.96.188.3:27017
DCAE_MONGOHOST_READ_PORT_27017_TCP=tcp://10.96.188.3:27017
DCAE_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.96.188.3
DCAE_MONGOHOST_READ_PORT_27017_TCP_PORT=27017
DCAE_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp
DCAE_MONGOHOST_READ_SERVICE_HOST=10.96.188.3
DCAE_MONGOHOST_READ_SERVICE_PORT=27017
DCAE_MONGOHOST_READ_SERVICE_PORT_MONGO=27017
DCAE_MS_HEALTHCHECK_PORT=tcp://10.96.57.19:8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP=tcp://10.96.57.19:8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_ADDR=10.96.57.19
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_PORT=8080
DCAE_MS_HEALTHCHECK_PORT_8080_TCP_PROTO=tcp
DCAE_MS_HEALTHCHECK_SERVICE_HOST=10.96.57.19
DCAE_MS_HEALTHCHECK_SERVICE_PORT=8080
DCAE_MS_HEALTHCHECK_SERVICE_PORT_HTTP=8080
DCAE_PG_PRIMARY_PORT=tcp://10.96.95.79:5432
DCAE_PG_PRIMARY_PORT_5432_TCP=tcp://10.96.95.79:5432
DCAE_PG_PRIMARY_PORT_5432_TCP_ADDR=10.96.95.79
DCAE_PG_PRIMARY_PORT_5432_TCP_PORT=5432
DCAE_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp
DCAE_PG_PRIMARY_SERVICE_HOST=10.96.95.79
DCAE_PG_PRIMARY_SERVICE_PORT=5432
DCAE_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_PG_REPLICA_PORT=tcp://10.96.21.243:5432
DCAE_PG_REPLICA_PORT_5432_TCP=tcp://10.96.21.243:5432
DCAE_PG_REPLICA_PORT_5432_TCP_ADDR=10.96.21.243
DCAE_PG_REPLICA_PORT_5432_TCP_PORT=5432
DCAE_PG_REPLICA_PORT_5432_TCP_PROTO=tcp
DCAE_PG_REPLICA_SERVICE_HOST=10.96.21.243
DCAE_PG_REPLICA_SERVICE_PORT=5432
DCAE_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_POSTGRES_PORT=tcp://10.96.89.166:5432
DCAE_POSTGRES_PORT_5432_TCP=tcp://10.96.89.166:5432
DCAE_POSTGRES_PORT_5432_TCP_ADDR=10.96.89.166
DCAE_POSTGRES_PORT_5432_TCP_PORT=5432
DCAE_POSTGRES_PORT_5432_TCP_PROTO=tcp
DCAE_POSTGRES_SERVICE_HOST=10.96.89.166
DCAE_POSTGRES_SERVICE_PORT=5432
DCAE_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
DCAE_PRH_PORT=tcp://10.96.122.212:8100
DCAE_PRH_PORT_8100_TCP=tcp://10.96.122.212:8100
DCAE_PRH_PORT_8100_TCP_ADDR=10.96.122.212
DCAE_PRH_PORT_8100_TCP_PORT=8100
DCAE_PRH_PORT_8100_TCP_PROTO=tcp
DCAE_PRH_SERVICE_HOST=10.96.122.212
DCAE_PRH_SERVICE_PORT=8100
DCAE_PRH_SERVICE_PORT_HTTP=8100
DCAE_TCAGEN2_PORT=tcp://10.96.220.91:9091
DCAE_TCAGEN2_PORT_9091_TCP=tcp://10.96.220.91:9091
DCAE_TCAGEN2_PORT_9091_TCP_ADDR=10.96.220.91
DCAE_TCAGEN2_PORT_9091_TCP_PORT=9091
DCAE_TCAGEN2_PORT_9091_TCP_PROTO=tcp
DCAE_TCAGEN2_SERVICE_HOST=10.96.220.91
DCAE_TCAGEN2_SERVICE_PORT=9091
DCAE_TCAGEN2_SERVICE_PORT_HTTP=9091
DCAE_VES_COLLECTOR_PORT=tcp://10.96.215.250:8443
DCAE_VES_COLLECTOR_PORT_8443_TCP=tcp://10.96.215.250:8443
DCAE_VES_COLLECTOR_PORT_8443_TCP_ADDR=10.96.215.250
DCAE_VES_COLLECTOR_PORT_8443_TCP_PORT=8443
DCAE_VES_COLLECTOR_PORT_8443_TCP_PROTO=tcp
DCAE_VES_COLLECTOR_SERVICE_HOST=10.96.215.250
DCAE_VES_COLLECTOR_SERVICE_PORT=8443
DCAE_VES_COLLECTOR_SERVICE_PORT_HTTPS_HTTP=8443
DEPLOYMENT_HANDLER_PORT=tcp://10.96.136.242:8443
DEPLOYMENT_HANDLER_PORT_8443_TCP=tcp://10.96.136.242:8443
DEPLOYMENT_HANDLER_PORT_8443_TCP_ADDR=10.96.136.242
DEPLOYMENT_HANDLER_PORT_8443_TCP_PORT=8443
DEPLOYMENT_HANDLER_PORT_8443_TCP_PROTO=tcp
DEPLOYMENT_HANDLER_SERVICE_HOST=10.96.136.242
DEPLOYMENT_HANDLER_SERVICE_PORT=8443
DEPLOYMENT_HANDLER_SERVICE_PORT_DEPLOYMENT_HANDLER=8443
DEV_APPC_DB_METRICS_PORT=tcp://10.96.254.167:9104
DEV_APPC_DB_METRICS_PORT_9104_TCP=tcp://10.96.254.167:9104
DEV_APPC_DB_METRICS_PORT_9104_TCP_ADDR=10.96.254.167
DEV_APPC_DB_METRICS_PORT_9104_TCP_PORT=9104
DEV_APPC_DB_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_APPC_DB_METRICS_SERVICE_HOST=10.96.254.167
DEV_APPC_DB_METRICS_SERVICE_PORT=9104
DEV_APPC_DB_METRICS_SERVICE_PORT_METRICS=9104
DEV_CDS_DB_METRICS_PORT=tcp://10.96.156.10:9104
DEV_CDS_DB_METRICS_PORT_9104_TCP=tcp://10.96.156.10:9104
DEV_CDS_DB_METRICS_PORT_9104_TCP_ADDR=10.96.156.10
DEV_CDS_DB_METRICS_PORT_9104_TCP_PORT=9104
DEV_CDS_DB_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_CDS_DB_METRICS_SERVICE_HOST=10.96.156.10
DEV_CDS_DB_METRICS_SERVICE_PORT=9104
DEV_CDS_DB_METRICS_SERVICE_PORT_METRICS=9104
DEV_CMSO_DB_METRICS_PORT=tcp://10.96.143.183:9104
DEV_CMSO_DB_METRICS_PORT_9104_TCP=tcp://10.96.143.183:9104
DEV_CMSO_DB_METRICS_PORT_9104_TCP_ADDR=10.96.143.183
DEV_CMSO_DB_METRICS_PORT_9104_TCP_PORT=9104
DEV_CMSO_DB_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_CMSO_DB_METRICS_SERVICE_HOST=10.96.143.183
DEV_CMSO_DB_METRICS_SERVICE_PORT=9104
DEV_CMSO_DB_METRICS_SERVICE_PORT_METRICS=9104
DEV_DMAAP_DR_DB_METRICS_PORT=tcp://10.96.127.159:9104
DEV_DMAAP_DR_DB_METRICS_PORT_9104_TCP=tcp://10.96.127.159:9104
DEV_DMAAP_DR_DB_METRICS_PORT_9104_TCP_ADDR=10.96.127.159
DEV_DMAAP_DR_DB_METRICS_PORT_9104_TCP_PORT=9104
DEV_DMAAP_DR_DB_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_DMAAP_DR_DB_METRICS_SERVICE_HOST=10.96.127.159
DEV_DMAAP_DR_DB_METRICS_SERVICE_PORT=9104
DEV_DMAAP_DR_DB_METRICS_SERVICE_PORT_METRICS=9104
DEV_MARIADB_GALERA_METRICS_PORT=tcp://10.96.1.137:9104
DEV_MARIADB_GALERA_METRICS_PORT_9104_TCP=tcp://10.96.1.137:9104
DEV_MARIADB_GALERA_METRICS_PORT_9104_TCP_ADDR=10.96.1.137
DEV_MARIADB_GALERA_METRICS_PORT_9104_TCP_PORT=9104
DEV_MARIADB_GALERA_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_MARIADB_GALERA_METRICS_SERVICE_HOST=10.96.1.137
DEV_MARIADB_GALERA_METRICS_SERVICE_PORT=9104
DEV_MARIADB_GALERA_METRICS_SERVICE_PORT_METRICS=9104
DEV_POLICY_MARIADB_METRICS_PORT=tcp://10.96.71.185:9104
DEV_POLICY_MARIADB_METRICS_PORT_9104_TCP=tcp://10.96.71.185:9104
DEV_POLICY_MARIADB_METRICS_PORT_9104_TCP_ADDR=10.96.71.185
DEV_POLICY_MARIADB_METRICS_PORT_9104_TCP_PORT=9104
DEV_POLICY_MARIADB_METRICS_PORT_9104_TCP_PROTO=tcp
DEV_POLICY_MARIADB_METRICS_SERVICE_HOST=10.96.71.185
DEV_POLICY_MARIADB_METRICS_SERVICE_PORT=9104
DEV_POLICY_MARIADB_METRICS_SERVICE_PORT_METRICS=9104
DMAAP_BC_PORT=tcp://10.96.127.210:8443
DMAAP_BC_PORT_8443_TCP=tcp://10.96.127.210:8443
DMAAP_BC_PORT_8443_TCP_ADDR=10.96.127.210
DMAAP_BC_PORT_8443_TCP_PORT=8443
DMAAP_BC_PORT_8443_TCP_PROTO=tcp
DMAAP_BC_SERVICE_HOST=10.96.127.210
DMAAP_BC_SERVICE_PORT=8443
DMAAP_BC_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_DB_PORT=tcp://10.96.127.103:3306
DMAAP_DR_DB_PORT_3306_TCP=tcp://10.96.127.103:3306
DMAAP_DR_DB_PORT_3306_TCP_ADDR=10.96.127.103
DMAAP_DR_DB_PORT_3306_TCP_PORT=3306
DMAAP_DR_DB_PORT_3306_TCP_PROTO=tcp
DMAAP_DR_DB_SERVICE_HOST=10.96.127.103
DMAAP_DR_DB_SERVICE_PORT=3306
DMAAP_DR_DB_SERVICE_PORT_MYSQL=3306
DMAAP_DR_NODE_EXTERNAL_PORT=tcp://10.96.111.19:8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP=tcp://10.96.111.19:8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_ADDR=10.96.111.19
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PORT=8443
DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PROTO=tcp
DMAAP_DR_NODE_EXTERNAL_SERVICE_HOST=10.96.111.19
DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT=8443
DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_NODE_PORT=tcp://10.96.216.183:8443
DMAAP_DR_NODE_PORT_8080_TCP=tcp://10.96.216.183:8080
DMAAP_DR_NODE_PORT_8080_TCP_ADDR=10.96.216.183
DMAAP_DR_NODE_PORT_8080_TCP_PORT=8080
DMAAP_DR_NODE_PORT_8080_TCP_PROTO=tcp
DMAAP_DR_NODE_PORT_8443_TCP=tcp://10.96.216.183:8443
DMAAP_DR_NODE_PORT_8443_TCP_ADDR=10.96.216.183
DMAAP_DR_NODE_PORT_8443_TCP_PORT=8443
DMAAP_DR_NODE_PORT_8443_TCP_PROTO=tcp
DMAAP_DR_NODE_SERVICE_HOST=10.96.216.183
DMAAP_DR_NODE_SERVICE_PORT=8443
DMAAP_DR_NODE_SERVICE_PORT_HTTPS_API=8443
DMAAP_DR_NODE_SERVICE_PORT_HTTP_API=8080
DMAAP_DR_PROV_PORT=tcp://10.96.36.167:443
DMAAP_DR_PROV_PORT_443_TCP=tcp://10.96.36.167:443
DMAAP_DR_PROV_PORT_443_TCP_ADDR=10.96.36.167
DMAAP_DR_PROV_PORT_443_TCP_PORT=443
DMAAP_DR_PROV_PORT_443_TCP_PROTO=tcp
DMAAP_DR_PROV_SERVICE_HOST=10.96.36.167
DMAAP_DR_PROV_SERVICE_PORT=443
DMAAP_DR_PROV_SERVICE_PORT_DR_PROV_PORT2=443
EJBCA_PORT=tcp://10.96.9.14:8443
EJBCA_PORT_8080_TCP=tcp://10.96.9.14:8080
EJBCA_PORT_8080_TCP_ADDR=10.96.9.14
EJBCA_PORT_8080_TCP_PORT=8080
EJBCA_PORT_8080_TCP_PROTO=tcp
EJBCA_PORT_8443_TCP=tcp://10.96.9.14:8443
EJBCA_PORT_8443_TCP_ADDR=10.96.9.14
EJBCA_PORT_8443_TCP_PORT=8443
EJBCA_PORT_8443_TCP_PROTO=tcp
EJBCA_SERVICE_HOST=10.96.9.14
EJBCA_SERVICE_PORT=8443
EJBCA_SERVICE_PORT_HTTPS_API=8443
EJBCA_SERVICE_PORT_HTTP_API=8080
ESR_GUI_PORT=tcp://10.96.207.159:8080
ESR_GUI_PORT_8080_TCP=tcp://10.96.207.159:8080
ESR_GUI_PORT_8080_TCP_ADDR=10.96.207.159
ESR_GUI_PORT_8080_TCP_PORT=8080
ESR_GUI_PORT_8080_TCP_PROTO=tcp
ESR_GUI_SERVICE_HOST=10.96.207.159
ESR_GUI_SERVICE_PORT=8080
ESR_GUI_SERVICE_PORT_ESR_GUI=8080
ESR_SERVER_PORT=tcp://10.96.85.51:9518
ESR_SERVER_PORT_9518_TCP=tcp://10.96.85.51:9518
ESR_SERVER_PORT_9518_TCP_ADDR=10.96.85.51
ESR_SERVER_PORT_9518_TCP_PORT=9518
ESR_SERVER_PORT_9518_TCP_PROTO=tcp
ESR_SERVER_SERVICE_HOST=10.96.85.51
ESR_SERVER_SERVICE_PORT=9518
ESR_SERVER_SERVICE_PORT_ESR_SERVER=9518
HOLMES_ENGINE_MGMT_PORT=tcp://10.96.203.59:9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP=tcp://10.96.203.59:9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP_ADDR=10.96.203.59
HOLMES_ENGINE_MGMT_PORT_9102_TCP_PORT=9102
HOLMES_ENGINE_MGMT_PORT_9102_TCP_PROTO=tcp
HOLMES_ENGINE_MGMT_SERVICE_HOST=10.96.203.59
HOLMES_ENGINE_MGMT_SERVICE_PORT=9102
HOLMES_ENGINE_MGMT_SERVICE_PORT_HTTPS_REST=9102
HOLMES_POSTGRES_PORT=tcp://10.96.82.97:5432
HOLMES_POSTGRES_PORT_5432_TCP=tcp://10.96.82.97:5432
HOLMES_POSTGRES_PORT_5432_TCP_ADDR=10.96.82.97
HOLMES_POSTGRES_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_PRIMARY_PORT=tcp://10.96.136.1:5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP=tcp://10.96.136.1:5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_ADDR=10.96.136.1
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_PRIMARY_SERVICE_HOST=10.96.136.1
HOLMES_POSTGRES_PRIMARY_SERVICE_PORT=5432
HOLMES_POSTGRES_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_POSTGRES_REPLICA_PORT=tcp://10.96.233.177:5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP=tcp://10.96.233.177:5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_ADDR=10.96.233.177
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PORT=5432
HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PROTO=tcp
HOLMES_POSTGRES_REPLICA_SERVICE_HOST=10.96.233.177
HOLMES_POSTGRES_REPLICA_SERVICE_PORT=5432
HOLMES_POSTGRES_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_POSTGRES_SERVICE_HOST=10.96.82.97
HOLMES_POSTGRES_SERVICE_PORT=5432
HOLMES_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432
HOLMES_RULE_MGMT_PORT=tcp://10.96.15.156:9101
HOLMES_RULE_MGMT_PORT_9101_TCP=tcp://10.96.15.156:9101
HOLMES_RULE_MGMT_PORT_9101_TCP_ADDR=10.96.15.156
HOLMES_RULE_MGMT_PORT_9101_TCP_PORT=9101
HOLMES_RULE_MGMT_PORT_9101_TCP_PROTO=tcp
HOLMES_RULE_MGMT_PORT_9104_TCP=tcp://10.96.15.156:9104
HOLMES_RULE_MGMT_PORT_9104_TCP_ADDR=10.96.15.156
HOLMES_RULE_MGMT_PORT_9104_TCP_PORT=9104
HOLMES_RULE_MGMT_PORT_9104_TCP_PROTO=tcp
HOLMES_RULE_MGMT_SERVICE_HOST=10.96.15.156
HOLMES_RULE_MGMT_SERVICE_PORT=9101
HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_REST=9101
HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_UI=9104
HOME=/home/mrkafka
HOSTNAME=dev-message-router-kafka-0
HOST_IP=172.16.10.103
INVENTORY_PORT=tcp://10.96.137.227:8080
INVENTORY_PORT_8080_TCP=tcp://10.96.137.227:8080
INVENTORY_PORT_8080_TCP_ADDR=10.96.137.227
INVENTORY_PORT_8080_TCP_PORT=8080
INVENTORY_PORT_8080_TCP_PROTO=tcp
INVENTORY_SERVICE_HOST=10.96.137.227
INVENTORY_SERVICE_PORT=8080
INVENTORY_SERVICE_PORT_INVENTORY=8080
KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://172.16.10.103:30490,INTERNAL_SASL_PLAINTEXT://:9092
KAFKA_AUTHORIZER_CLASS_NAME=org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
KAFKA_BROKER_ID=0
KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE=false
KAFKA_DEFAULT_REPLICATION_FACTOR=3
KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL_SASL_PLAINTEXT
KAFKA_JMX_PORT=5555
KAFKA_LISTENERS=EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
KAFKA_LOG_DIRS=/var/lib/kafka/data
KAFKA_LOG_RETENTION_HOURS=168
KAFKA_NUM_PARTITIONS=3
KAFKA_NUM_RECOVERY_THREADS_PER_DATA_DIR=5
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=3
KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf
KAFKA_SASL_ENABLED_MECHANISMS=PLAIN
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1
KAFKA_USER=mrkafka
KAFKA_VERSION=5.3.1
KAFKA_ZOOKEEPER_CONNECT=dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=6000
KAFKA_ZOOKEEPER_SET_ACL=true
KUBERNETES_PORT=tcp://10.96.0.1:443
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_SERVICE_HOST=10.96.0.1
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_PORT_HTTPS=443
LANG=C.UTF-8
MARIADB_GALERA_PORT=tcp://10.96.181.116:3306
MARIADB_GALERA_PORT_3306_TCP=tcp://10.96.181.116:3306
MARIADB_GALERA_PORT_3306_TCP_ADDR=10.96.181.116
MARIADB_GALERA_PORT_3306_TCP_PORT=3306
MARIADB_GALERA_PORT_3306_TCP_PROTO=tcp
MARIADB_GALERA_SERVICE_HOST=10.96.181.116
MARIADB_GALERA_SERVICE_PORT=3306
MARIADB_GALERA_SERVICE_PORT_MYSQL=3306
MESSAGE_ROUTER_EXTERNAL_PORT=tcp://10.96.94.157:3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP=tcp://10.96.94.157:3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_ADDR=10.96.94.157
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PORT=3905
MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PROTO=tcp
MESSAGE_ROUTER_EXTERNAL_SERVICE_HOST=10.96.94.157
MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT=3905
MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT_HTTPS_API=3905
MESSAGE_ROUTER_KAFKA_0_PORT=tcp://10.96.162.190:9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP=tcp://10.96.162.190:9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_ADDR=10.96.162.190
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_0_SERVICE_HOST=10.96.162.190
MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_0=9091
MESSAGE_ROUTER_KAFKA_1_PORT=tcp://10.96.187.168:9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP=tcp://10.96.187.168:9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_ADDR=10.96.187.168
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_1_SERVICE_HOST=10.96.187.168
MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_1=9091
MESSAGE_ROUTER_KAFKA_2_PORT=tcp://10.96.89.73:9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP=tcp://10.96.89.73:9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_ADDR=10.96.89.73
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PORT=9091
MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PROTO=tcp
MESSAGE_ROUTER_KAFKA_2_SERVICE_HOST=10.96.89.73
MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT=9091
MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_2=9091
MESSAGE_ROUTER_PORT=tcp://10.96.32.226:3905
MESSAGE_ROUTER_PORT_3904_TCP=tcp://10.96.32.226:3904
MESSAGE_ROUTER_PORT_3904_TCP_ADDR=10.96.32.226
MESSAGE_ROUTER_PORT_3904_TCP_PORT=3904
MESSAGE_ROUTER_PORT_3904_TCP_PROTO=tcp
MESSAGE_ROUTER_PORT_3905_TCP=tcp://10.96.32.226:3905
MESSAGE_ROUTER_PORT_3905_TCP_ADDR=10.96.32.226
MESSAGE_ROUTER_PORT_3905_TCP_PORT=3905
MESSAGE_ROUTER_PORT_3905_TCP_PROTO=tcp
MESSAGE_ROUTER_SERVICE_HOST=10.96.32.226
MESSAGE_ROUTER_SERVICE_PORT=3905
MESSAGE_ROUTER_SERVICE_PORT_HTTPS_API=3905
MESSAGE_ROUTER_SERVICE_PORT_HTTP_API=3904
MODELING_ETSICATALOG_PORT=tcp://10.96.187.176:8806
MODELING_ETSICATALOG_PORT_8806_TCP=tcp://10.96.187.176:8806
MODELING_ETSICATALOG_PORT_8806_TCP_ADDR=10.96.187.176
MODELING_ETSICATALOG_PORT_8806_TCP_PORT=8806
MODELING_ETSICATALOG_PORT_8806_TCP_PROTO=tcp
MODELING_ETSICATALOG_SERVICE_HOST=10.96.187.176
MODELING_ETSICATALOG_SERVICE_PORT=8806
MODELING_ETSICATALOG_SERVICE_PORT_MODELING_ETSICATALOG=8806
MSB_CONSUL_PORT=tcp://10.96.159.72:8500
MSB_CONSUL_PORT_8500_TCP=tcp://10.96.159.72:8500
MSB_CONSUL_PORT_8500_TCP_ADDR=10.96.159.72
MSB_CONSUL_PORT_8500_TCP_PORT=8500
MSB_CONSUL_PORT_8500_TCP_PROTO=tcp
MSB_CONSUL_SERVICE_HOST=10.96.159.72
MSB_CONSUL_SERVICE_PORT=8500
MSB_CONSUL_SERVICE_PORT_HTTP_MSB_CONSUL=8500
MSB_DISCOVERY_PORT=tcp://10.96.125.6:10081
MSB_DISCOVERY_PORT_10081_TCP=tcp://10.96.125.6:10081
MSB_DISCOVERY_PORT_10081_TCP_ADDR=10.96.125.6
MSB_DISCOVERY_PORT_10081_TCP_PORT=10081
MSB_DISCOVERY_PORT_10081_TCP_PROTO=tcp
MSB_DISCOVERY_SERVICE_HOST=10.96.125.6
MSB_DISCOVERY_SERVICE_PORT=10081
MSB_DISCOVERY_SERVICE_PORT_HTTP_MSB_DISCOVERY=10081
MSB_EAG_PORT=tcp://10.96.190.42:443
MSB_EAG_PORT_443_TCP=tcp://10.96.190.42:443
MSB_EAG_PORT_443_TCP_ADDR=10.96.190.42
MSB_EAG_PORT_443_TCP_PORT=443
MSB_EAG_PORT_443_TCP_PROTO=tcp
MSB_EAG_SERVICE_HOST=10.96.190.42
MSB_EAG_SERVICE_PORT=443
MSB_EAG_SERVICE_PORT_HTTPS_MSB_EAG=443
MSB_IAG_PORT=tcp://10.96.34.252:443
MSB_IAG_PORT_443_TCP=tcp://10.96.34.252:443
MSB_IAG_PORT_443_TCP_ADDR=10.96.34.252
MSB_IAG_PORT_443_TCP_PORT=443
MSB_IAG_PORT_443_TCP_PROTO=tcp
MSB_IAG_SERVICE_HOST=10.96.34.252
MSB_IAG_SERVICE_PORT=443
MSB_IAG_SERVICE_PORT_HTTPS_MSB_IAG=443
MULTICLOUD_FCAPS_PORT=tcp://10.96.189.187:9011
MULTICLOUD_FCAPS_PORT_9011_TCP=tcp://10.96.189.187:9011
MULTICLOUD_FCAPS_PORT_9011_TCP_ADDR=10.96.189.187
MULTICLOUD_FCAPS_PORT_9011_TCP_PORT=9011
MULTICLOUD_FCAPS_PORT_9011_TCP_PROTO=tcp
MULTICLOUD_FCAPS_SERVICE_HOST=10.96.189.187
MULTICLOUD_FCAPS_SERVICE_PORT=9011
MULTICLOUD_FCAPS_SERVICE_PORT_MULTICLOUD_FCAPS=9011
MULTICLOUD_FRAMEWORK_PORT=tcp://10.96.209.22:9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP=tcp://10.96.209.22:9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_ADDR=10.96.209.22
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PORT=9001
MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PROTO=tcp
MULTICLOUD_FRAMEWORK_SERVICE_HOST=10.96.209.22
MULTICLOUD_FRAMEWORK_SERVICE_PORT=9001
MULTICLOUD_FRAMEWORK_SERVICE_PORT_MULTICLOUD_FRAMEWORK=9001
MULTICLOUD_K8S_MONGO_READ_PORT=tcp://10.96.48.66:27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP=tcp://10.96.48.66:27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_ADDR=10.96.48.66
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PORT=27017
MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PROTO=tcp
MULTICLOUD_K8S_MONGO_READ_SERVICE_HOST=10.96.48.66
MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT=27017
MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT_MONGO=27017
MULTICLOUD_K8S_PORT=tcp://10.96.79.66:9015
MULTICLOUD_K8S_PORT_9015_TCP=tcp://10.96.79.66:9015
MULTICLOUD_K8S_PORT_9015_TCP_ADDR=10.96.79.66
MULTICLOUD_K8S_PORT_9015_TCP_PORT=9015
MULTICLOUD_K8S_PORT_9015_TCP_PROTO=tcp
MULTICLOUD_K8S_SERVICE_HOST=10.96.79.66
MULTICLOUD_K8S_SERVICE_PORT=9015
MULTICLOUD_PIKE_PORT=tcp://10.96.166.146:9007
MULTICLOUD_PIKE_PORT_9007_TCP=tcp://10.96.166.146:9007
MULTICLOUD_PIKE_PORT_9007_TCP_ADDR=10.96.166.146
MULTICLOUD_PIKE_PORT_9007_TCP_PORT=9007
MULTICLOUD_PIKE_PORT_9007_TCP_PROTO=tcp
MULTICLOUD_PIKE_SERVICE_HOST=10.96.166.146
MULTICLOUD_PIKE_SERVICE_PORT=9007
MULTICLOUD_PIKE_SERVICE_PORT_MULTICLOUD_PIKE=9007
MULTICLOUD_STARLINGX_PORT=tcp://10.96.254.34:9009
MULTICLOUD_STARLINGX_PORT_9009_TCP=tcp://10.96.254.34:9009
MULTICLOUD_STARLINGX_PORT_9009_TCP_ADDR=10.96.254.34
MULTICLOUD_STARLINGX_PORT_9009_TCP_PORT=9009
MULTICLOUD_STARLINGX_PORT_9009_TCP_PROTO=tcp
MULTICLOUD_STARLINGX_SERVICE_HOST=10.96.254.34
MULTICLOUD_STARLINGX_SERVICE_PORT=9009
MULTICLOUD_STARLINGX_SERVICE_PORT_MULTICLOUD_STARLINGX=9009
MULTICLOUD_TITANIUMCLOUD_PORT=tcp://10.96.183.49:9005
MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP=tcp://10.96.183.49:9005
MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP_ADDR=10.96.183.49
MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP_PORT=9005
MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP_PROTO=tcp
MULTICLOUD_TITANIUMCLOUD_SERVICE_HOST=10.96.183.49
MULTICLOUD_TITANIUMCLOUD_SERVICE_PORT=9005
MULTICLOUD_TITANIUMCLOUD_SERVICE_PORT_MULTICLOUD_TITANIUMCLOUD=9005
MULTICLOUD_VIO_PORT=tcp://10.96.65.186:9004
MULTICLOUD_VIO_PORT_9004_TCP=tcp://10.96.65.186:9004
MULTICLOUD_VIO_PORT_9004_TCP_ADDR=10.96.65.186
MULTICLOUD_VIO_PORT_9004_TCP_PORT=9004
MULTICLOUD_VIO_PORT_9004_TCP_PROTO=tcp
MULTICLOUD_VIO_SERVICE_HOST=10.96.65.186
MULTICLOUD_VIO_SERVICE_PORT=9004
MULTICLOUD_VIO_SERVICE_PORT_MULTICLOUD_VIO=9004
NBI_MONGOHOST_READ_PORT=tcp://10.96.152.46:27017
NBI_MONGOHOST_READ_PORT_27017_TCP=tcp://10.96.152.46:27017
NBI_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.96.152.46
NBI_MONGOHOST_READ_PORT_27017_TCP_PORT=27017
NBI_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp
NBI_MONGOHOST_READ_SERVICE_HOST=10.96.152.46
NBI_MONGOHOST_READ_SERVICE_PORT=27017
NBI_MONGOHOST_READ_SERVICE_PORT_MONGO=27017
NBI_PORT=tcp://10.96.202.147:8443
NBI_PORT_8443_TCP=tcp://10.96.202.147:8443
NBI_PORT_8443_TCP_ADDR=10.96.202.147
NBI_PORT_8443_TCP_PORT=8443
NBI_PORT_8443_TCP_PROTO=tcp
NBI_SERVICE_HOST=10.96.202.147
NBI_SERVICE_PORT=8443
NBI_SERVICE_PORT_API_8443=8443
NENG_SERV_PORT=tcp://10.96.107.182:8080
NENG_SERV_PORT_8080_TCP=tcp://10.96.107.182:8080
NENG_SERV_PORT_8080_TCP_ADDR=10.96.107.182
NENG_SERV_PORT_8080_TCP_PORT=8080
NENG_SERV_PORT_8080_TCP_PROTO=tcp
NENG_SERV_SERVICE_HOST=10.96.107.182
NENG_SERV_SERVICE_PORT=8080
NENG_SERV_SERVICE_PORT_NENG_SERV_PORT=8080
NETBOX_APP_PORT=tcp://10.96.109.253:8001
NETBOX_APP_PORT_8001_TCP=tcp://10.96.109.253:8001
NETBOX_APP_PORT_8001_TCP_ADDR=10.96.109.253
NETBOX_APP_PORT_8001_TCP_PORT=8001
NETBOX_APP_PORT_8001_TCP_PROTO=tcp
NETBOX_APP_SERVICE_HOST=10.96.109.253
NETBOX_APP_SERVICE_PORT=8001
NETBOX_APP_SERVICE_PORT_NETBOX_APP=8001
NETBOX_NGINX_PORT=tcp://10.96.234.245:8080
NETBOX_NGINX_PORT_8080_TCP=tcp://10.96.234.245:8080
NETBOX_NGINX_PORT_8080_TCP_ADDR=10.96.234.245
NETBOX_NGINX_PORT_8080_TCP_PORT=8080
NETBOX_NGINX_PORT_8080_TCP_PROTO=tcp
NETBOX_NGINX_SERVICE_HOST=10.96.234.245
NETBOX_NGINX_SERVICE_PORT=8080
NETBOX_POSTGRES_PORT=tcp://10.96.120.118:5432
NETBOX_POSTGRES_PORT_5432_TCP=tcp://10.96.120.118:5432
NETBOX_POSTGRES_PORT_5432_TCP_ADDR=10.96.120.118
NETBOX_POSTGRES_PORT_5432_TCP_PORT=5432
NETBOX_POSTGRES_PORT_5432_TCP_PROTO=tcp
NETBOX_POSTGRES_SERVICE_HOST=10.96.120.118
NETBOX_POSTGRES_SERVICE_PORT=5432
NETBOX_POSTGRES_SERVICE_PORT_NETBOX_POSTGRES=5432
OOF_CMSO_OPTIMIZER_PORT=tcp://10.96.26.20:7997
OOF_CMSO_OPTIMIZER_PORT_7997_TCP=tcp://10.96.26.20:7997
OOF_CMSO_OPTIMIZER_PORT_7997_TCP_ADDR=10.96.26.20
OOF_CMSO_OPTIMIZER_PORT_7997_TCP_PORT=7997
OOF_CMSO_OPTIMIZER_PORT_7997_TCP_PROTO=tcp
OOF_CMSO_OPTIMIZER_SERVICE_HOST=10.96.26.20
OOF_CMSO_OPTIMIZER_SERVICE_PORT=7997
OOF_CMSO_OPTIMIZER_SERVICE_PORT_CMSO=7997
OOF_CMSO_PORT=tcp://10.96.13.140:8080
OOF_CMSO_PORT_8080_TCP=tcp://10.96.13.140:8080
OOF_CMSO_PORT_8080_TCP_ADDR=10.96.13.140
OOF_CMSO_PORT_8080_TCP_PORT=8080
OOF_CMSO_PORT_8080_TCP_PROTO=tcp
OOF_CMSO_SERVICE_HOST=10.96.13.140
OOF_CMSO_SERVICE_PORT=8080
OOF_CMSO_SERVICE_PORT_CMSO=8080
OOF_CMSO_TICKETMGT_PORT=tcp://10.96.4.227:7999
OOF_CMSO_TICKETMGT_PORT_7999_TCP=tcp://10.96.4.227:7999
OOF_CMSO_TICKETMGT_PORT_7999_TCP_ADDR=10.96.4.227
OOF_CMSO_TICKETMGT_PORT_7999_TCP_PORT=7999
OOF_CMSO_TICKETMGT_PORT_7999_TCP_PROTO=tcp
OOF_CMSO_TICKETMGT_SERVICE_HOST=10.96.4.227
OOF_CMSO_TICKETMGT_SERVICE_PORT=7999
OOF_CMSO_TICKETMGT_SERVICE_PORT_CMSO_TICKETMGT=7999
OOF_CMSO_TOPOLOGY_PORT=tcp://10.96.8.172:7998
OOF_CMSO_TOPOLOGY_PORT_7998_TCP=tcp://10.96.8.172:7998
OOF_CMSO_TOPOLOGY_PORT_7998_TCP_ADDR=10.96.8.172
OOF_CMSO_TOPOLOGY_PORT_7998_TCP_PORT=7998
OOF_CMSO_TOPOLOGY_PORT_7998_TCP_PROTO=tcp
OOF_CMSO_TOPOLOGY_SERVICE_HOST=10.96.8.172
OOF_CMSO_TOPOLOGY_SERVICE_PORT=7998
OOF_CMSO_TOPOLOGY_SERVICE_PORT_CMSO_TOPOLOGY=7998
OOF_HAS_API_PORT=tcp://10.96.124.58:8091
OOF_HAS_API_PORT_8091_TCP=tcp://10.96.124.58:8091
OOF_HAS_API_PORT_8091_TCP_ADDR=10.96.124.58
OOF_HAS_API_PORT_8091_TCP_PORT=8091
OOF_HAS_API_PORT_8091_TCP_PROTO=tcp
OOF_HAS_API_SERVICE_HOST=10.96.124.58
OOF_HAS_API_SERVICE_PORT=8091
OOF_HAS_API_SERVICE_PORT_OOF_HAS_API=8091
OOF_OSDF_PORT=tcp://10.96.117.3:8698
OOF_OSDF_PORT_8698_TCP=tcp://10.96.117.3:8698
OOF_OSDF_PORT_8698_TCP_ADDR=10.96.117.3
OOF_OSDF_PORT_8698_TCP_PORT=8698
OOF_OSDF_PORT_8698_TCP_PROTO=tcp
OOF_OSDF_SERVICE_HOST=10.96.117.3
OOF_OSDF_SERVICE_PORT=8698
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_PORT=tcp://10.96.90.89:8443
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_PORT_8443_TCP=tcp://10.96.90.89:8443
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_PORT_8443_TCP_ADDR=10.96.90.89
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_PORT_8443_TCP_PORT=8443
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_PORT_8443_TCP_PROTO=tcp
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_SERVICE_HOST=10.96.90.89
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_SERVICE_PORT=8443
OOM_CERTSERVICE_CMPV2ISSUER_METRICS_SERVICE_SERVICE_PORT_HTTPS=8443
OOM_CERT_SERVICE_PORT=tcp://10.96.115.246:8443
OOM_CERT_SERVICE_PORT_8443_TCP=tcp://10.96.115.246:8443
OOM_CERT_SERVICE_PORT_8443_TCP_ADDR=10.96.115.246
OOM_CERT_SERVICE_PORT_8443_TCP_PORT=8443
OOM_CERT_SERVICE_PORT_8443_TCP_PROTO=tcp
OOM_CERT_SERVICE_SERVICE_HOST=10.96.115.246
OOM_CERT_SERVICE_SERVICE_PORT=8443
OOM_CERT_SERVICE_SERVICE_PORT_HTTPS_HTTP=8443
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
POLICY_APEX_PDP_PORT=tcp://10.96.185.57:6969
POLICY_APEX_PDP_PORT_6969_TCP=tcp://10.96.185.57:6969
POLICY_APEX_PDP_PORT_6969_TCP_ADDR=10.96.185.57
POLICY_APEX_PDP_PORT_6969_TCP_PORT=6969
POLICY_APEX_PDP_PORT_6969_TCP_PROTO=tcp
POLICY_APEX_PDP_SERVICE_HOST=10.96.185.57
POLICY_APEX_PDP_SERVICE_PORT=6969
POLICY_APEX_PDP_SERVICE_PORT_POLICY_APEX_PDP=6969
POLICY_API_PORT=tcp://10.96.61.93:6969
POLICY_API_PORT_6969_TCP=tcp://10.96.61.93:6969
POLICY_API_PORT_6969_TCP_ADDR=10.96.61.93
POLICY_API_PORT_6969_TCP_PORT=6969
POLICY_API_PORT_6969_TCP_PROTO=tcp
POLICY_API_SERVICE_HOST=10.96.61.93
POLICY_API_SERVICE_PORT=6969
POLICY_API_SERVICE_PORT_POLICY_API=6969
POLICY_CLAMP_BE_PORT=tcp://10.96.112.120:8443
POLICY_CLAMP_BE_PORT_8443_TCP=tcp://10.96.112.120:8443
POLICY_CLAMP_BE_PORT_8443_TCP_ADDR=10.96.112.120
POLICY_CLAMP_BE_PORT_8443_TCP_PORT=8443
POLICY_CLAMP_BE_PORT_8443_TCP_PROTO=tcp
POLICY_CLAMP_BE_SERVICE_HOST=10.96.112.120
POLICY_CLAMP_BE_SERVICE_PORT=8443
POLICY_CLAMP_BE_SERVICE_PORT_POLICY_CLAMP_BE=8443
POLICY_CLAMP_FE_PORT=tcp://10.96.188.242:2443
POLICY_CLAMP_FE_PORT_2443_TCP=tcp://10.96.188.242:2443
POLICY_CLAMP_FE_PORT_2443_TCP_ADDR=10.96.188.242
POLICY_CLAMP_FE_PORT_2443_TCP_PORT=2443
POLICY_CLAMP_FE_PORT_2443_TCP_PROTO=tcp
POLICY_CLAMP_FE_SERVICE_HOST=10.96.188.242
POLICY_CLAMP_FE_SERVICE_PORT=2443
POLICY_CLAMP_FE_SERVICE_PORT_POLICY_CLAMP_FE=2443
POLICY_DISTRIBUTION_PORT=tcp://10.96.154.65:6969
POLICY_DISTRIBUTION_PORT_6969_TCP=tcp://10.96.154.65:6969
POLICY_DISTRIBUTION_PORT_6969_TCP_ADDR=10.96.154.65
POLICY_DISTRIBUTION_PORT_6969_TCP_PORT=6969
POLICY_DISTRIBUTION_PORT_6969_TCP_PROTO=tcp
POLICY_DISTRIBUTION_SERVICE_HOST=10.96.154.65
POLICY_DISTRIBUTION_SERVICE_PORT=6969
POLICY_DISTRIBUTION_SERVICE_PORT_POLICY_DISTRIBUTION=6969
POLICY_DROOLS_PDP_PORT=tcp://10.96.144.157:6969
POLICY_DROOLS_PDP_PORT_6969_TCP=tcp://10.96.144.157:6969
POLICY_DROOLS_PDP_PORT_6969_TCP_ADDR=10.96.144.157
POLICY_DROOLS_PDP_PORT_6969_TCP_PORT=6969
POLICY_DROOLS_PDP_PORT_6969_TCP_PROTO=tcp
POLICY_DROOLS_PDP_PORT_9696_TCP=tcp://10.96.144.157:9696
POLICY_DROOLS_PDP_PORT_9696_TCP_ADDR=10.96.144.157
POLICY_DROOLS_PDP_PORT_9696_TCP_PORT=9696
POLICY_DROOLS_PDP_PORT_9696_TCP_PROTO=tcp
POLICY_DROOLS_PDP_SERVICE_HOST=10.96.144.157
POLICY_DROOLS_PDP_SERVICE_PORT=6969
POLICY_DROOLS_PDP_SERVICE_PORT_POLICY_DROOLS_PDP_6969=6969
POLICY_DROOLS_PDP_SERVICE_PORT_POLICY_DROOLS_PDP_9696=9696
POLICY_HANDLER_PORT=tcp://10.96.131.81:80
POLICY_HANDLER_PORT_80_TCP=tcp://10.96.131.81:80
POLICY_HANDLER_PORT_80_TCP_ADDR=10.96.131.81
POLICY_HANDLER_PORT_80_TCP_PORT=80
POLICY_HANDLER_PORT_80_TCP_PROTO=tcp
POLICY_HANDLER_SERVICE_HOST=10.96.131.81
POLICY_HANDLER_SERVICE_PORT=80
POLICY_HANDLER_SERVICE_PORT_POLICY_HANDLER=80
POLICY_MARIADB_PORT=tcp://10.96.43.117:3306
POLICY_MARIADB_PORT_3306_TCP=tcp://10.96.43.117:3306
POLICY_MARIADB_PORT_3306_TCP_ADDR=10.96.43.117
POLICY_MARIADB_PORT_3306_TCP_PORT=3306
POLICY_MARIADB_PORT_3306_TCP_PROTO=tcp
POLICY_MARIADB_SERVICE_HOST=10.96.43.117
POLICY_MARIADB_SERVICE_PORT=3306
POLICY_MARIADB_SERVICE_PORT_MYSQL=3306
POLICY_PAP_PORT=tcp://10.96.26.7:6969
POLICY_PAP_PORT_6969_TCP=tcp://10.96.26.7:6969
POLICY_PAP_PORT_6969_TCP_ADDR=10.96.26.7
POLICY_PAP_PORT_6969_TCP_PORT=6969
POLICY_PAP_PORT_6969_TCP_PROTO=tcp
POLICY_PAP_SERVICE_HOST=10.96.26.7
POLICY_PAP_SERVICE_PORT=6969
POLICY_PAP_SERVICE_PORT_HTTP_API=6969
POLICY_XACML_PDP_PORT=tcp://10.96.43.39:6969
POLICY_XACML_PDP_PORT_6969_TCP=tcp://10.96.43.39:6969
POLICY_XACML_PDP_PORT_6969_TCP_ADDR=10.96.43.39
POLICY_XACML_PDP_PORT_6969_TCP_PORT=6969
POLICY_XACML_PDP_PORT_6969_TCP_PROTO=tcp
POLICY_XACML_PDP_SERVICE_HOST=10.96.43.39
POLICY_XACML_PDP_SERVICE_PORT=6969
POLICY_XACML_PDP_SERVICE_PORT_POLICY_XACML_PDP=6969
PORTAL_APP_PORT=tcp://10.96.74.180:8443
PORTAL_APP_PORT_8443_TCP=tcp://10.96.74.180:8443
PORTAL_APP_PORT_8443_TCP_ADDR=10.96.74.180
PORTAL_APP_PORT_8443_TCP_PORT=8443
PORTAL_APP_PORT_8443_TCP_PROTO=tcp
PORTAL_APP_SERVICE_HOST=10.96.74.180
PORTAL_APP_SERVICE_PORT=8443
PORTAL_APP_SERVICE_PORT_PORTAL_APP4=8443
PORTAL_CASSANDRA_PORT=tcp://10.96.252.65:9160
PORTAL_CASSANDRA_PORT_7000_TCP=tcp://10.96.252.65:7000
PORTAL_CASSANDRA_PORT_7000_TCP_ADDR=10.96.252.65
PORTAL_CASSANDRA_PORT_7000_TCP_PORT=7000
PORTAL_CASSANDRA_PORT_7000_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_7001_TCP=tcp://10.96.252.65:7001
PORTAL_CASSANDRA_PORT_7001_TCP_ADDR=10.96.252.65
PORTAL_CASSANDRA_PORT_7001_TCP_PORT=7001
PORTAL_CASSANDRA_PORT_7001_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_7199_TCP=tcp://10.96.252.65:7199
PORTAL_CASSANDRA_PORT_7199_TCP_ADDR=10.96.252.65
PORTAL_CASSANDRA_PORT_7199_TCP_PORT=7199
PORTAL_CASSANDRA_PORT_7199_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_9042_TCP=tcp://10.96.252.65:9042
PORTAL_CASSANDRA_PORT_9042_TCP_ADDR=10.96.252.65
PORTAL_CASSANDRA_PORT_9042_TCP_PORT=9042
PORTAL_CASSANDRA_PORT_9042_TCP_PROTO=tcp
PORTAL_CASSANDRA_PORT_9160_TCP=tcp://10.96.252.65:9160
PORTAL_CASSANDRA_PORT_9160_TCP_ADDR=10.96.252.65
PORTAL_CASSANDRA_PORT_9160_TCP_PORT=9160
PORTAL_CASSANDRA_PORT_9160_TCP_PROTO=tcp
PORTAL_CASSANDRA_SERVICE_HOST=10.96.252.65
PORTAL_CASSANDRA_SERVICE_PORT=9160
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA2=7000
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA3=7001
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA4=7199
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA5=9042
PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA=9160
PORTAL_DB_PORT=tcp://10.96.116.83:3306
PORTAL_DB_PORT_3306_TCP=tcp://10.96.116.83:3306
PORTAL_DB_PORT_3306_TCP_ADDR=10.96.116.83
PORTAL_DB_PORT_3306_TCP_PORT=3306
PORTAL_DB_PORT_3306_TCP_PROTO=tcp
PORTAL_DB_SERVICE_HOST=10.96.116.83
PORTAL_DB_SERVICE_PORT=3306
PORTAL_DB_SERVICE_PORT_PORTAL_DB=3306
PORTAL_SDK_PORT=tcp://10.96.67.170:8443
PORTAL_SDK_PORT_8443_TCP=tcp://10.96.67.170:8443
PORTAL_SDK_PORT_8443_TCP_ADDR=10.96.67.170
PORTAL_SDK_PORT_8443_TCP_PORT=8443
PORTAL_SDK_PORT_8443_TCP_PROTO=tcp
PORTAL_SDK_SERVICE_HOST=10.96.67.170
PORTAL_SDK_SERVICE_PORT=8443
PORTAL_SDK_SERVICE_PORT_PORTAL_SDK=8443
PORTAL_WIDGET_PORT=tcp://10.96.4.176:8082
PORTAL_WIDGET_PORT_8082_TCP=tcp://10.96.4.176:8082
PORTAL_WIDGET_PORT_8082_TCP_ADDR=10.96.4.176
PORTAL_WIDGET_PORT_8082_TCP_PORT=8082
PORTAL_WIDGET_PORT_8082_TCP_PROTO=tcp
PORTAL_WIDGET_SERVICE_HOST=10.96.4.176
PORTAL_WIDGET_SERVICE_PORT=8082
PORTAL_WIDGET_SERVICE_PORT_PORTAL_WIDGET=8082
PWD=/
PYTHON_PIP_VERSION=8.1.2
PYTHON_VERSION=2.7.9-1
ROBOT_PORT=tcp://10.96.209.205:443
ROBOT_PORT_443_TCP=tcp://10.96.209.205:443
ROBOT_PORT_443_TCP_ADDR=10.96.209.205
ROBOT_PORT_443_TCP_PORT=443
ROBOT_PORT_443_TCP_PROTO=tcp
ROBOT_SERVICE_HOST=10.96.209.205
ROBOT_SERVICE_PORT=443
ROBOT_SERVICE_PORT_HTTPD=443
SCALA_VERSION=2.12
SDC_BE_EXTERNAL_PORT=tcp://10.96.53.176:8443
SDC_BE_EXTERNAL_PORT_8443_TCP=tcp://10.96.53.176:8443
SDC_BE_EXTERNAL_PORT_8443_TCP_ADDR=10.96.53.176
SDC_BE_EXTERNAL_PORT_8443_TCP_PORT=8443
SDC_BE_EXTERNAL_PORT_8443_TCP_PROTO=tcp
SDC_BE_EXTERNAL_SERVICE_HOST=10.96.53.176
SDC_BE_EXTERNAL_SERVICE_PORT=8443
SDC_BE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443
SDC_BE_PORT=tcp://10.96.177.18:8443
SDC_BE_PORT_8080_TCP=tcp://10.96.177.18:8080
SDC_BE_PORT_8080_TCP_ADDR=10.96.177.18
SDC_BE_PORT_8080_TCP_PORT=8080
SDC_BE_PORT_8080_TCP_PROTO=tcp
SDC_BE_PORT_8443_TCP=tcp://10.96.177.18:8443
SDC_BE_PORT_8443_TCP_ADDR=10.96.177.18
SDC_BE_PORT_8443_TCP_PORT=8443
SDC_BE_PORT_8443_TCP_PROTO=tcp
SDC_BE_SERVICE_HOST=10.96.177.18
SDC_BE_SERVICE_PORT=8443
SDC_BE_SERVICE_PORT_HTTPS_API=8443
SDC_BE_SERVICE_PORT_HTTP_API=8080
SDC_FE_PORT=tcp://10.96.34.187:9443
SDC_FE_PORT_9443_TCP=tcp://10.96.34.187:9443
SDC_FE_PORT_9443_TCP_ADDR=10.96.34.187
SDC_FE_PORT_9443_TCP_PORT=9443
SDC_FE_PORT_9443_TCP_PROTO=tcp
SDC_FE_SERVICE_HOST=10.96.34.187
SDC_FE_SERVICE_PORT=9443
SDC_FE_SERVICE_PORT_SDC_FE2=9443
SDC_HELM_VALIDATOR_PORT=tcp://10.96.89.176:8080
SDC_HELM_VALIDATOR_PORT_8080_TCP=tcp://10.96.89.176:8080
SDC_HELM_VALIDATOR_PORT_8080_TCP_ADDR=10.96.89.176
SDC_HELM_VALIDATOR_PORT_8080_TCP_PORT=8080
SDC_HELM_VALIDATOR_PORT_8080_TCP_PROTO=tcp
SDC_HELM_VALIDATOR_SERVICE_HOST=10.96.89.176
SDC_HELM_VALIDATOR_SERVICE_PORT=8080
SDC_HELM_VALIDATOR_SERVICE_PORT_HTTP=8080
SDC_ONBOARDING_BE_PORT=tcp://10.96.138.215:8445
SDC_ONBOARDING_BE_PORT_8081_TCP=tcp://10.96.138.215:8081
SDC_ONBOARDING_BE_PORT_8081_TCP_ADDR=10.96.138.215
SDC_ONBOARDING_BE_PORT_8081_TCP_PORT=8081
SDC_ONBOARDING_BE_PORT_8081_TCP_PROTO=tcp
SDC_ONBOARDING_BE_PORT_8445_TCP=tcp://10.96.138.215:8445
SDC_ONBOARDING_BE_PORT_8445_TCP_ADDR=10.96.138.215
SDC_ONBOARDING_BE_PORT_8445_TCP_PORT=8445
SDC_ONBOARDING_BE_PORT_8445_TCP_PROTO=tcp
SDC_ONBOARDING_BE_SERVICE_HOST=10.96.138.215
SDC_ONBOARDING_BE_SERVICE_PORT=8445
SDC_ONBOARDING_BE_SERVICE_PORT_SDC_ONBOARDING_BE2=8081
SDC_ONBOARDING_BE_SERVICE_PORT_SDC_ONBOARDING_BE=8445
SDC_WFD_BE_PORT=tcp://10.96.82.220:8443
SDC_WFD_BE_PORT_8443_TCP=tcp://10.96.82.220:8443
SDC_WFD_BE_PORT_8443_TCP_ADDR=10.96.82.220
SDC_WFD_BE_PORT_8443_TCP_PORT=8443
SDC_WFD_BE_PORT_8443_TCP_PROTO=tcp
SDC_WFD_BE_SERVICE_HOST=10.96.82.220
SDC_WFD_BE_SERVICE_PORT=8443
SDC_WFD_BE_SERVICE_PORT_SDC_WFD_BE=8443
SDC_WFD_FE_PORT=tcp://10.96.128.207:8443
SDC_WFD_FE_PORT_8443_TCP=tcp://10.96.128.207:8443
SDC_WFD_FE_PORT_8443_TCP_ADDR=10.96.128.207
SDC_WFD_FE_PORT_8443_TCP_PORT=8443
SDC_WFD_FE_PORT_8443_TCP_PROTO=tcp
SDC_WFD_FE_SERVICE_HOST=10.96.128.207
SDC_WFD_FE_SERVICE_PORT=8443
SDC_WFD_FE_SERVICE_PORT_SDC_WFD_FE=8443
SDNC_ANSIBLE_SERVER_PORT=tcp://10.96.64.27:8000
SDNC_ANSIBLE_SERVER_PORT_8000_TCP=tcp://10.96.64.27:8000
SDNC_ANSIBLE_SERVER_PORT_8000_TCP_ADDR=10.96.64.27
SDNC_ANSIBLE_SERVER_PORT_8000_TCP_PORT=8000
SDNC_ANSIBLE_SERVER_PORT_8000_TCP_PROTO=tcp
SDNC_ANSIBLE_SERVER_SERVICE_HOST=10.96.64.27
SDNC_ANSIBLE_SERVER_SERVICE_PORT=8000
SDNC_ANSIBLE_SERVER_SERVICE_PORT_SDNC_ANSIBLE_SERVER=8000
SDNC_CALLHOME_PORT=tcp://10.96.133.11:6666
SDNC_CALLHOME_PORT_6666_TCP=tcp://10.96.133.11:6666
SDNC_CALLHOME_PORT_6666_TCP_ADDR=10.96.133.11
SDNC_CALLHOME_PORT_6666_TCP_PORT=6666
SDNC_CALLHOME_PORT_6666_TCP_PROTO=tcp
SDNC_CALLHOME_SERVICE_HOST=10.96.133.11
SDNC_CALLHOME_SERVICE_PORT=6666
SDNC_CALLHOME_SERVICE_PORT_SDNC_CALLHOME=6666
SDNC_DGBUILDER_PORT=tcp://10.96.166.45:3000
SDNC_DGBUILDER_PORT_3000_TCP=tcp://10.96.166.45:3000
SDNC_DGBUILDER_PORT_3000_TCP_ADDR=10.96.166.45
SDNC_DGBUILDER_PORT_3000_TCP_PORT=3000
SDNC_DGBUILDER_PORT_3000_TCP_PROTO=tcp
SDNC_DGBUILDER_SERVICE_HOST=10.96.166.45
SDNC_DGBUILDER_SERVICE_PORT=3000
SDNC_DGBUILDER_SERVICE_PORT_DGBUILDER=3000
SDNC_OAM_PORT=tcp://10.96.85.176:8282
SDNC_OAM_PORT_8202_TCP=tcp://10.96.85.176:8202
SDNC_OAM_PORT_8202_TCP_ADDR=10.96.85.176
SDNC_OAM_PORT_8202_TCP_PORT=8202
SDNC_OAM_PORT_8202_TCP_PROTO=tcp
SDNC_OAM_PORT_8282_TCP=tcp://10.96.85.176:8282
SDNC_OAM_PORT_8282_TCP_ADDR=10.96.85.176
SDNC_OAM_PORT_8282_TCP_PORT=8282
SDNC_OAM_PORT_8282_TCP_PROTO=tcp
SDNC_OAM_SERVICE_HOST=10.96.85.176
SDNC_OAM_SERVICE_PORT=8282
SDNC_OAM_SERVICE_PORT_SDNC_KARAF=8202
SDNC_OAM_SERVICE_PORT_SDNC_RESTCONF_ALT=8282
SDNC_PORT=tcp://10.96.232.20:8443
SDNC_PORT_8443_TCP=tcp://10.96.232.20:8443
SDNC_PORT_8443_TCP_ADDR=10.96.232.20
SDNC_PORT_8443_TCP_PORT=8443
SDNC_PORT_8443_TCP_PROTO=tcp
SDNC_SERVICE_HOST=10.96.232.20
SDNC_SERVICE_PORT=8443
SDNC_SERVICE_PORT_SDNC_RESTCONF=8443
SDNC_WEB_SERVICE_PORT=tcp://10.96.212.230:8443
SDNC_WEB_SERVICE_PORT_8443_TCP=tcp://10.96.212.230:8443
SDNC_WEB_SERVICE_PORT_8443_TCP_ADDR=10.96.212.230
SDNC_WEB_SERVICE_PORT_8443_TCP_PORT=8443
SDNC_WEB_SERVICE_PORT_8443_TCP_PROTO=tcp
SDNC_WEB_SERVICE_SERVICE_HOST=10.96.212.230
SDNC_WEB_SERVICE_SERVICE_PORT=8443
SDNC_WEB_SERVICE_SERVICE_PORT_SDNC_WEB=8443
SDNRDB_PORT=tcp://10.96.30.123:9200
SDNRDB_PORT_9200_TCP=tcp://10.96.30.123:9200
SDNRDB_PORT_9200_TCP_ADDR=10.96.30.123
SDNRDB_PORT_9200_TCP_PORT=9200
SDNRDB_PORT_9200_TCP_PROTO=tcp
SDNRDB_SERVICE_HOST=10.96.30.123
SDNRDB_SERVICE_PORT=9200
SDNRDB_SERVICE_PORT_9300_TCP=tcp://10.96.70.34:9300
SDNRDB_SERVICE_PORT_9300_TCP_ADDR=10.96.70.34
SDNRDB_SERVICE_PORT_9300_TCP_PORT=9300
SDNRDB_SERVICE_PORT_9300_TCP_PROTO=tcp
SDNRDB_SERVICE_PORT_ELASTICSEARCH=9200
SDNRDB_SERVICE_SERVICE_HOST=10.96.70.34
SDNRDB_SERVICE_SERVICE_PORT=9300
SDNRDB_SERVICE_SERVICE_PORT_HTTP_TRANSPORT=9300
SHLVL=1
ZULU_OPENJDK_VERSION=8=8.38.0.13
_=/usr/bin/env
aaf_locate_url=https://aaf-locate.onap:8095
enableCadi=true
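
Note: the *_SERVICE_HOST / *_SERVICE_PORT / *_PORT_* variables above are the service-discovery environment that Kubernetes injects into every container, one set per Service visible at pod start. A minimal Java sketch of reading one of them; POLICY_PAP (whose values appear above) is used purely as an example:

public class ServiceEnv {
    public static void main(String[] args) {
        // Injected by Kubernetes for the policy-pap Service
        // (see POLICY_PAP_SERVICE_HOST/PORT above: 10.96.26.7:6969).
        String host = System.getenv("POLICY_PAP_SERVICE_HOST");
        String port = System.getenv("POLICY_PAP_SERVICE_PORT");
        System.out.println("policy-pap reachable at " + host + ":" + port);
    }
}
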
===> User
uid=1000(mrkafka) gid=0(root) groups=0(root)
===> Configuring ...
SASL is enabled.
===> Running preflight checks ... 
===> Check if /var/lib/kafka/data is writable ...
===> Check if Zookeeper is healthy ...
[main] INFO io.confluent.admin.utils.ClusterStatus - SASL is enabled. java.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:host.name=dev-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.version=1.8.0_212
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.vendor=Azul Systems, Inc.
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.class.path=/etc/confluent/docker/docker-utils.jar
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.io.tmpdir=/tmp
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.compiler=<NA>
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.name=Linux
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.arch=amd64
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.version=4.15.0-117-generic
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.name=mrkafka
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.home=/home/mrkafka
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.dir=/
[main] INFO org.apache.zookeeper.ZooKeeper - Initiating client connection, connectString=dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=40000 watcher=io.confluent.admin.utils.ZookeeperConnectionWatcher@30dae81
[main-SendThread(dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.Login - Client successfully logged in.
[main-SendThread(dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.client.ZooKeeperSaslClient - Client will use DIGEST-MD5 as SASL mechanism.
[main-SendThread(dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Opening socket connection to server dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.242.153.218:2181. Will attempt to SASL-authenticate using Login Context section 'Client'
[main-SendThread(dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Socket connection established to dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.242.153.218:2181, initiating session
[main-SendThread(dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Session establishment complete on server dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.242.153.218:2181, sessionid = 0x200001c5b5f0000, negotiated timeout = 40000
[main] INFO org.apache.zookeeper.ZooKeeper - Session: 0x200001c5b5f0000 closed
[main-EventThread] INFO org.apache.zookeeper.ClientCnxn - EventThread shut down for session: 0x200001c5b5f0000
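
Note: the preflight above is run by docker-utils.jar (io.confluent.admin.utils.ClusterStatus): it opens a short-lived, SASL-authenticated ZooKeeper session against the three message-router-zookeeper nodes and exits once the session is established. A minimal sketch of the same check, assuming the JAAS file from the log is supplied via java.security.auth.login.config; this is an illustration, not the actual ClusterStatus implementation:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkHealthCheck {
    public static void main(String[] args) throws Exception {
        // First of the three ZooKeeper endpoints from the connect string above.
        String connect = "dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181";
        CountDownLatch connected = new CountDownLatch(1);
        // Session timeout mirrors the sessionTimeout=40000 seen in the log.
        ZooKeeper zk = new ZooKeeper(connect, 40000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        boolean healthy = connected.await(30, TimeUnit.SECONDS);
        zk.close();
        System.exit(healthy ? 0 : 1);
    }
}
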
===> Launching ... 
===> Launching kafka ... 
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/share/java/kafka/slf4j-log4j12-1.7.26.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/share/java/kafka/kafka11aaf-jar-with-dependencies.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
[2021-06-10 10:59:21,747] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[2021-06-10 10:59:22,217] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://172.16.10.103:30490,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.id = 0
	broker.id.generation.enable = true
	broker.rack = null
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.3-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.3-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1000012
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 10000
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = [DEFAULT]
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.connect = dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 6000
	zookeeper.set.acl = true
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2021-06-10 10:59:22,313] WARN The package io.confluent.support.metrics.collectors.FullCollector for collecting the full set of support metrics could not be loaded, so we are reverting to anonymous, basic metric collection. If you are a Confluent customer, please refer to the Confluent Platform documentation, section Proactive Support, on how to activate full metrics collection. (io.confluent.support.metrics.KafkaSupportConfig)
[2021-06-10 10:59:22,314] WARN The support metrics collection feature ("Metrics") of Proactive Support is disabled. (io.confluent.support.metrics.SupportedServerStartable)
[2021-06-10 10:59:22,317] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler)
[2021-06-10 10:59:22,318] INFO starting (kafka.server.KafkaServer)
[2021-06-10 10:59:22,319] INFO Connecting to zookeeper on dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 (kafka.server.KafkaServer)
[2021-06-10 10:59:22,347] INFO [ZooKeeperClient Kafka server] Initializing a new session to dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181. (kafka.zookeeper.ZooKeeperClient)
[2021-06-10 10:59:22,353] INFO Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,353] INFO Client environment:host.name=dev-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,353] INFO Client environment:java.version=1.8.0_212 (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,353] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,353] INFO Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,354] INFO Client environment:java.class.path=/usr/bin/../share/java/kafka/audience-annotations-0.5.0.jar:/usr/bin/../share/java/kafka/jetty-util-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/javax.servlet-api-3.1.0.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-2.28.jar:/usr/bin/../share/java/kafka/jersey-media-jaxb-2.28.jar:/usr/bin/../share/java/kafka/lz4-java-1.6.0.jar:/usr/bin/../share/java/kafka/jetty-io-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/commons-compress-1.8.1.jar:/usr/bin/../share/java/kafka/httpmime-4.5.7.jar:/usr/bin/../share/java/kafka/connect-json-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jackson-annotations-2.9.9.jar:/usr/bin/../share/java/kafka/paranamer-2.8.jar:/usr/bin/../share/java/kafka/scala-logging_2.11-3.9.0.jar:/usr/bin/../share/java/kafka/jackson-databind-2.9.9.3.jar:/usr/bin/../share/java/kafka/kafka-streams-examples-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-base-2.9.9.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-core-2.28.jar:/usr/bin/../share/java/kafka/jackson-datatype-jdk8-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-json-provider-2.9.9.jar:/usr/bin/../share/java/kafka/reflections-0.9.11.jar:/usr/bin/../share/java/kafka/jackson-mapper-asl-1.9.13.jar:/usr/bin/../share/java/kafka/paranamer-2.7.jar:/usr/bin/../share/java/kafka/slf4j-api-1.7.26.jar:/usr/bin/../share/java/kafka/zookeeper-3.4.14.jar:/usr/bin/../share/java/kafka/xz-1.5.jar:/usr/bin/../share/java/kafka/httpcore-4.4.11.jar:/usr/bin/../share/java/kafka/metrics-core-2.2.0.jar:/usr/bin/../share/java/kafka/jetty-servlet-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-server-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/commons-lang3-3.8.1.jar:/usr/bin/../share/java/kafka/osgi-resource-locator-1.0.1.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-api-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/log4j-1.2.17.jar:/usr/bin/../share/java/kafka/jersey-common-2.28.jar:/usr/bin/../share/java/kafka/hk2-locator-2.5.0.jar:/usr/bin/../share/java/kafka/kafka-clients-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/commons-logging-1.2.jar:/usr/bin/../share/java/kafka/jackson-core-asl-1.9.13.jar:/usr/bin/../share/java/kafka/jakarta.ws.rs-api-2.1.5.jar:/usr/bin/../share/java/kafka/jopt-simple-5.0.4.jar:/usr/bin/../share/java/kafka/connect-file-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jersey-client-2.28.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-test-sources.jar:/usr/bin/../share/java/kafka/plexus-utils-3.2.0.jar:/usr/bin/../share/java/kafka/kafka-streams-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jetty-client-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jackson-core-2.9.9.jar:/usr/bin/../share/java/kafka/jsr305-3.0.2.jar:/usr/bin/../share/java/kafka/commons-codec-1.11.jar:/usr/bin/../share/java/kafka/javassist-3.22.0-CR2.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-sources.jar:/usr/bin/../share/java/kafka/zstd-jni-1.4.0-1.jar:/usr/bin/../share/java/kafka/connect-runtime-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jersey-server-2.28.jar:/usr/bin/../share/java/kafka/kafka-log4j-appender-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/httpclient-4.5.7.jar:/usr/bin/../share/java/kafka/support-metrics-client-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/maven-artifact-3.6.1.jar:/usr/bin/../share/java/kafka/avro-1.8.1.jar:/usr/bin/../share/java/kafka/aopalliance-repackaged-2.5.0.jar:/usr/bin/../share/java/kafka/jakarta.inject-2.5.0.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-javadoc.jar:/usr/bin/../share/java/kafka/slf4j-log4j12-1.7.26.jar:/usr/bin/../share/java/kafka/jakarta.annotation-api-1.3.4.jar:/usr/bin/../share/java/kafka/spotbugs-annotations-3.1.9.jar:/usr/bin/../share/java/kafka/jetty-servlets-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/snappy-java-1.1.7.3.jar:/usr/bin/../share/java/kafka/jackson-module-scala_2.11-2.9.9.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.1.1.jar:/usr/bin/../share/java/kafka/hk2-api-2.5.0.jar:/usr/bin/../share/java/kafka/hk2-utils-2.5.0.jar:/usr/bin/../share/java/kafka/support-metrics-common-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jetty-continuation-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/kafka-streams-test-utils-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jersey-hk2-2.28.jar:/usr/bin/../share/java/kafka/guava-20.0.jar:/usr/bin/../share/java/kafka/jackson-module-paranamer-2.9.9.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-scaladoc.jar:/usr/bin/../share/java/kafka/activation-1.1.1.jar:/usr/bin/../share/java/kafka/jetty-http-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/kafka-streams-scala_2.11-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jetty-security-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/kafka.jar:/usr/bin/../share/java/kafka/connect-transforms-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jackson-dataformat-csv-2.9.9.jar:/usr/bin/../share/java/kafka/jaxb-api-2.3.0.jar:/usr/bin/../share/java/kafka/scala-library-2.11.12.jar:/usr/bin/../share/java/kafka/validation-api-2.0.1.Final.jar:/usr/bin/../share/java/kafka/kafka-tools-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/jackson-module-jaxb-annotations-2.9.9.jar:/usr/bin/../share/java/kafka/rocksdbjni-5.18.3.jar:/usr/bin/../share/java/kafka/connect-basic-auth-extension-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/scala-reflect-2.11.12.jar:/usr/bin/../share/java/kafka/argparse4j-0.7.0.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-test.jar:/usr/bin/../share/java/kafka/zkclient-0.11.jar:/usr/bin/../share/java/kafka/kafka11aaf-jar-with-dependencies.jar:/usr/bin/../support-metrics-client/build/dependant-libs-2.12/*:/usr/bin/../support-metrics-client/build/libs/*:/usr/share/java/support-metrics-client/* (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,354] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,354] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,354] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,354] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,354] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,354] INFO Client environment:os.version=4.15.0-117-generic (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,354] INFO Client environment:user.name=mrkafka (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,354] INFO Client environment:user.home=/home/mrkafka (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,354] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,355] INFO Initiating client connection, connectString=dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=6000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@28701274 (org.apache.zookeeper.ZooKeeper)
[2021-06-10 10:59:22,369] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2021-06-10 10:59:22,385] INFO Client successfully logged in. (org.apache.zookeeper.Login)
[2021-06-10 10:59:22,387] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-06-10 10:59:22,424] INFO Opening socket connection to server dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.242.153.218:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-06-10 10:59:22,430] INFO Socket connection established to dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.242.153.218:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-06-10 10:59:22,441] INFO Session establishment complete on server dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.242.153.218:2181, sessionid = 0x200001c5b5f0003, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)
[2021-06-10 10:59:22,445] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
[2021-06-10 10:59:22,905] INFO Cluster ID = P3Yzo-7gQzaXtwUehK5FWw (kafka.server.KafkaServer)
[2021-06-10 10:59:22,909] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2021-06-10 10:59:23,008] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://172.16.10.103:30490,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.id = 0
	broker.id.generation.enable = true
	broker.rack = null
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.3-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.3-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1000012
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 10000
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = [DEFAULT]
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.connect = dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 6000
	zookeeper.set.acl = true
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2021-06-10 10:59:23,026] INFO KafkaConfig values: 
	advertised.host.name = null
	advertised.listeners = EXTERNAL_SASL_PLAINTEXT://172.16.10.103:30490,INTERNAL_SASL_PLAINTEXT://:9092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.id = 0
	broker.id.generation.enable = true
	broker.rack = null
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 3
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 1800000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT
	inter.broker.protocol.version = 2.3-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT
	listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.max.compaction.lag.ms = 9223372036854775807
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /var/lib/kafka/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.3-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 168
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connections = 2147483647
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 1000012
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 3
	num.recovery.threads.per.data.dir = 5
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 1
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 10080
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 9092
	principal.builder.class = null
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 10000
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = PLAINTEXT
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none
	ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
	ssl.endpoint.identification.algorithm = https
	ssl.key.password = null
	ssl.keymanager.algorithm = SunX509
	ssl.keystore.location = null
	ssl.keystore.password = null
	ssl.keystore.type = JKS
	ssl.principal.mapping.rules = [DEFAULT]
	ssl.protocol = TLS
	ssl.provider = null
	ssl.secure.random.implementation = null
	ssl.trustmanager.algorithm = PKIX
	ssl.truststore.location = null
	ssl.truststore.password = null
	ssl.truststore.type = JKS
	transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
	transaction.max.timeout.ms = 900000
	transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
	transaction.state.log.load.buffer.size = 5242880
	transaction.state.log.min.isr = 1
	transaction.state.log.num.partitions = 50
	transaction.state.log.replication.factor = 1
	transaction.state.log.segment.bytes = 104857600
	transactional.id.expiration.ms = 604800000
	unclean.leader.election.enable = false
	zookeeper.connect = dev-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181
	zookeeper.connection.timeout.ms = 6000
	zookeeper.max.in.flight.requests = 10
	zookeeper.session.timeout.ms = 6000
	zookeeper.set.acl = true
	zookeeper.sync.time.ms = 2000
 (kafka.server.KafkaConfig)
[2021-06-10 10:59:23,056] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-06-10 10:59:23,056] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-06-10 10:59:23,057] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2021-06-10 10:59:23,096] INFO Loading logs. (kafka.log.LogManager)
[2021-06-10 10:59:23,105] INFO Logs loading complete in 9 ms. (kafka.log.LogManager)
[2021-06-10 10:59:23,122] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
[2021-06-10 10:59:23,125] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[2021-06-10 10:59:23,129] INFO Starting the log cleaner (kafka.log.LogCleaner)
[2021-06-10 10:59:23,436] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner)
[2021-06-10 10:59:23,844] INFO Awaiting socket connections on 0.0.0.0:9091. (kafka.network.Acceptor)
[2021-06-10 10:59:23,862] INFO Successfully logged in. (org.apache.kafka.common.security.authenticator.AbstractLogin)
[2021-06-10 10:59:23,887] INFO [SocketServer brokerId=0] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9091,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT) (kafka.network.SocketServer)
[2021-06-10 10:59:23,887] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
[2021-06-10 10:59:23,901] INFO [SocketServer brokerId=0] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT) (kafka.network.SocketServer)
[2021-06-10 10:59:23,903] INFO [SocketServer brokerId=0] Started 2 acceptor threads for data-plane (kafka.network.SocketServer)
[2021-06-10 10:59:23,925] INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-10 10:59:23,927] INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-10 10:59:23,929] INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-10 10:59:23,930] INFO [ExpirationReaper-0-ElectPreferredLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-10 10:59:23,950] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2021-06-10 10:59:24,038] INFO Creating /brokers/ids/0 (is it secure? true) (kafka.zk.KafkaZkClient)
[2021-06-10 10:59:24,057] INFO Stat of the created znode at /brokers/ids/0 is: 12884901950,12884901950,1623322764049,1623322764049,1,0,0,144115309867892739,366,0,12884901950
 (kafka.zk.KafkaZkClient)
[2021-06-10 10:59:24,058] INFO Registered broker 0 at path /brokers/ids/0 with addresses: ArrayBuffer(EndPoint(172.16.10.103,30490,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT), EndPoint(dev-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT)), czxid (broker epoch): 12884901950 (kafka.zk.KafkaZkClient)
[2021-06-10 10:59:24,059] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2021-06-10 10:59:24,122] INFO [ControllerEventThread controllerId=0] Starting (kafka.controller.ControllerEventManager$ControllerEventThread)
[2021-06-10 10:59:24,128] INFO [ExpirationReaper-0-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-10 10:59:24,131] INFO [ExpirationReaper-0-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-10 10:59:24,132] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2021-06-10 10:59:24,142] DEBUG [Controller id=0] Broker 1 has been elected as the controller, so stopping the election process. (kafka.controller.KafkaController)
[2021-06-10 10:59:24,145] INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 10:59:24,146] INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 10:59:24,147] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 10:59:24,162] INFO [ProducerId Manager 0]: Acquired new producerId block (brokerId:0,blockStartProducerId:11000,blockEndProducerId:11999) by writing to Zk with path version 12 (kafka.coordinator.transaction.ProducerIdManager)
[2021-06-10 10:59:24,184] INFO [TransactionCoordinator id=0] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-06-10 10:59:24,186] INFO [TransactionCoordinator id=0] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
[2021-06-10 10:59:24,186] INFO [Transaction Marker Channel Manager 0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2021-06-10 10:59:24,229] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2021-06-10 10:59:24,282] INFO [SocketServer brokerId=0] Started data-plane processors for 2 acceptors (kafka.network.SocketServer)
[2021-06-10 10:59:24,284] INFO Kafka version: 5.3.1-ccs (org.apache.kafka.common.utils.AppInfoParser)
[2021-06-10 10:59:24,284] INFO Kafka commitId: 03799faf9878a999 (org.apache.kafka.common.utils.AppInfoParser)
[2021-06-10 10:59:24,284] INFO Kafka startTimeMs: 1623322764282 (org.apache.kafka.common.utils.AppInfoParser)
[2021-06-10 10:59:24,286] INFO [KafkaServer id=0] started (kafka.server.KafkaServer)
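
Note: at this point the broker serves two SASL_PLAINTEXT/PLAIN listeners: INTERNAL_SASL_PLAINTEXT on 9092 for in-cluster and inter-broker traffic, and EXTERNAL_SASL_PLAINTEXT advertised as 172.16.10.103:30490. A sketch of client settings matching the external listener; the credentials are placeholders, not values from this deployment:

import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;

public class ExternalAdminCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // EXTERNAL_SASL_PLAINTEXT advertised address from the broker registration above.
        props.put("bootstrap.servers", "172.16.10.103:30490");
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put("sasl.mechanism", "PLAIN");
        // Placeholder credentials; real values come from the deployment's JAAS secrets.
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                + "username=\"CHANGE_ME\" password=\"CHANGE_ME\";");
        try (AdminClient admin = AdminClient.create(props)) {
            // Should report the cluster id logged above (P3Yzo-7gQzaXtwUehK5FWw).
            System.out.println(admin.describeCluster().clusterId().get());
        }
    }
}
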
2021-06-10T11:00:54.803+0000 INIT [cadi] Loading CADI Properties from /opt/app/osaaf/local/org.onap.dmaap.mr.location.props
2021-06-10T11:00:54.804+0000 INIT [cadi] Loading CADI Properties from /opt/app/osaaf/local/org.onap.dmaap.mr.cred.props
2021-06-10T11:00:54.811+0000 INIT [cadi] cadi_keyfile points to /opt/app/osaaf/local/org.onap.dmaap.mr.keyfile
2021-06-10T11:00:55.663+0000 INIT [cadi] cadi_protocols is set to TLSv1.1,TLSv1.2
2021-06-10T11:00:55.838+0000 INIT [cadi] AAFLocator for https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1 could not be created. java.net.URISyntaxException: Malformed escape pair at index 36: https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1
2021-06-10T11:00:55.839+0000 ERROR [cadi] Null Locator passed [Ljava.lang.Object;@29fd3b2e
org.onap.aaf.cadi.LocatorException: Null Locator passed
	at org.onap.aaf.cadi.http.HMangr.<init>(HMangr.java:53)
	at org.onap.aaf.cadi.aaf.v2_0.AAFConHttp.<init>(AAFConHttp.java:54)
	at org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider.setup(Cadi3AAFProvider.java:141)
	at org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider.<init>(Cadi3AAFProvider.java:111)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at java.lang.Class.newInstance(Class.java:442)
	at java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:380)
	at java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
	at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
	at org.onap.dmaap.commonauth.kafka.base.authorization.AuthorizationProviderFactory.<init>(AuthorizationProviderFactory.java:34)
	at org.onap.dmaap.commonauth.kafka.base.authorization.AuthorizationProviderFactory.<clinit>(AuthorizationProviderFactory.java:29)
	at org.onap.dmaap.kafkaAuthorize.PlainSaslServer1.evaluateResponse(PlainSaslServer1.java:106)
	at org.apache.kafka.common.security.authenticator.SaslServerAuthenticator.handleSaslToken(SaslServerAuthenticator.java:451)
	at org.apache.kafka.common.security.authenticator.SaslServerAuthenticator.authenticate(SaslServerAuthenticator.java:291)
	at org.apache.kafka.common.network.KafkaChannel.prepare(KafkaChannel.java:173)
	at org.apache.kafka.common.network.Selector.pollSelectionKeys(Selector.java:547)
	at org.apache.kafka.common.network.Selector.poll(Selector.java:483)
	at kafka.network.Processor.poll(SocketServer.scala:863)
	at kafka.network.Processor.run(SocketServer.scala:762)
	at java.lang.Thread.run(Thread.java:748)
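
Note: the failure above is mechanical. The locator URL appends the unresolved %CNS.%AAF_NS placeholders to aaf_locate_url, and in java.net.URI a bare '%' must begin a two-hex-digit escape; the 'N' in %CNS is not a hex digit, so parsing fails at index 36, the first '%'. A minimal reproduction:

import java.net.URI;
import java.net.URISyntaxException;

public class LocatorUriRepro {
    public static void main(String[] args) {
        // Same URL the AAFLocator tried to build from aaf_locate_url plus the
        // unresolved %CNS.%AAF_NS template.
        String url = "https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1";
        try {
            new URI(url);
        } catch (URISyntaxException e) {
            // Prints: Malformed escape pair at index 36: https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1
            System.out.println(e.getMessage());
        }
    }
}
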
[2021-06-10 11:00:55,840] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:00:55,840] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:40,114] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0, zkVersion=0, replicas=0, isNew=true) correlation id 1 from controller 1 epoch 8 for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-06-10 11:04:40,126] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 1 from controller 1 epoch 8 starting the become-leader transition for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-06-10 11:04:40,129] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(SDC-DISTR-NOTIF-TOPIC-AUTO-0) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:04:40,245] INFO [Log partition=SDC-DISTR-NOTIF-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:40,256] INFO [Log partition=SDC-DISTR-NOTIF-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 96 ms (kafka.log.Log)
[2021-06-10 11:04:40,260] INFO Created log for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:40,262] INFO [Partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 broker=0] No checkpointed highwatermark is found for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (kafka.cluster.Partition)
[2021-06-10 11:04:40,265] INFO Replica loaded for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:40,268] INFO [Partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 broker=0] SDC-DISTR-NOTIF-TOPIC-AUTO-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:40,285] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 1 for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:40,286] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 1 from controller 1 epoch 8 for the become-leader transition for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 (state.change.logger)
[2021-06-10 11:04:40,310] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 2 (state.change.logger)
[2021-06-10 11:04:40,536] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 3 (state.change.logger)
[2021-06-10 11:04:42,261] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-13 (state.change.logger)
[2021-06-10 11:04:42,261] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-46 (state.change.logger)
[2021-06-10 11:04:42,261] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-9 (state.change.logger)
[2021-06-10 11:04:42,261] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-42 (state.change.logger)
[2021-06-10 11:04:42,261] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-21 (state.change.logger)
[2021-06-10 11:04:42,261] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-17 (state.change.logger)
[2021-06-10 11:04:42,261] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-30 (state.change.logger)
[2021-06-10 11:04:42,261] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-26 (state.change.logger)
[2021-06-10 11:04:42,261] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-5 (state.change.logger)
[2021-06-10 11:04:42,261] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-38 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-1 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-34 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-16 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-45 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-12 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-41 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-24 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-20 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-49 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-0 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-29 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-25 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-8 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-37 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-4 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-33 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-15 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-48 (state.change.logger)
[2021-06-10 11:04:42,262] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-11 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-44 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-23 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-19 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-32 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-28 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-7 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-40 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-3 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-36 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-47 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-14 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-43 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-10 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-22 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-18 (state.change.logger)
[2021-06-10 11:04:42,263] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-31 (state.change.logger)
[2021-06-10 11:04:42,264] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-27 (state.change.logger)
[2021-06-10 11:04:42,264] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-39 (state.change.logger)
[2021-06-10 11:04:42,264] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-6 (state.change.logger)
[2021-06-10 11:04:42,264] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-35 (state.change.logger)
[2021-06-10 11:04:42,264] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 1 epoch 8 for partition __consumer_offsets-2 (state.change.logger)
[2021-06-10 11:04:42,404] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-10 (state.change.logger)
[2021-06-10 11:04:42,404] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-7 (state.change.logger)
[2021-06-10 11:04:42,405] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-4 (state.change.logger)
[2021-06-10 11:04:42,405] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-1 (state.change.logger)
[2021-06-10 11:04:42,405] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-49 (state.change.logger)
[2021-06-10 11:04:42,405] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-46 (state.change.logger)
[2021-06-10 11:04:42,406] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-43 (state.change.logger)
[2021-06-10 11:04:42,406] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-40 (state.change.logger)
[2021-06-10 11:04:42,406] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-37 (state.change.logger)
[2021-06-10 11:04:42,406] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-34 (state.change.logger)
[2021-06-10 11:04:42,406] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-31 (state.change.logger)
[2021-06-10 11:04:42,406] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-19 (state.change.logger)
[2021-06-10 11:04:42,406] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-28 (state.change.logger)
[2021-06-10 11:04:42,406] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-16 (state.change.logger)
[2021-06-10 11:04:42,406] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-25 (state.change.logger)
[2021-06-10 11:04:42,407] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-22 (state.change.logger)
[2021-06-10 11:04:42,407] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-leader transition for partition __consumer_offsets-13 (state.change.logger)
[2021-06-10 11:04:42,407] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(__consumer_offsets-22, __consumer_offsets-4, __consumer_offsets-7, __consumer_offsets-46, __consumer_offsets-25, __consumer_offsets-49, __consumer_offsets-16, __consumer_offsets-28, __consumer_offsets-31, __consumer_offsets-37, __consumer_offsets-19, __consumer_offsets-13, __consumer_offsets-43, __consumer_offsets-1, __consumer_offsets-34, __consumer_offsets-10, __consumer_offsets-40) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:04:42,448] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,451] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 33 ms (kafka.log.Log)
[2021-06-10 11:04:42,453] INFO Created log for partition __consumer_offsets-10 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,455] INFO [Partition __consumer_offsets-10 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-10 (kafka.cluster.Partition)
[2021-06-10 11:04:42,456] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,456] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,457] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,457] INFO [Partition __consumer_offsets-10 broker=0] __consumer_offsets-10 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,466] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-10 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,526] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,528] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 54 ms (kafka.log.Log)
[2021-06-10 11:04:42,529] INFO Created log for partition __consumer_offsets-7 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,530] INFO [Partition __consumer_offsets-7 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-7 (kafka.cluster.Partition)
[2021-06-10 11:04:42,530] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,530] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,530] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,530] INFO [Partition __consumer_offsets-7 broker=0] __consumer_offsets-7 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,537] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-7 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,564] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,568] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 25 ms (kafka.log.Log)
[2021-06-10 11:04:42,569] INFO Created log for partition __consumer_offsets-4 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,571] INFO [Partition __consumer_offsets-4 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-4 (kafka.cluster.Partition)
[2021-06-10 11:04:42,571] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,571] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,571] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,571] INFO [Partition __consumer_offsets-4 broker=0] __consumer_offsets-4 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,576] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-4 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,618] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,620] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 38 ms (kafka.log.Log)
[2021-06-10 11:04:42,621] INFO Created log for partition __consumer_offsets-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,622] INFO [Partition __consumer_offsets-1 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,622] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,622] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,622] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,622] INFO [Partition __consumer_offsets-1 broker=0] __consumer_offsets-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,629] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-1 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,651] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,655] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 22 ms (kafka.log.Log)
[2021-06-10 11:04:42,657] INFO Created log for partition __consumer_offsets-49 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,657] INFO [Partition __consumer_offsets-49 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-49 (kafka.cluster.Partition)
[2021-06-10 11:04:42,658] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,658] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,658] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,658] INFO [Partition __consumer_offsets-49 broker=0] __consumer_offsets-49 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,663] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-49 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,684] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,687] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 20 ms (kafka.log.Log)
[2021-06-10 11:04:42,692] INFO Created log for partition __consumer_offsets-46 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,704] INFO [Partition __consumer_offsets-46 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-46 (kafka.cluster.Partition)
[2021-06-10 11:04:42,704] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,704] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,704] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,705] INFO [Partition __consumer_offsets-46 broker=0] __consumer_offsets-46 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,710] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-46 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,733] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,735] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 21 ms (kafka.log.Log)
[2021-06-10 11:04:42,736] INFO Created log for partition __consumer_offsets-43 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,737] INFO [Partition __consumer_offsets-43 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-43 (kafka.cluster.Partition)
[2021-06-10 11:04:42,737] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,737] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,737] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,737] INFO [Partition __consumer_offsets-43 broker=0] __consumer_offsets-43 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,743] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-43 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,769] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,772] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 26 ms (kafka.log.Log)
[2021-06-10 11:04:42,773] INFO Created log for partition __consumer_offsets-40 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,773] INFO [Partition __consumer_offsets-40 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-40 (kafka.cluster.Partition)
[2021-06-10 11:04:42,773] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,773] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,773] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,773] INFO [Partition __consumer_offsets-40 broker=0] __consumer_offsets-40 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,778] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-40 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,811] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,813] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 31 ms (kafka.log.Log)
[2021-06-10 11:04:42,814] INFO Created log for partition __consumer_offsets-37 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,815] INFO [Partition __consumer_offsets-37 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-37 (kafka.cluster.Partition)
[2021-06-10 11:04:42,815] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,815] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,815] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,816] INFO [Partition __consumer_offsets-37 broker=0] __consumer_offsets-37 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,821] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-37 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,844] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,846] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 20 ms (kafka.log.Log)
[2021-06-10 11:04:42,847] INFO Created log for partition __consumer_offsets-34 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,847] INFO [Partition __consumer_offsets-34 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-34 (kafka.cluster.Partition)
[2021-06-10 11:04:42,847] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,848] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,848] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,848] INFO [Partition __consumer_offsets-34 broker=0] __consumer_offsets-34 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,854] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-34 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,887] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,901] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 33 ms (kafka.log.Log)
[2021-06-10 11:04:42,902] INFO Created log for partition __consumer_offsets-31 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,902] INFO [Partition __consumer_offsets-31 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-31 (kafka.cluster.Partition)
[2021-06-10 11:04:42,903] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,903] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,903] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,903] INFO [Partition __consumer_offsets-31 broker=0] __consumer_offsets-31 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,908] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-31 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,937] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,940] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 23 ms (kafka.log.Log)
[2021-06-10 11:04:42,941] INFO Created log for partition __consumer_offsets-19 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,942] INFO [Partition __consumer_offsets-19 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-19 (kafka.cluster.Partition)
[2021-06-10 11:04:42,942] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,942] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,942] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,942] INFO [Partition __consumer_offsets-19 broker=0] __consumer_offsets-19 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,947] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-19 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:42,971] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:42,974] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 23 ms (kafka.log.Log)
[2021-06-10 11:04:42,976] INFO Created log for partition __consumer_offsets-28 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:42,976] INFO [Partition __consumer_offsets-28 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-28 (kafka.cluster.Partition)
[2021-06-10 11:04:42,976] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,977] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,977] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:42,977] INFO [Partition __consumer_offsets-28 broker=0] __consumer_offsets-28 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:42,983] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-28 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:43,018] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,022] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 35 ms (kafka.log.Log)
[2021-06-10 11:04:43,023] INFO Created log for partition __consumer_offsets-16 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,024] INFO [Partition __consumer_offsets-16 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-16 (kafka.cluster.Partition)
[2021-06-10 11:04:43,024] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,024] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,024] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,024] INFO [Partition __consumer_offsets-16 broker=0] __consumer_offsets-16 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:43,031] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-16 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:43,056] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,059] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 22 ms (kafka.log.Log)
[2021-06-10 11:04:43,060] INFO Created log for partition __consumer_offsets-25 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,062] INFO [Partition __consumer_offsets-25 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-25 (kafka.cluster.Partition)
[2021-06-10 11:04:43,062] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,062] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,062] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,063] INFO [Partition __consumer_offsets-25 broker=0] __consumer_offsets-25 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:43,069] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-25 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:43,110] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,112] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 37 ms (kafka.log.Log)
[2021-06-10 11:04:43,114] INFO Created log for partition __consumer_offsets-22 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,114] INFO [Partition __consumer_offsets-22 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-22 (kafka.cluster.Partition)
[2021-06-10 11:04:43,114] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,114] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,114] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,114] INFO [Partition __consumer_offsets-22 broker=0] __consumer_offsets-22 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:43,120] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-22 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:43,147] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,150] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 24 ms (kafka.log.Log)
[2021-06-10 11:04:43,151] INFO Created log for partition __consumer_offsets-13 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,151] INFO [Partition __consumer_offsets-13 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-13 (kafka.cluster.Partition)
[2021-06-10 11:04:43,151] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,151] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,151] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,152] INFO [Partition __consumer_offsets-13 broker=0] __consumer_offsets-13 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-13 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-10 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-7 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-4 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-1 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-49 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-46 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-43 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-40 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-37 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-34 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-31 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-19 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-28 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-16 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-25 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-22 (state.change.logger)
[2021-06-10 11:04:43,162] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-leader transition for partition __consumer_offsets-13 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-0 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-48 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-45 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-42 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-39 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-36 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-33 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-30 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-27 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-24 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-21 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,164] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-18 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-15 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-12 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-9 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-6 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-06-10 11:04:43,165] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 4 from controller 1 epoch 8 starting the become-follower transition for partition __consumer_offsets-3 with leader 2 (state.change.logger)
[2021-06-10 11:04:43,168] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,205] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,208] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 35 ms (kafka.log.Log)
[2021-06-10 11:04:43,209] INFO Created log for partition __consumer_offsets-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,210] INFO [Partition __consumer_offsets-0 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-0 (kafka.cluster.Partition)
[2021-06-10 11:04:43,210] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,210] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,211] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,237] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,240] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 24 ms (kafka.log.Log)
[2021-06-10 11:04:43,242] INFO Created log for partition __consumer_offsets-29 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,243] INFO [Partition __consumer_offsets-29 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-29 (kafka.cluster.Partition)
[2021-06-10 11:04:43,243] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,243] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,244] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,268] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,271] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 20 ms (kafka.log.Log)
[2021-06-10 11:04:43,272] INFO Created log for partition __consumer_offsets-48 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,272] INFO [Partition __consumer_offsets-48 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-48 (kafka.cluster.Partition)
[2021-06-10 11:04:43,272] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,272] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,273] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,273] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,300] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,303] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 26 ms (kafka.log.Log)
[2021-06-10 11:04:43,304] INFO Created log for partition __consumer_offsets-45 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,305] INFO [Partition __consumer_offsets-45 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-45 (kafka.cluster.Partition)
[2021-06-10 11:04:43,305] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,306] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,306] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,328] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,331] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 21 ms (kafka.log.Log)
[2021-06-10 11:04:43,332] INFO Created log for partition __consumer_offsets-26 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,333] INFO [Partition __consumer_offsets-26 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-26 (kafka.cluster.Partition)
[2021-06-10 11:04:43,333] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,333] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,355] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,358] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 21 ms (kafka.log.Log)
[2021-06-10 11:04:43,359] INFO Created log for partition __consumer_offsets-42 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,359] INFO [Partition __consumer_offsets-42 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-42 (kafka.cluster.Partition)
[2021-06-10 11:04:43,359] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,360] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,360] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,382] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,384] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 19 ms (kafka.log.Log)
[2021-06-10 11:04:43,385] INFO Created log for partition __consumer_offsets-23 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,386] INFO [Partition __consumer_offsets-23 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-23 (kafka.cluster.Partition)
[2021-06-10 11:04:43,386] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,386] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,386] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,386] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,410] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,412] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 20 ms (kafka.log.Log)
[2021-06-10 11:04:43,413] INFO Created log for partition __consumer_offsets-20 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,414] INFO [Partition __consumer_offsets-20 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-20 (kafka.cluster.Partition)
[2021-06-10 11:04:43,414] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,414] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,414] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,434] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,436] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 19 ms (kafka.log.Log)
[2021-06-10 11:04:43,438] INFO Created log for partition __consumer_offsets-39 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,438] INFO [Partition __consumer_offsets-39 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-39 (kafka.cluster.Partition)
[2021-06-10 11:04:43,438] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,439] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,460] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,462] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 19 ms (kafka.log.Log)
[2021-06-10 11:04:43,463] INFO Created log for partition __consumer_offsets-17 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,464] INFO [Partition __consumer_offsets-17 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-17 (kafka.cluster.Partition)
[2021-06-10 11:04:43,464] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,464] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,464] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,494] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,497] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 29 ms (kafka.log.Log)
[2021-06-10 11:04:43,498] INFO Created log for partition __consumer_offsets-36 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,499] INFO [Partition __consumer_offsets-36 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-36 (kafka.cluster.Partition)
[2021-06-10 11:04:43,499] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,499] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,499] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,499] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,522] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,524] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 20 ms (kafka.log.Log)
[2021-06-10 11:04:43,525] INFO Created log for partition __consumer_offsets-14 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,526] INFO [Partition __consumer_offsets-14 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-14 (kafka.cluster.Partition)
[2021-06-10 11:04:43,526] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,526] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,526] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,721] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,725] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 190 ms (kafka.log.Log)
[2021-06-10 11:04:43,726] INFO Created log for partition __consumer_offsets-33 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,727] INFO [Partition __consumer_offsets-33 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-33 (kafka.cluster.Partition)
[2021-06-10 11:04:43,727] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,727] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,765] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:43,768] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 37 ms (kafka.log.Log)
[2021-06-10 11:04:43,769] INFO Created log for partition __consumer_offsets-11 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:43,770] INFO [Partition __consumer_offsets-11 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-11 (kafka.cluster.Partition)
[2021-06-10 11:04:43,770] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,770] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:43,770] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,478] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,482] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 498 ms (kafka.log.Log)
[2021-06-10 11:04:44,484] INFO Created log for partition __consumer_offsets-30 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,484] INFO [Partition __consumer_offsets-30 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-30 (kafka.cluster.Partition)
[2021-06-10 11:04:44,484] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,484] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,485] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,485] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,582] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,585] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 77 ms (kafka.log.Log)
[2021-06-10 11:04:44,586] INFO Created log for partition __consumer_offsets-27 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,587] INFO [Partition __consumer_offsets-27 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-27 (kafka.cluster.Partition)
[2021-06-10 11:04:44,587] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,587] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,587] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,672] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,675] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 63 ms (kafka.log.Log)
[2021-06-10 11:04:44,676] INFO Created log for partition __consumer_offsets-8 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,676] INFO [Partition __consumer_offsets-8 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-8 (kafka.cluster.Partition)
[2021-06-10 11:04:44,676] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,677] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,712] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,715] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 29 ms (kafka.log.Log)
[2021-06-10 11:04:44,717] INFO Created log for partition __consumer_offsets-24 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,717] INFO [Partition __consumer_offsets-24 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-24 (kafka.cluster.Partition)
[2021-06-10 11:04:44,717] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,718] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,718] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,744] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,748] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 26 ms (kafka.log.Log)
[2021-06-10 11:04:44,749] INFO Created log for partition __consumer_offsets-5 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,750] INFO [Partition __consumer_offsets-5 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-5 (kafka.cluster.Partition)
[2021-06-10 11:04:44,750] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,750] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,750] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,750] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,772] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,775] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 20 ms (kafka.log.Log)
[2021-06-10 11:04:44,776] INFO Created log for partition __consumer_offsets-21 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,776] INFO [Partition __consumer_offsets-21 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-21 (kafka.cluster.Partition)
[2021-06-10 11:04:44,776] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,777] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,777] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,805] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,808] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 27 ms (kafka.log.Log)
[2021-06-10 11:04:44,809] INFO Created log for partition __consumer_offsets-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,810] INFO [Partition __consumer_offsets-2 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-2 (kafka.cluster.Partition)
[2021-06-10 11:04:44,810] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,810] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,839] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,842] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 27 ms (kafka.log.Log)
[2021-06-10 11:04:44,843] INFO Created log for partition __consumer_offsets-18 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,843] INFO [Partition __consumer_offsets-18 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-18 (kafka.cluster.Partition)
[2021-06-10 11:04:44,844] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,844] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,844] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,844] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,894] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,897] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 48 ms (kafka.log.Log)
[2021-06-10 11:04:44,898] INFO Created log for partition __consumer_offsets-15 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,898] INFO [Partition __consumer_offsets-15 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-15 (kafka.cluster.Partition)
[2021-06-10 11:04:44,898] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,899] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,926] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,929] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 25 ms (kafka.log.Log)
[2021-06-10 11:04:44,931] INFO Created log for partition __consumer_offsets-12 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,932] INFO [Partition __consumer_offsets-12 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-12 (kafka.cluster.Partition)
[2021-06-10 11:04:44,932] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,932] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,932] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,932] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,963] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:44,965] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 22 ms (kafka.log.Log)
[2021-06-10 11:04:44,967] INFO Created log for partition __consumer_offsets-9 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:44,968] INFO [Partition __consumer_offsets-9 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-9 (kafka.cluster.Partition)
[2021-06-10 11:04:44,968] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:44,968] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,000] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:45,003] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 21 ms (kafka.log.Log)
[2021-06-10 11:04:45,004] INFO Created log for partition __consumer_offsets-47 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:45,005] INFO [Partition __consumer_offsets-47 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-47 (kafka.cluster.Partition)
[2021-06-10 11:04:45,005] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,005] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,005] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,005] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,037] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:45,039] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 30 ms (kafka.log.Log)
[2021-06-10 11:04:45,040] INFO Created log for partition __consumer_offsets-38 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:45,040] INFO [Partition __consumer_offsets-38 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-38 (kafka.cluster.Partition)
[2021-06-10 11:04:45,040] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,040] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,067] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:45,069] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 25 ms (kafka.log.Log)
[2021-06-10 11:04:45,071] INFO Created log for partition __consumer_offsets-35 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:45,071] INFO [Partition __consumer_offsets-35 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-35 (kafka.cluster.Partition)
[2021-06-10 11:04:45,072] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,072] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,072] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,072] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,098] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:45,103] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 26 ms (kafka.log.Log)
[2021-06-10 11:04:45,105] INFO Created log for partition __consumer_offsets-44 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:45,107] INFO [Partition __consumer_offsets-44 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-44 (kafka.cluster.Partition)
[2021-06-10 11:04:45,107] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,107] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,141] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:45,145] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 27 ms (kafka.log.Log)
[2021-06-10 11:04:45,146] INFO Created log for partition __consumer_offsets-6 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:45,146] INFO [Partition __consumer_offsets-6 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-6 (kafka.cluster.Partition)
[2021-06-10 11:04:45,146] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,147] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,147] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,168] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:45,171] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 19 ms (kafka.log.Log)
[2021-06-10 11:04:45,172] INFO Created log for partition __consumer_offsets-41 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:45,173] INFO [Partition __consumer_offsets-41 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-41 (kafka.cluster.Partition)
[2021-06-10 11:04:45,173] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,173] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,173] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,173] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,197] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:45,201] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 24 ms (kafka.log.Log)
[2021-06-10 11:04:45,205] INFO Created log for partition __consumer_offsets-32 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:45,206] INFO [Partition __consumer_offsets-32 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-32 (kafka.cluster.Partition)
[2021-06-10 11:04:45,206] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,207] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,207] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,259] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:04:45,262] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 41 ms (kafka.log.Log)
[2021-06-10 11:04:45,263] INFO Created log for partition __consumer_offsets-3 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:04:45,264] INFO [Partition __consumer_offsets-3 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-3 (kafka.cluster.Partition)
[2021-06-10 11:04:45,264] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:04:45,265] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(__consumer_offsets-6, __consumer_offsets-32, __consumer_offsets-14, __consumer_offsets-36, __consumer_offsets-18, __consumer_offsets-0, __consumer_offsets-41, __consumer_offsets-26, __consumer_offsets-23, __consumer_offsets-45, __consumer_offsets-8, __consumer_offsets-27, __consumer_offsets-12, __consumer_offsets-9, __consumer_offsets-35, __consumer_offsets-17, __consumer_offsets-39, __consumer_offsets-21, __consumer_offsets-47, __consumer_offsets-44, __consumer_offsets-3, __consumer_offsets-29, __consumer_offsets-48, __consumer_offsets-11, __consumer_offsets-30, __consumer_offsets-33, __consumer_offsets-15, __consumer_offsets-38, __consumer_offsets-20, __consumer_offsets-42, __consumer_offsets-5, __consumer_offsets-2, __consumer_offsets-24) (kafka.server.ReplicaFetcherManager)
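
This "Removed fetcher" entry and the TRACE lines that follow record the standard become-follower sequence driven by the controller's LeaderAndIsr request (controller 1, epoch 8, correlation id 4): broker 0 first stops any existing fetchers for the 33 partitions it no longer leads, then truncates each log and checkpoints its recovery point, and finally starts fetchers against the new leaders (brokers 1 and 2). Once the transition settles, the resulting leadership layout can be confirmed with the topics tool; a sketch, reusing the hypothetical /tmp/client.properties from the earlier example:

# Show leader, replicas and ISR for every __consumer_offsets partition
kafka-topics --bootstrap-server localhost:9092 \
  --command-config /tmp/client.properties \
  --describe --topic __consumer_offsets
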
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,268] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-0 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-3 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-6 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-9 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-12 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-15 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-18 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-21 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-24 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-27 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-30 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-33 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-36 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-39 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-42 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-45 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,269] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-48 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,275] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-41 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,275] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-44 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,275] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-47 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,275] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-2 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,275] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-5 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,275] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-8 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,275] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-11 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-14 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-17 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-20 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-23 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-26 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-29 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-32 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-35 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-38 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-0 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-3 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-6 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-9 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-12 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-15 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-18 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-21 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-24 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-27 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-30 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-33 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-36 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-39 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-42 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-45 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,276] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-48 as part of become-follower request with correlation id 4 from controller 1 epoch 8 with leader 2 (state.change.logger)
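
Truncating each follower log to its local high watermark before fetching is what prevents a follower from diverging from its new leader. Because all of these partitions are freshly created and empty (log end offset 0, largest offset -1), the "Truncating to 0 has no effect" messages that follow are expected and harmless. Log end offsets can also be checked directly; a sketch using the GetOffsetShell tool that ships with Kafka (note that older versions of this tool cannot pass SASL client settings, so on an authenticated listener it may fail where the tools above succeed):

# Print the latest offset (--time -1) of each __consumer_offsets partition
kafka-run-class kafka.tools.GetOffsetShell \
  --broker-list localhost:9092 \
  --topic __consumer_offsets --time -1
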
[2021-06-10 11:04:45,332] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,345] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:45,346] INFO Bypassing authentication for admin user admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:45,349] INFO [ReplicaFetcherManager on broker 0] Added fetcher to broker BrokerEndPoint(id=1, host=dev-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-8 -> (offset=0, leaderEpoch=0), __consumer_offsets-35 -> (offset=0, leaderEpoch=0), __consumer_offsets-41 -> (offset=0, leaderEpoch=0), __consumer_offsets-23 -> (offset=0, leaderEpoch=0), __consumer_offsets-47 -> (offset=0, leaderEpoch=0), __consumer_offsets-38 -> (offset=0, leaderEpoch=0), __consumer_offsets-17 -> (offset=0, leaderEpoch=0), __consumer_offsets-11 -> (offset=0, leaderEpoch=0), __consumer_offsets-2 -> (offset=0, leaderEpoch=0), __consumer_offsets-14 -> (offset=0, leaderEpoch=0), __consumer_offsets-20 -> (offset=0, leaderEpoch=0), __consumer_offsets-44 -> (offset=0, leaderEpoch=0), __consumer_offsets-5 -> (offset=0, leaderEpoch=0), __consumer_offsets-26 -> (offset=0, leaderEpoch=0), __consumer_offsets-29 -> (offset=0, leaderEpoch=0), __consumer_offsets-32 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:04:45,372] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-17 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,379] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,380] INFO [ReplicaFetcherManager on broker 0] Added fetcher to broker BrokerEndPoint(id=2, host=dev-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-30 -> (offset=0, leaderEpoch=0), __consumer_offsets-21 -> (offset=0, leaderEpoch=0), __consumer_offsets-27 -> (offset=0, leaderEpoch=0), __consumer_offsets-9 -> (offset=0, leaderEpoch=0), __consumer_offsets-33 -> (offset=0, leaderEpoch=0), __consumer_offsets-36 -> (offset=0, leaderEpoch=0), __consumer_offsets-42 -> (offset=0, leaderEpoch=0), __consumer_offsets-3 -> (offset=0, leaderEpoch=0), __consumer_offsets-18 -> (offset=0, leaderEpoch=0), __consumer_offsets-15 -> (offset=0, leaderEpoch=0), __consumer_offsets-24 -> (offset=0, leaderEpoch=0), __consumer_offsets-48 -> (offset=0, leaderEpoch=0), __consumer_offsets-6 -> (offset=0, leaderEpoch=0), __consumer_offsets-0 -> (offset=0, leaderEpoch=0), __consumer_offsets-39 -> (offset=0, leaderEpoch=0), __consumer_offsets-12 -> (offset=0, leaderEpoch=0), __consumer_offsets-45 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
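
The two "Added fetcher" maps give the final assignment: broker 0 now follows 16 offsets partitions on broker 1 and 17 on broker 2 (leading the remaining partitions of the 50-partition topic itself), each starting from offset 0 with leaderEpoch 0. With the fetchers running, replication health is easy to spot-check; on a healthy cluster the following prints nothing (sketch, same hypothetical client settings as above):

# Any output lists partitions whose ISR is smaller than the replica set
kafka-topics --bootstrap-server localhost:9092 \
  --command-config /tmp/client.properties \
  --describe --under-replicated-partitions
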
[2021-06-10 11:04:45,380] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,381] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-6 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,381] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,383] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-0 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-3 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-6 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-9 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-12 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-15 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-18 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-21 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-24 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-27 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-30 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-33 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-36 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-39 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-42 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-45 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,384] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 4 for partition __consumer_offsets-48 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,385] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-0 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,385] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-29 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,385] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-48 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,385] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-45 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-26 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-42 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-23 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-20 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-39 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-17 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-36 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-14 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-33 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-11 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,386] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-32 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-30 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-27 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-8 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,386] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-47 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-24 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-5 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,386] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-14 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-21 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-2 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-18 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-15 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-12 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-9 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,386] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-44 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,387] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,387] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-29 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,387] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,387] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-11 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,387] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,387] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-41 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,387] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,387] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-26 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,387] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,387] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-23 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,387] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,387] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-8 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,387] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,387] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-38 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,387] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,387] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-20 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,387] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,388] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-5 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,388] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,388] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-35 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,388] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,388] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition __consumer_offsets-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,388] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,387] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-47 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,389] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-38 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,389] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-35 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,389] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-39 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,389] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-44 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,389] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,389] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-6 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,389] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-41 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,389] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-21 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,389] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-32 with leader 1 (state.change.logger)
[2021-06-10 11:04:45,390] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,390] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 4 from controller 1 epoch 8 for the become-follower transition for partition __consumer_offsets-3 with leader 2 (state.change.logger)
[2021-06-10 11:04:45,390] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-36 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,390] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,390] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-3 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,390] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,390] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-18 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,390] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,390] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-48 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,390] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,390] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-33 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,390] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,390] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-30 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,390] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,392] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,392] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,400] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-15 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,401] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,402] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-45 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,402] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,402] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-27 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,402] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,402] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-12 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,402] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,402] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-9 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,402] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,402] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-42 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,402] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,402] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-24 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:04:45,402] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,406] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,407] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,407] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,407] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,407] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,434] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,436] INFO [GroupMetadataManager brokerId=0] Scheduling unloading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,443] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-22 in 35 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,444] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-25 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,444] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-28 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,445] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-31 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,445] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-34 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,445] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-37 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,445] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-40 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,445] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-43 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,446] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-46 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,446] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-49 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,446] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-1 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,446] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-4 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,446] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-7 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,446] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-10 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,447] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-13 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,447] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-16 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,447] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-19 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,450] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-41. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,450] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-44. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,450] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-47. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,450] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-2. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,450] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-5. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-8. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-11. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-14. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-17. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-20. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-23. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-26. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-29. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-32. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-35. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-38. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-0. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-3. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-6. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-9. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-12. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-15. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-18. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-21. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-24. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-27. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-30. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-33. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-36. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-39. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-42. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-45. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,451] INFO [GroupMetadataManager brokerId=0] Finished unloading __consumer_offsets-48. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:04:45,480] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-13 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-17 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-5 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-38 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-34 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-20 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-49 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-25 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-8 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-4 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-15 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,481] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-11 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-44 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-23 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-19 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-32 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-28 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-40 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-36 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-47 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-14 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-43 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-10 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-22 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-31 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-35 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,482] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-10 11:04:45,510] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-30 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,514] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-21 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,514] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-27 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,514] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-9 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-33 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-36 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-42 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-3 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-18 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-15 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-24 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-48 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-6 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-0 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-39 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-12 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,515] ERROR [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Error for partition __consumer_offsets-45 at offset 0 (kafka.server.ReplicaFetcherThread)
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition.
[2021-06-10 11:04:45,666] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:45,667] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:48,839] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:48,839] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:50,567] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:50,567] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:55,566] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:55,566] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:55,598] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-28) (reason: Adding new member sdc-COpenSource-Env11-sdnc-dockero-1c47b7e8-4c2f-4017-9b49-8cc5e67e0519 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:04:58,372] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:58,372] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:58,605] INFO [GroupCoordinator 0]: Stabilized group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:04:58,617] INFO [GroupCoordinator 0]: Assignment received from leader for group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:04:58,833] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:04:58,833] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:03,317] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:03,317] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:07,047] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:07,047] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:07,087] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:07,087] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:07,093] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-19) (reason: Adding new member ves-openapi-manager-256e2f84-016e-4e34-a50b-8d650ca95d28 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:05:08,260] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:08,261] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:10,095] INFO [GroupCoordinator 0]: Stabilized group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:05:10,098] INFO [GroupCoordinator 0]: Assignment received from leader for group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:05:10,507] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:10,507] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:20,815] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:20,815] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:23,977] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:23,977] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:52,630] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:52,632] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:52,638] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-46) (reason: Adding new member policy-id-daba92db-b12f-4270-9351-4209b2a17a91 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:05:55,640] INFO [GroupCoordinator 0]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:05:55,642] INFO [GroupCoordinator 0]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:05:55,773] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:05:55,773] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:12,131] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 6 from controller 1 epoch 8 for partition POLICY-PDP-PAP-1 (state.change.logger)
[2021-06-10 11:06:12,133] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 6 from controller 1 epoch 8 for partition POLICY-PDP-PAP-2 (state.change.logger)
[2021-06-10 11:06:12,133] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 6 from controller 1 epoch 8 for partition POLICY-PDP-PAP-0 (state.change.logger)
[2021-06-10 11:06:12,135] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 6 from controller 1 epoch 8 starting the become-leader transition for partition POLICY-PDP-PAP-1 (state.change.logger)
[2021-06-10 11:06:12,136] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(POLICY-PDP-PAP-1) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:06:12,165] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:06:12,168] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 25 ms (kafka.log.Log)
[2021-06-10 11:06:12,169] INFO Created log for partition POLICY-PDP-PAP-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:06:12,172] INFO [Partition POLICY-PDP-PAP-1 broker=0] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-1 (kafka.cluster.Partition)
[2021-06-10 11:06:12,175] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:12,176] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:12,176] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:12,176] INFO [Partition POLICY-PDP-PAP-1 broker=0] POLICY-PDP-PAP-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:06:12,183] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 6 for partition POLICY-PDP-PAP-1 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:06:12,183] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 6 from controller 1 epoch 8 for the become-leader transition for partition POLICY-PDP-PAP-1 (state.change.logger)
[2021-06-10 11:06:12,183] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 6 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-0 with leader 2 (state.change.logger)
[2021-06-10 11:06:12,183] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 6 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-06-10 11:06:12,183] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:12,224] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:06:12,226] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 35 ms (kafka.log.Log)
[2021-06-10 11:06:12,227] INFO Created log for partition POLICY-PDP-PAP-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:06:12,232] INFO [Partition POLICY-PDP-PAP-0 broker=0] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-0 (kafka.cluster.Partition)
[2021-06-10 11:06:12,232] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:12,232] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:12,232] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:12,232] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:12,257] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:06:12,259] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 22 ms (kafka.log.Log)
[2021-06-10 11:06:12,260] INFO Created log for partition POLICY-PDP-PAP-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:06:12,263] INFO [Partition POLICY-PDP-PAP-2 broker=0] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-2 (kafka.cluster.Partition)
[2021-06-10 11:06:12,263] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:12,264] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(POLICY-PDP-PAP-2, POLICY-PDP-PAP-0) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:06:12,264] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 6 for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-06-10 11:06:12,264] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 6 for partition POLICY-PDP-PAP-0 with leader 2 (state.change.logger)
[2021-06-10 11:06:12,264] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-2 as part of become-follower request with correlation id 6 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:06:12,264] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-0 as part of become-follower request with correlation id 6 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:06:12,265] INFO [ReplicaFetcherManager on broker 0] Added fetcher to broker BrokerEndPoint(id=1, host=dev-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-PDP-PAP-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:06:12,265] INFO [ReplicaFetcherManager on broker 0] Added fetcher to broker BrokerEndPoint(id=2, host=dev-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-PDP-PAP-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:06:12,265] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 6 for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-06-10 11:06:12,265] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 6 for partition POLICY-PDP-PAP-0 with leader 2 (state.change.logger)
[2021-06-10 11:06:12,266] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 6 from controller 1 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-0 with leader 2 (state.change.logger)
[2021-06-10 11:06:12,266] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 6 from controller 1 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-2 with leader 1 (state.change.logger)
[2021-06-10 11:06:12,269] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 7 (state.change.logger)
[2021-06-10 11:06:12,269] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 7 (state.change.logger)
[2021-06-10 11:06:12,269] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 7 (state.change.logger)
[2021-06-10 11:06:12,566] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition POLICY-PDP-PAP-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:06:12,566] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:06:12,741] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition POLICY-PDP-PAP-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:06:12,742] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
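Note: the block above is one complete LeaderAndIsr round trip: controller 1 (epoch 8) makes broker 0 the leader for POLICY-PDP-PAP-1 and a follower for partitions 0 and 2 (leaders 2 and 1), after which the follower logs are truncated to the local high watermark 0. The resulting layout can be checked with kafka-topics, which accepts --bootstrap-server in this broker's Kafka version (2.3); client.properties is the SASL sketch above.

    $ kafka-topics --bootstrap-server localhost:9092 --describe \
        --topic POLICY-PDP-PAP --command-config client.properties
    # Expected from this log: partition 1 led by broker 0, partition 2 by
    # broker 1, partition 0 by broker 2, each with ISR {0,1,2}.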
[2021-06-10 11:06:17,882] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:17,882] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:21,051] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:21,051] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:23,223] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:23,223] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:23,232] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:23,232] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:23,237] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-34) (reason: Adding new member dev-policy-drools-pdp-0-7aa37f67-6635-4e7e-a4b0-3e16cf372403 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:06:25,100] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 8 (state.change.logger)
[2021-06-10 11:06:25,100] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 8 (state.change.logger)
[2021-06-10 11:06:25,546] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0, zkVersion=0, replicas=0, isNew=true) correlation id 9 from controller 1 epoch 8 for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 (state.change.logger)
[2021-06-10 11:06:25,548] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 9 from controller 1 epoch 8 starting the become-leader transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 (state.change.logger)
[2021-06-10 11:06:25,548] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(org.onap.dmaap.mr.PNF_REGISTRATION-0) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:06:25,575] INFO [Log partition=org.onap.dmaap.mr.PNF_REGISTRATION-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:06:25,577] INFO [Log partition=org.onap.dmaap.mr.PNF_REGISTRATION-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 23 ms (kafka.log.Log)
[2021-06-10 11:06:25,579] INFO Created log for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:06:25,582] INFO [Partition org.onap.dmaap.mr.PNF_REGISTRATION-0 broker=0] No checkpointed highwatermark is found for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 (kafka.cluster.Partition)
[2021-06-10 11:06:25,582] INFO Replica loaded for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:25,582] INFO [Partition org.onap.dmaap.mr.PNF_REGISTRATION-0 broker=0] org.onap.dmaap.mr.PNF_REGISTRATION-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:06:25,590] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 9 for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:06:25,590] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 9 from controller 1 epoch 8 for the become-leader transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 (state.change.logger)
[2021-06-10 11:06:25,595] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 10 (state.change.logger)
[2021-06-10 11:06:25,600] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 10 (state.change.logger)
[2021-06-10 11:06:25,944] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.mirrormakeragent-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 11 (state.change.logger)
[2021-06-10 11:06:26,238] INFO [GroupCoordinator 0]: Stabilized group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP generation 1 (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:06:26,240] INFO [GroupCoordinator 0]: Assignment received from leader for group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:06:26,374] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:26,374] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:27,514] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:27,514] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:30,678] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:30,679] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:33,054] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:33,054] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:46,092] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:46,092] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:46,117] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:46,117] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:46,122] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-1) (reason: Adding new member clamp-2b479eef-c5d7-494b-95de-2a3b6e0f1620 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:06:49,123] INFO [GroupCoordinator 0]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:06:49,125] INFO [GroupCoordinator 0]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:06:49,249] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:49,249] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:50,694] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:50,694] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:06:53,934] INFO Creating topic POLICY-NOTIFICATION with configuration {} and initial partition assignment Map(2 -> ArrayBuffer(1, 2, 0), 1 -> ArrayBuffer(0, 1, 2), 0 -> ArrayBuffer(2, 0, 1)) (kafka.zk.AdminZkClient)
[2021-06-10 11:06:53,966] INFO [KafkaApi-0] Auto creation of topic POLICY-NOTIFICATION with 3 partitions and replication factor 3 is successful (kafka.server.KafkaApis)
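Note: POLICY-NOTIFICATION was created implicitly on first use ("Auto creation ... is successful"), which implies auto.create.topics.enable is on and the broker defaults yield 3 partitions with replication factor 3. The equivalent explicit creation, as a sketch with the same assumed client.properties:

    $ kafka-topics --bootstrap-server localhost:9092 --create \
        --topic POLICY-NOTIFICATION --partitions 3 --replication-factor 3 \
        --command-config client.properties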
[2021-06-10 11:06:54,016] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 12 from controller 1 epoch 8 for partition POLICY-NOTIFICATION-0 (state.change.logger)
[2021-06-10 11:06:54,017] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 12 from controller 1 epoch 8 for partition POLICY-NOTIFICATION-1 (state.change.logger)
[2021-06-10 11:06:54,017] TRACE [Broker id=0] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 12 from controller 1 epoch 8 for partition POLICY-NOTIFICATION-2 (state.change.logger)
[2021-06-10 11:06:54,019] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 12 from controller 1 epoch 8 starting the become-leader transition for partition POLICY-NOTIFICATION-1 (state.change.logger)
[2021-06-10 11:06:54,019] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(POLICY-NOTIFICATION-1) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:06:54,042] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:06:54,045] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 22 ms (kafka.log.Log)
[2021-06-10 11:06:54,046] INFO Created log for partition POLICY-NOTIFICATION-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:06:54,050] INFO [Partition POLICY-NOTIFICATION-1 broker=0] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-1 (kafka.cluster.Partition)
[2021-06-10 11:06:54,050] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:54,050] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:54,051] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:54,051] INFO [Partition POLICY-NOTIFICATION-1 broker=0] POLICY-NOTIFICATION-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-10 11:06:54,055] TRACE [Broker id=0] Stopped fetchers as part of become-leader request from controller 1 epoch 8 with correlation id 12 for partition POLICY-NOTIFICATION-1 (last update controller epoch 8) (state.change.logger)
[2021-06-10 11:06:54,056] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 12 from controller 1 epoch 8 for the become-leader transition for partition POLICY-NOTIFICATION-1 (state.change.logger)
[2021-06-10 11:06:54,056] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 12 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-NOTIFICATION-0 with leader 2 (state.change.logger)
[2021-06-10 11:06:54,059] TRACE [Broker id=0] Handling LeaderAndIsr request correlationId 12 from controller 1 epoch 8 starting the become-follower transition for partition POLICY-NOTIFICATION-2 with leader 1 (state.change.logger)
[2021-06-10 11:06:54,059] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:54,085] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:06:54,087] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 22 ms (kafka.log.Log)
[2021-06-10 11:06:54,088] INFO Created log for partition POLICY-NOTIFICATION-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:06:54,089] INFO [Partition POLICY-NOTIFICATION-0 broker=0] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-0 (kafka.cluster.Partition)
[2021-06-10 11:06:54,090] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:54,090] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:54,090] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:54,090] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:54,123] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-10 11:06:54,127] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 30 ms (kafka.log.Log)
[2021-06-10 11:06:54,128] INFO Created log for partition POLICY-NOTIFICATION-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-10 11:06:54,129] INFO [Partition POLICY-NOTIFICATION-2 broker=0] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-2 (kafka.cluster.Partition)
[2021-06-10 11:06:54,129] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-10 11:06:54,130] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(POLICY-NOTIFICATION-2, POLICY-NOTIFICATION-0) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:06:54,130] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 12 for partition POLICY-NOTIFICATION-2 with leader 1 (state.change.logger)
[2021-06-10 11:06:54,130] TRACE [Broker id=0] Stopped fetchers as part of become-follower request from controller 1 epoch 8 with correlation id 12 for partition POLICY-NOTIFICATION-0 with leader 2 (state.change.logger)
[2021-06-10 11:06:54,130] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition POLICY-NOTIFICATION-2 as part of become-follower request with correlation id 12 from controller 1 epoch 8 with leader 1 (state.change.logger)
[2021-06-10 11:06:54,130] TRACE [Broker id=0] Truncated logs and checkpointed recovery boundaries for partition POLICY-NOTIFICATION-0 as part of become-follower request with correlation id 12 from controller 1 epoch 8 with leader 2 (state.change.logger)
[2021-06-10 11:06:54,131] INFO [ReplicaFetcherManager on broker 0] Added fetcher to broker BrokerEndPoint(id=1, host=dev-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-NOTIFICATION-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:06:54,131] INFO [ReplicaFetcherManager on broker 0] Added fetcher to broker BrokerEndPoint(id=2, host=dev-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-NOTIFICATION-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-10 11:06:54,131] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 12 for partition POLICY-NOTIFICATION-2 with leader 1 (state.change.logger)
[2021-06-10 11:06:54,131] TRACE [Broker id=0] Started fetcher to new leader as part of become-follower request from controller 1 epoch 8 with correlation id 12 for partition POLICY-NOTIFICATION-0 with leader 2 (state.change.logger)
[2021-06-10 11:06:54,131] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 12 from controller 1 epoch 8 for the become-follower transition for partition POLICY-NOTIFICATION-0 with leader 2 (state.change.logger)
[2021-06-10 11:06:54,131] TRACE [Broker id=0] Completed LeaderAndIsr request correlationId 12 from controller 1 epoch 8 for the become-follower transition for partition POLICY-NOTIFICATION-2 with leader 1 (state.change.logger)
[2021-06-10 11:06:54,133] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-NOTIFICATION-0 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 13 (state.change.logger)
[2021-06-10 11:06:54,133] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-NOTIFICATION-1 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 13 (state.change.logger)
[2021-06-10 11:06:54,134] TRACE [Broker id=0] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-NOTIFICATION-2 in response to UpdateMetadata request sent by controller 1 epoch 8 with correlation id 13 (state.change.logger)
[2021-06-10 11:06:54,348] INFO [ReplicaFetcher replicaId=0, leaderId=2, fetcherId=0] Truncating partition POLICY-NOTIFICATION-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:06:54,348] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:06:54,370] INFO [ReplicaFetcher replicaId=0, leaderId=1, fetcherId=0] Truncating partition POLICY-NOTIFICATION-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-10 11:06:54,370] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-10 11:07:39,244] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:07:39,245] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:09:21,369] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:09:21,369] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:09:24,151] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 5 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:09:28,268] WARN Client session timed out, have not heard from server in 4000ms for sessionid 0x200001c5b5f0003 (org.apache.zookeeper.ClientCnxn)
[2021-06-10 11:09:28,269] INFO Client session timed out, have not heard from server in 4000ms for sessionid 0x200001c5b5f0003, closing socket connection and attempting reconnect (org.apache.zookeeper.ClientCnxn)
[2021-06-10 11:09:29,349] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient)
[2021-06-10 11:09:29,350] INFO Opening socket connection to server dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.242.74.76:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn)
[2021-06-10 11:09:29,351] INFO Socket connection established to dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.242.74.76:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2021-06-10 11:09:29,364] INFO Session establishment complete on server dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local/10.242.74.76:2181, sessionid = 0x200001c5b5f0003, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)
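Note: at 11:09:28 the broker had not heard from its ZooKeeper server for 4000 ms (two thirds of the 6000 ms session timeout), dropped the socket, and reconnected to zookeeper-2, keeping session id 0x200001c5b5f0003 with a negotiated timeout of 6000 ms (the Kafka 2.3 default for zookeeper.session.timeout.ms). A quick liveness probe of that ZooKeeper member, assuming the standard four-letter commands are whitelisted (4lw.commands.whitelist on ZooKeeper 3.5+):

    $ echo ruok | nc dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local 2181
    # expect: imok
    $ echo stat | nc dev-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local 2181
    # prints mode (leader/follower), latency, and connection counts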
[2021-06-10 11:13:18,251] INFO [GroupCoordinator 0]: Member policy-id-daba92db-b12f-4270-9351-4209b2a17a91 in group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,255] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-46) (reason: removing member policy-id-daba92db-b12f-4270-9351-4209b2a17a91 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,259] INFO [GroupCoordinator 0]: Group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 2 is now empty (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,449] INFO [GroupCoordinator 0]: Member ves-openapi-manager-256e2f84-016e-4e34-a50b-8d650ca95d28 in group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,449] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-19) (reason: removing member ves-openapi-manager-256e2f84-016e-4e34-a50b-8d650ca95d28 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,450] INFO [GroupCoordinator 0]: Group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 2 is now empty (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,707] INFO [GroupCoordinator 0]: Member sdc-COpenSource-Env11-sdnc-dockero-1c47b7e8-4c2f-4017-9b49-8cc5e67e0519 in group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,707] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-28) (reason: removing member sdc-COpenSource-Env11-sdnc-dockero-1c47b7e8-4c2f-4017-9b49-8cc5e67e0519 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,708] INFO [GroupCoordinator 0]: Group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 2 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,771] INFO [GroupCoordinator 0]: Member dev-policy-drools-pdp-0-7aa37f67-6635-4e7e-a4b0-3e16cf372403 in group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,771] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-34) (reason: removing member dev-policy-drools-pdp-0-7aa37f67-6635-4e7e-a4b0-3e16cf372403 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,771] INFO [GroupCoordinator 0]: Group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,811] INFO [GroupCoordinator 0]: Member clamp-2b479eef-c5d7-494b-95de-2a3b6e0f1620 in group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,812] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 1 (__consumer_offsets-1) (reason: removing member clamp-2b479eef-c5d7-494b-95de-2a3b6e0f1620 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:18,812] INFO [GroupCoordinator 0]: Group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 2 is now empty (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
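Note: this burst at 11:13:18 is a clean shutdown wave: each member sent LeaveGroup, so every affected group moved to generation 2 and became empty; the same clients rejoin over the following seconds, producing the generation-3 rebalances below. Group state can be watched with kafka-consumer-groups (the --state flag has existed since Kafka 2.0; client.properties as sketched earlier):

    $ kafka-consumer-groups --bootstrap-server localhost:9092 --describe \
        --group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO --state \
        --command-config client.properties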
[2021-06-10 11:13:23,416] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:23,416] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:24,032] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:24,033] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:24,057] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:24,057] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:24,466] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:24,466] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:24,995] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:24,995] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:25,552] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:25,552] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:25,578] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:25,578] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:25,584] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 2 (__consumer_offsets-28) (reason: Adding new member sdc-COpenSource-Env11-sdnc-dockero-155a513b-cd18-428a-bb22-de23c3556ec2 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:26,936] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:26,936] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:26,943] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 2 (__consumer_offsets-19) (reason: Adding new member ves-openapi-manager-9bdea114-e786-4574-b9ac-2a167a0aba30 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:27,362] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:27,363] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:28,173] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:28,173] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:28,585] INFO [GroupCoordinator 0]: Stabilized group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO generation 3 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:28,591] INFO [GroupCoordinator 0]: Assignment received from leader for group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:28,755] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:28,755] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:29,645] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:29,645] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:29,944] INFO [GroupCoordinator 0]: Stabilized group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO generation 3 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:29,949] INFO [GroupCoordinator 0]: Assignment received from leader for group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:30,087] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:30,088] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:30,527] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:30,527] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:32,552] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:32,552] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:32,568] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:32,568] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:32,572] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 2 (__consumer_offsets-46) (reason: Adding new member policy-id-669b4c65-bcf1-4344-b25b-d4e03378c07b with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:34,259] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:34,259] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:34,284] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:34,284] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:34,296] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 2 (__consumer_offsets-34) (reason: Adding new member dev-policy-drools-pdp-0-c7e0bf67-b7f4-40c8-8ef0-0665e97c5e18 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:35,274] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:35,274] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:35,573] INFO [GroupCoordinator 0]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 3 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:35,577] INFO [GroupCoordinator 0]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:35,934] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:35,934] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:36,908] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:36,908] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:37,039] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:37,039] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:37,304] INFO [GroupCoordinator 0]: Stabilized group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP generation 3 (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:37,306] INFO [GroupCoordinator 0]: Assignment received from leader for group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:37,551] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:37,551] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:46,099] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:46,100] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:46,286] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:46,286] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:46,293] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 2 (__consumer_offsets-1) (reason: Adding new member clamp-503a3ff5-0757-466c-a374-993688265cf4 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:49,294] INFO [GroupCoordinator 0]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 3 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:49,296] INFO [GroupCoordinator 0]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:13:50,612] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:50,613] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:51,089] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:13:51,089] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:19:24,146] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:20:17,428] INFO [GroupCoordinator 0]: Member clamp-503a3ff5-0757-466c-a374-993688265cf4 in group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:20:17,428] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 3 (__consumer_offsets-1) (reason: removing member clamp-503a3ff5-0757-466c-a374-993688265cf4 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:20:17,429] INFO [GroupCoordinator 0]: Group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 4 is now empty (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,908] INFO [GroupCoordinator 0]: Member ves-openapi-manager-9bdea114-e786-4574-b9ac-2a167a0aba30 in group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,909] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 3 (__consumer_offsets-19) (reason: removing member ves-openapi-manager-9bdea114-e786-4574-b9ac-2a167a0aba30 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,909] INFO [GroupCoordinator 0]: Group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 4 is now empty (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,914] INFO [GroupCoordinator 0]: Member policy-id-669b4c65-bcf1-4344-b25b-d4e03378c07b in group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,914] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 3 (__consumer_offsets-46) (reason: removing member policy-id-669b4c65-bcf1-4344-b25b-d4e03378c07b on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,915] INFO [GroupCoordinator 0]: Group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 4 is now empty (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,915] INFO [GroupCoordinator 0]: Member sdc-COpenSource-Env11-sdnc-dockero-155a513b-cd18-428a-bb22-de23c3556ec2 in group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,915] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 3 (__consumer_offsets-28) (reason: removing member sdc-COpenSource-Env11-sdnc-dockero-155a513b-cd18-428a-bb22-de23c3556ec2 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,915] INFO [GroupCoordinator 0]: Group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 4 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,916] INFO [GroupCoordinator 0]: Member dev-policy-drools-pdp-0-c7e0bf67-b7f4-40c8-8ef0-0665e97c5e18 in group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,916] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 3 (__consumer_offsets-34) (reason: removing member dev-policy-drools-pdp-0-c7e0bf67-b7f4-40c8-8ef0-0665e97c5e18 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:22:46,916] INFO [GroupCoordinator 0]: Group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP with generation 4 is now empty (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
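Note: unlike the 11:13 LeaveGroup wave, these 11:20-11:22 removals are evictions "on heartbeat expiration": the members stopped heartbeating without leaving, so the coordinator waited out the session timeout before rebalancing each group to an empty generation 4. The consumer-side knobs that govern this are session.timeout.ms and heartbeat.interval.ms; the values below are the Kafka 2.3 client defaults, shown for illustration rather than read from this deployment.

    $ kafka-console-consumer --bootstrap-server localhost:9092 \
        --topic POLICY-PDP-PAP --consumer.config client.properties \
        --consumer-property session.timeout.ms=10000 \
        --consumer-property heartbeat.interval.ms=3000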
[2021-06-10 11:25:22,205] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:22,205] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:22,396] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:22,396] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:32,008] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:32,008] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:32,983] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:32,983] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:34,616] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:34,616] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:34,778] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:34,778] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:40,608] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:25:40,608] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:26:47,474] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:26:47,475] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:26:48,336] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:26:48,336] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:26:48,338] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:26:48,338] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:26:48,340] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:26:48,340] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:02,387] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:02,387] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:02,389] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:02,389] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:02,403] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 4 (__consumer_offsets-19) (reason: Adding new member ves-openapi-manager-591fac4e-f449-4b97-bc90-d9a4c5a8bd1d with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:27:02,424] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:02,424] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:02,437] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 4 (__consumer_offsets-1) (reason: Adding new member clamp-5913de6e-c329-4b39-8063-51825bb8c8d9 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:27:05,405] INFO [GroupCoordinator 0]: Stabilized group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO generation 5 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:27:05,438] INFO [GroupCoordinator 0]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 5 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:27:05,459] INFO [GroupCoordinator 0]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:27:06,094] INFO [GroupCoordinator 0]: Assignment received from leader for group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:27:37,263] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:37,264] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:37,354] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 4 (__consumer_offsets-46) (reason: Adding new member policy-id-f97999f7-4932-447e-a133-a5af744440cb with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:27:37,950] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:37,950] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:27:37,955] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 4 (__consumer_offsets-28) (reason: Adding new member sdc-COpenSource-Env11-sdnc-dockero-29a80782-0b39-43c0-bc3e-73d349d4bce9 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:27:40,356] INFO [GroupCoordinator 0]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 5 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:27:40,360] INFO [GroupCoordinator 0]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:27:40,955] INFO [GroupCoordinator 0]: Stabilized group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO generation 5 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:29:17,321] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:29:17,322] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:29:17,391] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:29:17,392] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:29:24,151] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 6 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:29:55,374] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:29:55,375] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:29:55,378] WARN Attempting to send response via channel for which there is no open connection, connection id 10.242.150.220:9092-10.242.153.216:35048-75 (kafka.network.Processor)
[2021-06-10 11:30:22,617] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:22,617] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:22,865] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:22,865] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:23,003] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:23,004] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:29,189] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:29,189] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:42,470] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:42,470] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:43,586] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:43,587] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:55,941] INFO [GroupCoordinator 0]: Member clamp-5913de6e-c329-4b39-8063-51825bb8c8d9 in group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:30:55,942] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 5 (__consumer_offsets-1) (reason: removing member clamp-5913de6e-c329-4b39-8063-51825bb8c8d9 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:30:55,942] INFO [GroupCoordinator 0]: Group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 6 is now empty (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:30:56,203] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:56,203] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:56,649] INFO [GroupCoordinator 0]: Member ves-openapi-manager-591fac4e-f449-4b97-bc90-d9a4c5a8bd1d in group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:30:56,650] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 5 (__consumer_offsets-19) (reason: removing member ves-openapi-manager-591fac4e-f449-4b97-bc90-d9a4c5a8bd1d on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:30:56,650] INFO [GroupCoordinator 0]: Group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 6 is now empty (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:30:56,705] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:56,705] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:56,751] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:56,751] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:56,883] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:30:56,883] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:31:40,956] INFO [GroupCoordinator 0]: Member sdc-COpenSource-Env11-sdnc-dockero-29a80782-0b39-43c0-bc3e-73d349d4bce9 in group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:31:40,961] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 5 (__consumer_offsets-28) (reason: removing member sdc-COpenSource-Env11-sdnc-dockero-29a80782-0b39-43c0-bc3e-73d349d4bce9 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:31:40,962] INFO [GroupCoordinator 0]: Group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 6 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:31:42,337] INFO [GroupCoordinator 0]: Member policy-id-f97999f7-4932-447e-a133-a5af744440cb in group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:31:42,337] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 5 (__consumer_offsets-46) (reason: removing member policy-id-f97999f7-4932-447e-a133-a5af744440cb on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:31:42,337] INFO [GroupCoordinator 0]: Group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 6 is now empty (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:01,649] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:01,649] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,372] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,372] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,385] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 6 (__consumer_offsets-1) (reason: Adding new member clamp-221534db-ecc4-4c2e-9159-239e6940188d with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:02,396] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,396] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,417] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,417] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,417] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,417] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,418] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,418] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,426] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,426] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,432] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,433] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,440] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 6 (__consumer_offsets-19) (reason: Adding new member ves-openapi-manager-a157daeb-512f-48c9-8d1b-5d15739428aa with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:02,466] INFO [GroupCoordinator 0]: Preparing to rebalance group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-7) (reason: Adding new member dev-policy-pap-55f4d54b74-wpzkv-7b662af2-bb9c-45f4-bea9-a003ec36ed22 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:02,505] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,505] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,507] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,507] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,525] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,525] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:02,526] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 6 (__consumer_offsets-28) (reason: Adding new member sdc-COpenSource-Env11-sdnc-dockero-a666004e-dce3-496c-b583-e61224711eac with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:02,527] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 4 (__consumer_offsets-34) (reason: Adding new member dev-policy-drools-pdp-0-8379cb4a-8a64-4404-bed8-6d7cc4ad32db with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:02,552] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 6 (__consumer_offsets-46) (reason: Adding new member policy-id-64369864-1fbe-42d0-a4bb-3bb86a758a86 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:03,528] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:03,528] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:05,387] INFO [GroupCoordinator 0]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 7 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:05,395] INFO [GroupCoordinator 0]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:05,442] INFO [GroupCoordinator 0]: Stabilized group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO generation 7 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:05,467] INFO [GroupCoordinator 0]: Stabilized group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP generation 1 (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:05,527] INFO [GroupCoordinator 0]: Stabilized group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO generation 7 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:05,528] INFO [GroupCoordinator 0]: Stabilized group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP generation 5 (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:05,555] INFO [GroupCoordinator 0]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 7 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:05,991] INFO [GroupCoordinator 0]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:06,943] INFO [GroupCoordinator 0]: Assignment received from leader for group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:06,943] INFO [GroupCoordinator 0]: Assignment received from leader for group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:06,944] INFO [GroupCoordinator 0]: Assignment received from leader for group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:06,944] INFO [GroupCoordinator 0]: Assignment received from leader for group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:32:08,824] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:08,824] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:08,986] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:08,987] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,001] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,001] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,051] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,051] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,064] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,064] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,064] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,064] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,790] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,790] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,801] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:09,801] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,168] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,168] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,229] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,229] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,229] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,229] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,253] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,253] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,301] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,302] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,343] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:10,344] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:13,767] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:13,767] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:55,665] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:55,665] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:56,897] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:56,897] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:59,241] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:32:59,241] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:33:01,340] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:33:01,340] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:35:37,691] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:35:37,692] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:35:57,313] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:35:57,314] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:36:40,737] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:36:40,737] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:38:23,785] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:38:23,785] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:39:24,146] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:45:40,485] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:45:40,486] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:45:41,470] INFO [GroupCoordinator 0]: Member policy-id-64369864-1fbe-42d0-a4bb-3bb86a758a86 in group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:41,472] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 7 (__consumer_offsets-46) (reason: removing member policy-id-64369864-1fbe-42d0-a4bb-3bb86a758a86 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:41,472] INFO [GroupCoordinator 0]: Group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 8 is now empty (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:49,498] INFO [GroupCoordinator 0]: Member ves-openapi-manager-a157daeb-512f-48c9-8d1b-5d15739428aa in group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:49,498] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 7 (__consumer_offsets-19) (reason: removing member ves-openapi-manager-a157daeb-512f-48c9-8d1b-5d15739428aa on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:49,498] INFO [GroupCoordinator 0]: Group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 8 is now empty (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:58,078] INFO [GroupCoordinator 0]: Member sdc-COpenSource-Env11-sdnc-dockero-a666004e-dce3-496c-b583-e61224711eac in group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:58,078] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 7 (__consumer_offsets-28) (reason: removing member sdc-COpenSource-Env11-sdnc-dockero-a666004e-dce3-496c-b583-e61224711eac on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:58,078] INFO [GroupCoordinator 0]: Group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 8 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:58,949] INFO [GroupCoordinator 0]: Member dev-policy-pap-55f4d54b74-wpzkv-7b662af2-bb9c-45f4-bea9-a003ec36ed22 in group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:58,949] INFO [GroupCoordinator 0]: Preparing to rebalance group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-7) (reason: removing member dev-policy-pap-55f4d54b74-wpzkv-7b662af2-bb9c-45f4-bea9-a003ec36ed22 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:58,949] INFO [GroupCoordinator 0]: Group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:59,020] INFO [GroupCoordinator 0]: Member dev-policy-drools-pdp-0-8379cb4a-8a64-4404-bed8-6d7cc4ad32db in group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:59,021] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 5 (__consumer_offsets-34) (reason: removing member dev-policy-drools-pdp-0-8379cb4a-8a64-4404-bed8-6d7cc4ad32db on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:59,021] INFO [GroupCoordinator 0]: Group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP with generation 6 is now empty (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:59,063] INFO [GroupCoordinator 0]: Member clamp-221534db-ecc4-4c2e-9159-239e6940188d in group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:59,063] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 7 (__consumer_offsets-1) (reason: removing member clamp-221534db-ecc4-4c2e-9159-239e6940188d on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:45:59,063] INFO [GroupCoordinator 0]: Group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 8 is now empty (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:46:51,740] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:46:51,740] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:06,608] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:06,608] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:06,628] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:06,628] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:07,601] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:07,602] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:07,717] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:07,717] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:07,729] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 8 (__consumer_offsets-19) (reason: Adding new member ves-openapi-manager-5a405cc6-880e-4684-8932-d23ad30f79f5 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:07,755] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 8 (__consumer_offsets-1) (reason: Adding new member clamp-c593fad9-1a5b-46bc-b623-0f7a5e1dc22b with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:10,730] INFO [GroupCoordinator 0]: Stabilized group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO generation 9 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:10,738] INFO [GroupCoordinator 0]: Assignment received from leader for group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:10,756] INFO [GroupCoordinator 0]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 9 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:10,760] INFO [GroupCoordinator 0]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:12,483] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:12,484] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:12,512] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:12,512] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:12,550] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:12,550] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:13,236] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:13,237] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:13,274] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 8 (__consumer_offsets-46) (reason: Adding new member policy-id-dbf77633-dc03-42a7-947c-8f8c48eee626 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:16,276] INFO [GroupCoordinator 0]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 9 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:20,022] INFO [GroupCoordinator 0]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:22,423] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:22,423] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:24,252] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:24,252] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:24,308] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:24,308] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,020] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,020] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,083] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,083] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,088] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,088] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,090] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,091] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,090] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,091] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,101] INFO [GroupCoordinator 0]: Preparing to rebalance group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP in state PreparingRebalance with old generation 2 (__consumer_offsets-7) (reason: Adding new member dev-policy-pap-55f4d54b74-wpzkv-e192fe0c-694f-4c38-9eef-dddb787e5a5a with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:25,105] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 6 (__consumer_offsets-34) (reason: Adding new member dev-policy-drools-pdp-0-ca187f30-f891-410d-8dd6-f35496e26a08 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:25,768] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,768] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:25,816] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 8 (__consumer_offsets-28) (reason: Adding new member sdc-COpenSource-Env11-sdnc-dockero-b30566d8-d05b-40ad-b83c-9388361fe366 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:27,894] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:27,895] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:28,104] INFO [GroupCoordinator 0]: Stabilized group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP generation 3 (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:28,105] INFO [GroupCoordinator 0]: Stabilized group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP generation 7 (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:28,112] INFO [GroupCoordinator 0]: Assignment received from leader for group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP for generation 3 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:28,642] INFO [GroupCoordinator 0]: Assignment received from leader for group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:28,658] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:28,658] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:28,826] INFO [GroupCoordinator 0]: Stabilized group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO generation 9 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:28,828] INFO [GroupCoordinator 0]: Assignment received from leader for group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:47:29,431] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:29,431] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:29,487] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:29,487] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:29,952] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:29,952] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:30,216] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:30,216] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:30,430] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:30,430] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:30,683] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:30,683] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:30,716] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:30,716] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:47,332] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:47:47,332] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:49:24,146] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 11:49:47,892] INFO [GroupCoordinator 0]: Member sdc-COpenSource-Env11-sdnc-dockero-b30566d8-d05b-40ad-b83c-9388361fe366 in group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:49:47,893] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 9 (__consumer_offsets-28) (reason: removing member sdc-COpenSource-Env11-sdnc-dockero-b30566d8-d05b-40ad-b83c-9388361fe366 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:49:47,893] INFO [GroupCoordinator 0]: Group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 10 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:25,405] INFO [GroupCoordinator 0]: Member policy-id-dbf77633-dc03-42a7-947c-8f8c48eee626 in group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:25,405] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 9 (__consumer_offsets-46) (reason: removing member policy-id-dbf77633-dc03-42a7-947c-8f8c48eee626 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:25,406] INFO [GroupCoordinator 0]: Group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 10 is now empty (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:26,736] INFO [GroupCoordinator 0]: Member ves-openapi-manager-5a405cc6-880e-4684-8932-d23ad30f79f5 in group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:26,736] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 9 (__consumer_offsets-19) (reason: removing member ves-openapi-manager-5a405cc6-880e-4684-8932-d23ad30f79f5 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:26,737] INFO [GroupCoordinator 0]: Group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 10 is now empty (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:27,800] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:27,800] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:30,622] INFO [GroupCoordinator 0]: Member dev-policy-drools-pdp-0-ca187f30-f891-410d-8dd6-f35496e26a08 in group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:30,623] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 7 (__consumer_offsets-34) (reason: removing member dev-policy-drools-pdp-0-ca187f30-f891-410d-8dd6-f35496e26a08 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:30,623] INFO [GroupCoordinator 0]: Group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP with generation 8 is now empty (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:30,723] INFO [GroupCoordinator 0]: Member dev-policy-pap-55f4d54b74-wpzkv-e192fe0c-694f-4c38-9eef-dddb787e5a5a in group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:30,723] INFO [GroupCoordinator 0]: Preparing to rebalance group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP in state PreparingRebalance with old generation 3 (__consumer_offsets-7) (reason: removing member dev-policy-pap-55f4d54b74-wpzkv-e192fe0c-694f-4c38-9eef-dddb787e5a5a on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:30,723] INFO [GroupCoordinator 0]: Group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP with generation 4 is now empty (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:30,933] INFO [GroupCoordinator 0]: Member clamp-c593fad9-1a5b-46bc-b623-0f7a5e1dc22b in group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:30,933] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 9 (__consumer_offsets-1) (reason: removing member clamp-c593fad9-1a5b-46bc-b623-0f7a5e1dc22b on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:30,934] INFO [GroupCoordinator 0]: Group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 10 is now empty (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:33,084] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:33,084] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:33,088] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 10 (__consumer_offsets-19) (reason: Adding new member ves-openapi-manager-724daa98-3229-4cfe-8b89-53b70b373e2c with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:33,117] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:33,117] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:33,122] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 10 (__consumer_offsets-46) (reason: Adding new member policy-id-bcc5120c-4a3c-41fd-a8c0-3aa6956aae01 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:36,089] INFO [GroupCoordinator 0]: Stabilized group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO generation 11 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:36,123] INFO [GroupCoordinator 0]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 11 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:45,734] INFO [GroupCoordinator 0]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:45,734] INFO [GroupCoordinator 0]: Assignment received from leader for group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:49,842] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:49,842] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:49,844] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:49,844] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:49,902] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:49,902] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:50,008] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:50,008] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:50,724] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:50,724] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:50:50,753] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 8 (__consumer_offsets-34) (reason: Adding new member dev-policy-drools-pdp-0-e09c1f13-34a9-47ed-9a1d-39759de4bb0a with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:53,755] INFO [GroupCoordinator 0]: Stabilized group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP generation 9 (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:50:57,836] INFO [GroupCoordinator 0]: Assignment received from leader for group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP for generation 9 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:51:49,449] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:51:49,449] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:51:49,460] INFO [GroupCoordinator 0]: Preparing to rebalance group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP in state PreparingRebalance with old generation 4 (__consumer_offsets-7) (reason: Adding new member dev-policy-pap-55f4d54b74-wpzkv-eb1b0a8f-7f2b-4617-bcd5-822a706148b6 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:51:52,460] INFO [GroupCoordinator 0]: Stabilized group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP generation 5 (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:52:30,098] INFO [GroupCoordinator 0]: Assignment received from leader for group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP for generation 5 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:53:49,481] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:53:49,481] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:53:50,047] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:53:50,047] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:53:53,374] INFO [GroupCoordinator 0]: Member policy-id-bcc5120c-4a3c-41fd-a8c0-3aa6956aae01 in group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:53:53,374] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 11 (__consumer_offsets-46) (reason: removing member policy-id-bcc5120c-4a3c-41fd-a8c0-3aa6956aae01 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:53:53,375] INFO [GroupCoordinator 0]: Group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 12 is now empty (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:53:53,424] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:53:53,424] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:53:55,705] INFO [GroupCoordinator 0]: Member ves-openapi-manager-724daa98-3229-4cfe-8b89-53b70b373e2c in group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:53:55,705] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 11 (__consumer_offsets-19) (reason: removing member ves-openapi-manager-724daa98-3229-4cfe-8b89-53b70b373e2c on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:53:55,705] INFO [GroupCoordinator 0]: Group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 12 is now empty (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:53:57,776] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:53:57,776] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:53:59,526] INFO [GroupCoordinator 0]: Member dev-policy-pap-55f4d54b74-wpzkv-eb1b0a8f-7f2b-4617-bcd5-822a706148b6 in group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:53:59,526] INFO [GroupCoordinator 0]: Preparing to rebalance group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP in state PreparingRebalance with old generation 5 (__consumer_offsets-7) (reason: removing member dev-policy-pap-55f4d54b74-wpzkv-eb1b0a8f-7f2b-4617-bcd5-822a706148b6 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:53:59,526] INFO [GroupCoordinator 0]: Group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP with generation 6 is now empty (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:54:46,338] INFO [GroupCoordinator 0]: Member dev-policy-drools-pdp-0-e09c1f13-34a9-47ed-9a1d-39759de4bb0a in group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:54:46,339] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 9 (__consumer_offsets-34) (reason: removing member dev-policy-drools-pdp-0-e09c1f13-34a9-47ed-9a1d-39759de4bb0a on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:54:46,339] INFO [GroupCoordinator 0]: Group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP with generation 10 is now empty (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
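
In contrast to the join round above, the "has left, removing it from the group" entries with reason "on LeaveGroup" are the clean-shutdown path: the member sends an explicit LeaveGroup request before disconnecting, so the coordinator removes it at once, bumps the generation, and logs "is now empty" when no members remain. On the client side this corresponds to closing the consumer; a sketch under the same hypothetical connection settings as above (SASL options omitted for brevity):

    # Clean-shutdown sketch: close() commits offsets (when auto-commit is
    # enabled) and sends LeaveGroup, yielding the "removing member ...
    # on LeaveGroup" lines above. Address and group id are hypothetical.
    from kafka import KafkaConsumer

    consumer = KafkaConsumer(
        "POLICY-PDP-PAP",
        bootstrap_servers="broker.example:9092",
        group_id="example-group--POLICY-PDP-PAP",
    )
    consumer.close()
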
[2021-06-10 11:54:57,402] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:54:57,402] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:55:12,401] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:55:12,402] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:55:12,554] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:55:12,554] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:55:24,148] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:55:24,148] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:55:24,161] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 10 (__consumer_offsets-1) (reason: Adding new member clamp-b16136d8-b119-4437-aee2-78f2ae62d58e with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:55:27,163] INFO [GroupCoordinator 0]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 11 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:55:27,165] INFO [GroupCoordinator 0]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:55:46,649] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:55:46,649] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:07,624] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:07,624] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:20,492] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:20,492] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:20,499] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 12 (__consumer_offsets-46) (reason: Adding new member policy-id-68757ab7-934c-4d33-89a6-015377c04e30 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:21,544] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,544] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,544] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,544] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,547] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,547] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,549] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,549] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,550] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,550] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,602] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,602] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,605] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,605] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,740] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,740] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:21,760] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 11 (__consumer_offsets-1) (reason: Adding new member clamp-3ad78cc7-9aac-4e77-95e7-0615d6f47f13 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:22,008] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:22,008] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:22,014] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 10 (__consumer_offsets-28) (reason: Adding new member sdc-COpenSource-Env11-sdnc-dockero-fe3f2f6d-66af-4ecc-96c9-18fef409a238 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:22,089] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:22,089] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:22,112] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:22,112] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:22,123] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 12 (__consumer_offsets-19) (reason: Adding new member ves-openapi-manager-b0983bdc-d36c-4324-9af5-ba28f351a01c with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:22,128] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:22,128] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:22,132] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:22,132] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:56:22,136] INFO [GroupCoordinator 0]: Preparing to rebalance group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP in state PreparingRebalance with old generation 6 (__consumer_offsets-7) (reason: Adding new member dev-policy-pap-55f4d54b74-wpzkv-121f0b38-d43d-46a9-9eea-a2212be4005b with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:23,508] INFO [GroupCoordinator 0]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 13 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:24,946] INFO [GroupCoordinator 0]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 13 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:25,015] INFO [GroupCoordinator 0]: Stabilized group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO generation 11 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:25,020] INFO [GroupCoordinator 0]: Assignment received from leader for group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:25,123] INFO [GroupCoordinator 0]: Stabilized group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO generation 13 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:25,129] INFO [GroupCoordinator 0]: Assignment received from leader for group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 13 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:25,137] INFO [GroupCoordinator 0]: Stabilized group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP generation 7 (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:56:25,140] INFO [GroupCoordinator 0]: Assignment received from leader for group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP for generation 7 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:57:05,027] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:05,027] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:05,036] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 10 (__consumer_offsets-34) (reason: Adding new member dev-policy-drools-pdp-0-14eeabbc-19b3-4dc2-9ed3-627eccd95803 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:57:08,036] INFO [GroupCoordinator 0]: Stabilized group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP generation 11 (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:57:27,491] INFO [GroupCoordinator 0]: Assignment received from leader for group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP for generation 11 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 11:57:31,255] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:31,256] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:31,478] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:31,479] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:36,487] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:36,487] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:37,684] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:37,684] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:37,762] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:37,763] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:37,764] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:37,764] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:37,776] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:37,776] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:37,855] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:57:37,856] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 11:59:24,146] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 12:01:01,506] INFO [GroupCoordinator 0]: Member clamp-b16136d8-b119-4437-aee2-78f2ae62d58e in group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:01,512] INFO [GroupCoordinator 0]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 12 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:25,057] INFO [GroupCoordinator 0]: Member sdc-COpenSource-Env11-sdnc-dockero-fe3f2f6d-66af-4ecc-96c9-18fef409a238 in group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:25,057] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 11 (__consumer_offsets-28) (reason: removing member sdc-COpenSource-Env11-sdnc-dockero-fe3f2f6d-66af-4ecc-96c9-18fef409a238 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:25,058] INFO [GroupCoordinator 0]: Group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 12 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:25,058] INFO [GroupCoordinator 0]: Member policy-id-68757ab7-934c-4d33-89a6-015377c04e30 in group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:25,058] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 13 (__consumer_offsets-46) (reason: removing member policy-id-68757ab7-934c-4d33-89a6-015377c04e30 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:25,058] INFO [GroupCoordinator 0]: Group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 14 is now empty (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:25,181] INFO [GroupCoordinator 0]: Member ves-openapi-manager-b0983bdc-d36c-4324-9af5-ba28f351a01c in group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:25,181] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 13 (__consumer_offsets-19) (reason: removing member ves-openapi-manager-b0983bdc-d36c-4324-9af5-ba28f351a01c on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:25,181] INFO [GroupCoordinator 0]: Group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 14 is now empty (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:33,048] INFO [GroupCoordinator 0]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 12 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:33,058] WARN Attempting to send response via channel for which there is no open connection, connection id 10.242.150.220:9092-10.242.153.216:49940-172 (kafka.network.Processor)
[2021-06-10 12:01:44,947] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:01:44,948] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:01:45,023] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 12 (__consumer_offsets-1) (reason: Updating metadata for member clamp-3ad78cc7-9aac-4e77-95e7-0615d6f47f13) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:45,024] INFO [GroupCoordinator 0]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 13 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:45,025] INFO [GroupCoordinator 0]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 13 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:01:57,214] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:01:57,214] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:01:58,321] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:01:58,321] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:01:58,322] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:01:58,322] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:01:59,736] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 12 (__consumer_offsets-28) (reason: Adding new member sdc-COpenSource-Env11-sdnc-dockero-44b712f5-08ec-44d5-aeb1-6e8a7b369aec with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:02,738] INFO [GroupCoordinator 0]: Stabilized group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO generation 13 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:07,148] INFO [GroupCoordinator 0]: Assignment received from leader for group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 13 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:09,666] INFO [GroupCoordinator 0]: Member sdc-COpenSource-Env11-sdnc-dockero-44b712f5-08ec-44d5-aeb1-6e8a7b369aec in group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:09,666] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 13 (__consumer_offsets-28) (reason: removing member sdc-COpenSource-Env11-sdnc-dockero-44b712f5-08ec-44d5-aeb1-6e8a7b369aec on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:09,666] INFO [GroupCoordinator 0]: Group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 14 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:20,273] INFO [GroupCoordinator 0]: Member dev-policy-drools-pdp-0-14eeabbc-19b3-4dc2-9ed3-627eccd95803 in group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:20,273] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 11 (__consumer_offsets-34) (reason: removing member dev-policy-drools-pdp-0-14eeabbc-19b3-4dc2-9ed3-627eccd95803 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:20,273] INFO [GroupCoordinator 0]: Group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP with generation 12 is now empty (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:27,765] INFO [GroupCoordinator 0]: Member clamp-3ad78cc7-9aac-4e77-95e7-0615d6f47f13 in group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:27,765] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 13 (__consumer_offsets-1) (reason: removing member clamp-3ad78cc7-9aac-4e77-95e7-0615d6f47f13 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:27,765] INFO [GroupCoordinator 0]: Group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 14 is now empty (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:48,269] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:02:48,270] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:02:48,289] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 14 (__consumer_offsets-19) (reason: Adding new member ves-openapi-manager-0f20bb04-b584-4333-89eb-7f0296b6e6a9 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:48,318] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:02:48,318] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:02:48,329] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 14 (__consumer_offsets-28) (reason: Adding new member sdc-COpenSource-Env11-sdnc-dockero-25f2fc92-5023-45f6-ba3d-f1ac45e4a137 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:51,289] INFO [GroupCoordinator 0]: Stabilized group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO generation 15 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:51,331] INFO [GroupCoordinator 0]: Stabilized group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO generation 15 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:51,631] INFO [GroupCoordinator 0]: Assignment received from leader for group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 15 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:51,632] INFO [GroupCoordinator 0]: Assignment received from leader for group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 15 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:02:57,480] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:02:57,481] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:02:57,496] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 14 (__consumer_offsets-1) (reason: Adding new member clamp-ba01608a-06ea-4bff-a1cc-f07dd0c41b19 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:03:00,500] INFO [GroupCoordinator 0]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 15 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:03:04,863] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:04,863] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:05,694] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:05,694] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:05,698] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 12 (__consumer_offsets-34) (reason: Adding new member dev-policy-drools-pdp-0-37fd62b4-5404-4e9e-aee7-ae9cbb4759e0 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:03:06,473] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:06,473] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:06,702] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:06,703] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:06,729] INFO [GroupCoordinator 0]: Preparing to rebalance group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP in state PreparingRebalance with old generation 7 (__consumer_offsets-7) (reason: Adding new member dev-policy-pap-55f4d54b74-wpzkv-c6cb175d-f557-4f12-84ef-f70dae855282 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:03:06,768] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:06,768] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:07,129] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:07,129] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:08,038] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:08,038] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:08,640] INFO [GroupCoordinator 0]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 15 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:03:08,700] INFO [GroupCoordinator 0]: Stabilized group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP generation 13 (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:03:11,980] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:11,981] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:12,143] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 14 (__consumer_offsets-46) (reason: Adding new member policy-id-05ae3d55-dccb-4a76-ab26-7af3469810f2 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:03:15,144] INFO [GroupCoordinator 0]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 15 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:03:15,148] INFO [GroupCoordinator 0]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 15 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:03:16,141] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:16,141] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:16,148] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:16,148] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:23,052] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:23,052] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:23,780] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:23,780] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:24,461] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:24,461] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:24,511] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:24,511] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:24,587] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:24,587] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:24,708] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:24,708] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:26,094] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:26,094] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:26,108] INFO [GroupCoordinator 0]: Assignment received from leader for group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP for generation 13 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:03:27,110] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:27,110] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:27,354] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:27,354] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:27,652] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:03:27,653] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:05:01,645] INFO [GroupCoordinator 0]: Member dev-policy-pap-55f4d54b74-wpzkv-121f0b38-d43d-46a9-9eea-a2212be4005b in group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:05:01,648] INFO [GroupCoordinator 0]: Stabilized group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP generation 8 (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:07:27,085] INFO [GroupCoordinator 0]: Member policy-id-05ae3d55-dccb-4a76-ab26-7af3469810f2 in group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:07:27,086] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 15 (__consumer_offsets-46) (reason: removing member policy-id-05ae3d55-dccb-4a76-ab26-7af3469810f2 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:07:27,086] INFO [GroupCoordinator 0]: Group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 16 is now empty (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:07:41,457] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:07:41,457] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:08:48,151] INFO [GroupCoordinator 0]: Member ves-openapi-manager-0f20bb04-b584-4333-89eb-7f0296b6e6a9 in group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:08:48,152] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 15 (__consumer_offsets-19) (reason: removing member ves-openapi-manager-0f20bb04-b584-4333-89eb-7f0296b6e6a9 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:08:48,152] INFO [GroupCoordinator 0]: Group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 16 is now empty (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:08:52,820] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:08:52,820] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:08:52,834] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 16 (__consumer_offsets-46) (reason: Adding new member policy-id-c8944091-35f9-478b-9cc5-f1a565fc3d73 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:08:53,537] WARN Attempting to send response via channel for which there is no open connection, connection id 10.242.150.220:9092-10.242.153.216:51536-199 (kafka.network.Processor)
[2021-06-10 12:08:55,835] INFO [GroupCoordinator 0]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 17 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:08:57,720] INFO [GroupCoordinator 0]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 17 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:08:58,576] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:08:58,576] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:09:01,649] INFO [GroupCoordinator 0]: Member dev-policy-pap-55f4d54b74-wpzkv-c6cb175d-f557-4f12-84ef-f70dae855282 in group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:01,650] INFO [GroupCoordinator 0]: Preparing to rebalance group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP in state PreparingRebalance with old generation 8 (__consumer_offsets-7) (reason: removing member dev-policy-pap-55f4d54b74-wpzkv-c6cb175d-f557-4f12-84ef-f70dae855282 on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:01,650] INFO [GroupCoordinator 0]: Group a7cbd50b-1fcf-4366-ab57-29486781b256--POLICY-PDP-PAP with generation 9 is now empty (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator)
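
The entries reading "has failed, removing it from the group" with reason "heartbeat expiration" are the other removal path: the member stopped heartbeating within its session timeout instead of sending LeaveGroup, which is typical for a killed pod or a hung client. The client-side knobs involved, sketched with kafka-python (the values shown are illustrative, not taken from this deployment):

    # Timeouts behind "removing member ... on heartbeat expiration".
    # Values are illustrative; address and group id are hypothetical.
    from kafka import KafkaConsumer

    consumer = KafkaConsumer(
        "POLICY-PDP-PAP",
        bootstrap_servers="broker.example:9092",
        group_id="example-group--POLICY-PDP-PAP",
        session_timeout_ms=10000,     # coordinator evicts after this much silence
        heartbeat_interval_ms=3000,   # heartbeat cadence, well below the session timeout
        max_poll_interval_ms=300000,  # separate bound on the time between poll() calls
    )
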
[2021-06-10 12:09:24,146] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 12:09:28,117] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:09:28,117] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:09:28,164] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:09:28,164] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:09:28,166] INFO [GroupCoordinator 0]: Member policy-id-c8944091-35f9-478b-9cc5-f1a565fc3d73 in group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:28,166] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:09:28,166] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:09:28,166] INFO [GroupCoordinator 0]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 17 (__consumer_offsets-46) (reason: removing member policy-id-c8944091-35f9-478b-9cc5-f1a565fc3d73 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:28,167] INFO [GroupCoordinator 0]: Group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 18 is now empty (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:53,334] INFO [GroupCoordinator 0]: Member sdc-COpenSource-Env11-sdnc-dockero-25f2fc92-5023-45f6-ba3d-f1ac45e4a137 in group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:53,335] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 15 (__consumer_offsets-28) (reason: removing member sdc-COpenSource-Env11-sdnc-dockero-25f2fc92-5023-45f6-ba3d-f1ac45e4a137 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:53,335] INFO [GroupCoordinator 0]: Group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 16 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:54,814] INFO [GroupCoordinator 0]: Member dev-policy-drools-pdp-0-37fd62b4-5404-4e9e-aee7-ae9cbb4759e0 in group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:54,814] INFO [GroupCoordinator 0]: Preparing to rebalance group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP in state PreparingRebalance with old generation 13 (__consumer_offsets-34) (reason: removing member dev-policy-drools-pdp-0-37fd62b4-5404-4e9e-aee7-ae9cbb4759e0 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:54,814] INFO [GroupCoordinator 0]: Group 9bffb0a7-76a3-42ac-9a0e-6db97fc04e5e--POLICY-PDP-PAP with generation 14 is now empty (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:54,855] INFO [GroupCoordinator 0]: Member clamp-ba01608a-06ea-4bff-a1cc-f07dd0c41b19 in group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:54,855] INFO [GroupCoordinator 0]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 15 (__consumer_offsets-1) (reason: removing member clamp-ba01608a-06ea-4bff-a1cc-f07dd0c41b19 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:09:54,855] INFO [GroupCoordinator 0]: Group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 16 is now empty (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
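
The coordinator states logged throughout this section (PreparingRebalance, Stabilized, Empty) can also be read back over the admin API; a sketch with kafka-python's KafkaAdminClient (hypothetical broker address; the SASL options from the earlier sketch would be needed against this broker):

    # Sketch: query the coordinator state of the groups seen above.
    # Broker address is hypothetical and authentication is omitted.
    from kafka.admin import KafkaAdminClient

    admin = KafkaAdminClient(bootstrap_servers="broker.example:9092")
    groups = admin.describe_consumer_groups([
        "clamp--SDC-DISTR-NOTIF-TOPIC-AUTO",
        "policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO",
    ])
    for g in groups:
        print(g.group, g.state)  # e.g. "Empty" right after the LeaveGroup wave above
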
[2021-06-10 12:15:47,576] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:47,577] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:47,585] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 16 (__consumer_offsets-19) (reason: Adding new member ves-openapi-manager-cd7557ed-1585-4b27-a75b-1a8339dd94b1 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:15:47,611] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:47,611] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:48,393] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:48,394] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:48,404] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 16 (__consumer_offsets-28) (reason: Adding new member sdc-COpenSource-Env11-sdnc-dockero-aef1726f-de6e-4a33-ac7b-610e12d1dc5d with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:15:50,587] INFO [GroupCoordinator 0]: Stabilized group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO generation 17 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:15:50,774] INFO [GroupCoordinator 0]: Assignment received from leader for group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 17 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:15:51,405] INFO [GroupCoordinator 0]: Stabilized group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO generation 17 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:15:52,141] INFO [GroupCoordinator 0]: Assignment received from leader for group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 17 (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:15:52,146] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:52,146] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:52,146] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:52,146] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:52,288] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:52,288] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:52,439] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:52,439] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:52,970] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:52,970] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:53,805] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:15:53,806] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:18:56,949] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:18:56,949] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:19:24,146] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 12:20:57,447] INFO [GroupCoordinator 0]: Member ves-openapi-manager-cd7557ed-1585-4b27-a75b-1a8339dd94b1 in group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:20:57,448] INFO [GroupCoordinator 0]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 17 (__consumer_offsets-19) (reason: removing member ves-openapi-manager-cd7557ed-1585-4b27-a75b-1a8339dd94b1 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:20:57,448] INFO [GroupCoordinator 0]: Group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 18 is now empty (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:21:02,025] INFO [GroupCoordinator 0]: Member sdc-COpenSource-Env11-sdnc-dockero-aef1726f-de6e-4a33-ac7b-610e12d1dc5d in group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:21:02,026] INFO [GroupCoordinator 0]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 17 (__consumer_offsets-28) (reason: removing member sdc-COpenSource-Env11-sdnc-dockero-aef1726f-de6e-4a33-ac7b-610e12d1dc5d on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:21:02,026] INFO [GroupCoordinator 0]: Group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO with generation 18 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-10 12:29:24,146] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 12:39:24,147] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 2 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
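
The recurring GroupMetadataManager line fires on a fixed ten-minute cadence (11:59:24, 12:09:24, 12:19:24, ...), consistent with the broker's periodic expired-offset cleanup task (offsets.retention.check.interval.ms, 600000 ms by default); "Removed 0 expired offsets" means no committed offsets had aged past the retention window. The committed offsets that task examines can be listed over the admin API; a sketch (kafka-python, hypothetical broker address, authentication omitted):

    # Sketch: list the committed offsets the cleanup task inspects.
    # Broker address is hypothetical and authentication is omitted.
    from kafka.admin import KafkaAdminClient

    admin = KafkaAdminClient(bootstrap_servers="broker.example:9092")
    offsets = admin.list_consumer_group_offsets("policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO")
    for tp, meta in offsets.items():
        print(tp.topic, tp.partition, meta.offset)
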
[2021-06-10 12:45:07,944] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:45:07,945] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:49:24,146] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 12:56:47,393] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:56:47,394] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 12:59:24,149] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 13:09:24,147] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 2 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 13:11:57,734] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 13:11:57,735] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 13:19:24,147] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 13:29:24,146] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 13:39:24,146] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 13:49:24,147] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 2 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 13:50:00,656] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 13:50:00,657] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 13:57:32,893] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 13:57:32,893] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 13:59:24,147] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 14:01:41,107] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 14:01:41,108] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 14:01:41,203] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 14:01:41,203] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 14:09:24,148] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 14:19:24,148] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 2 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-10 14:23:43,388] INFO ^Event received  with   username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-10 14:23:43,388] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)