+ export KAFKA_BROKER_ID=1 + cp /opt/app/osaaf/local/cadi.properties /etc/kafka/data/cadi.properties + export KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://10.253.0.28:30491,INTERNAL_SASL_PLAINTEXT://:9092 + exec /etc/confluent/docker/run ===> ENV Variables ... A1POLICYMANAGEMENT_EXTERNAL_PORT=tcp://10.233.43.149:8433 A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP=tcp://10.233.43.149:8433 A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_ADDR=10.233.43.149 A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PORT=8433 A1POLICYMANAGEMENT_EXTERNAL_PORT_8433_TCP_PROTO=tcp A1POLICYMANAGEMENT_EXTERNAL_SERVICE_HOST=10.233.43.149 A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT=8433 A1POLICYMANAGEMENT_EXTERNAL_SERVICE_PORT_HTTPS_API=8433 A1POLICYMANAGEMENT_PORT=tcp://10.233.7.241:8433 A1POLICYMANAGEMENT_PORT_8081_TCP=tcp://10.233.7.241:8081 A1POLICYMANAGEMENT_PORT_8081_TCP_ADDR=10.233.7.241 A1POLICYMANAGEMENT_PORT_8081_TCP_PORT=8081 A1POLICYMANAGEMENT_PORT_8081_TCP_PROTO=tcp A1POLICYMANAGEMENT_PORT_8433_TCP=tcp://10.233.7.241:8433 A1POLICYMANAGEMENT_PORT_8433_TCP_ADDR=10.233.7.241 A1POLICYMANAGEMENT_PORT_8433_TCP_PORT=8433 A1POLICYMANAGEMENT_PORT_8433_TCP_PROTO=tcp A1POLICYMANAGEMENT_SERVICE_HOST=10.233.7.241 A1POLICYMANAGEMENT_SERVICE_PORT=8433 A1POLICYMANAGEMENT_SERVICE_PORT_HTTPS_API=8433 A1POLICYMANAGEMENT_SERVICE_PORT_HTTP_API=8081 AAF_CASS_PORT=tcp://10.233.50.96:7000 AAF_CASS_PORT_7000_TCP=tcp://10.233.50.96:7000 AAF_CASS_PORT_7000_TCP_ADDR=10.233.50.96 AAF_CASS_PORT_7000_TCP_PORT=7000 AAF_CASS_PORT_7000_TCP_PROTO=tcp AAF_CASS_PORT_7001_TCP=tcp://10.233.50.96:7001 AAF_CASS_PORT_7001_TCP_ADDR=10.233.50.96 AAF_CASS_PORT_7001_TCP_PORT=7001 AAF_CASS_PORT_7001_TCP_PROTO=tcp AAF_CASS_PORT_9042_TCP=tcp://10.233.50.96:9042 AAF_CASS_PORT_9042_TCP_ADDR=10.233.50.96 AAF_CASS_PORT_9042_TCP_PORT=9042 AAF_CASS_PORT_9042_TCP_PROTO=tcp AAF_CASS_PORT_9160_TCP=tcp://10.233.50.96:9160 AAF_CASS_PORT_9160_TCP_ADDR=10.233.50.96 AAF_CASS_PORT_9160_TCP_PORT=9160 AAF_CASS_PORT_9160_TCP_PROTO=tcp AAF_CASS_SERVICE_HOST=10.233.50.96 AAF_CASS_SERVICE_PORT=7000 AAF_CASS_SERVICE_PORT_TCP_CQL=9042 AAF_CASS_SERVICE_PORT_TCP_INTRA=7000 AAF_CASS_SERVICE_PORT_TCP_THRIFT=9160 AAF_CASS_SERVICE_PORT_TLS=7001 AAF_CM_PORT=tcp://10.233.26.209:8150 AAF_CM_PORT_8150_TCP=tcp://10.233.26.209:8150 AAF_CM_PORT_8150_TCP_ADDR=10.233.26.209 AAF_CM_PORT_8150_TCP_PORT=8150 AAF_CM_PORT_8150_TCP_PROTO=tcp AAF_CM_SERVICE_HOST=10.233.26.209 AAF_CM_SERVICE_PORT=8150 AAF_CM_SERVICE_PORT_API=8150 AAF_FS_PORT=tcp://10.233.30.11:8096 AAF_FS_PORT_8096_TCP=tcp://10.233.30.11:8096 AAF_FS_PORT_8096_TCP_ADDR=10.233.30.11 AAF_FS_PORT_8096_TCP_PORT=8096 AAF_FS_PORT_8096_TCP_PROTO=tcp AAF_FS_SERVICE_HOST=10.233.30.11 AAF_FS_SERVICE_PORT=8096 AAF_FS_SERVICE_PORT_API=8096 AAF_GUI_PORT=tcp://10.233.27.85:8200 AAF_GUI_PORT_8200_TCP=tcp://10.233.27.85:8200 AAF_GUI_PORT_8200_TCP_ADDR=10.233.27.85 AAF_GUI_PORT_8200_TCP_PORT=8200 AAF_GUI_PORT_8200_TCP_PROTO=tcp AAF_GUI_SERVICE_HOST=10.233.27.85 AAF_GUI_SERVICE_PORT=8200 AAF_GUI_SERVICE_PORT_GUI=8200 AAF_HELLO_PORT=tcp://10.233.3.235:8130 AAF_HELLO_PORT_8130_TCP=tcp://10.233.3.235:8130 AAF_HELLO_PORT_8130_TCP_ADDR=10.233.3.235 AAF_HELLO_PORT_8130_TCP_PORT=8130 AAF_HELLO_PORT_8130_TCP_PROTO=tcp AAF_HELLO_SERVICE_HOST=10.233.3.235 AAF_HELLO_SERVICE_PORT=8130 AAF_HELLO_SERVICE_PORT_API=8130 AAF_LOCATE_PORT=tcp://10.233.10.212:8095 AAF_LOCATE_PORT_8095_TCP=tcp://10.233.10.212:8095 AAF_LOCATE_PORT_8095_TCP_ADDR=10.233.10.212 AAF_LOCATE_PORT_8095_TCP_PORT=8095 AAF_LOCATE_PORT_8095_TCP_PROTO=tcp AAF_LOCATE_SERVICE_HOST=10.233.10.212 
AAF_LOCATE_SERVICE_PORT=8095 AAF_LOCATE_SERVICE_PORT_API=8095 AAF_OAUTH_PORT=tcp://10.233.23.49:8140 AAF_OAUTH_PORT_8140_TCP=tcp://10.233.23.49:8140 AAF_OAUTH_PORT_8140_TCP_ADDR=10.233.23.49 AAF_OAUTH_PORT_8140_TCP_PORT=8140 AAF_OAUTH_PORT_8140_TCP_PROTO=tcp AAF_OAUTH_SERVICE_HOST=10.233.23.49 AAF_OAUTH_SERVICE_PORT=8140 AAF_OAUTH_SERVICE_PORT_API=8140 AAF_SERVICE_PORT=tcp://10.233.18.241:8100 AAF_SERVICE_PORT_8100_TCP=tcp://10.233.18.241:8100 AAF_SERVICE_PORT_8100_TCP_ADDR=10.233.18.241 AAF_SERVICE_PORT_8100_TCP_PORT=8100 AAF_SERVICE_PORT_8100_TCP_PROTO=tcp AAF_SERVICE_SERVICE_HOST=10.233.18.241 AAF_SERVICE_SERVICE_PORT=8100 AAF_SERVICE_SERVICE_PORT_API=8100 AAF_SMS_DB_PORT=tcp://10.233.11.210:8200 AAF_SMS_DB_PORT_8200_TCP=tcp://10.233.11.210:8200 AAF_SMS_DB_PORT_8200_TCP_ADDR=10.233.11.210 AAF_SMS_DB_PORT_8200_TCP_PORT=8200 AAF_SMS_DB_PORT_8200_TCP_PROTO=tcp AAF_SMS_DB_SERVICE_HOST=10.233.11.210 AAF_SMS_DB_SERVICE_PORT=8200 AAF_SMS_DB_SERVICE_PORT_AAF_SMS_DB=8200 AAF_SMS_PORT=tcp://10.233.3.97:10443 AAF_SMS_PORT_10443_TCP=tcp://10.233.3.97:10443 AAF_SMS_PORT_10443_TCP_ADDR=10.233.3.97 AAF_SMS_PORT_10443_TCP_PORT=10443 AAF_SMS_PORT_10443_TCP_PROTO=tcp AAF_SMS_SERVICE_HOST=10.233.3.97 AAF_SMS_SERVICE_PORT=10443 AAI_BABEL_PORT=tcp://10.233.57.246:9516 AAI_BABEL_PORT_9516_TCP=tcp://10.233.57.246:9516 AAI_BABEL_PORT_9516_TCP_ADDR=10.233.57.246 AAI_BABEL_PORT_9516_TCP_PORT=9516 AAI_BABEL_PORT_9516_TCP_PROTO=tcp AAI_BABEL_SERVICE_HOST=10.233.57.246 AAI_BABEL_SERVICE_PORT=9516 AAI_BABEL_SERVICE_PORT_BABEL=9516 AAI_MODELLOADER_PORT=tcp://10.233.35.4:8080 AAI_MODELLOADER_PORT_8080_TCP=tcp://10.233.35.4:8080 AAI_MODELLOADER_PORT_8080_TCP_ADDR=10.233.35.4 AAI_MODELLOADER_PORT_8080_TCP_PORT=8080 AAI_MODELLOADER_PORT_8080_TCP_PROTO=tcp AAI_MODELLOADER_PORT_8443_TCP=tcp://10.233.35.4:8443 AAI_MODELLOADER_PORT_8443_TCP_ADDR=10.233.35.4 AAI_MODELLOADER_PORT_8443_TCP_PORT=8443 AAI_MODELLOADER_PORT_8443_TCP_PROTO=tcp AAI_MODELLOADER_SERVICE_HOST=10.233.35.4 AAI_MODELLOADER_SERVICE_PORT=8080 AAI_MODELLOADER_SERVICE_PORT_AAI_MODELLOADER=8080 AAI_MODELLOADER_SERVICE_PORT_AAI_MODELLOADER_SSL=8443 AAI_PORT=tcp://10.233.35.204:8443 AAI_PORT_8443_TCP=tcp://10.233.35.204:8443 AAI_PORT_8443_TCP_ADDR=10.233.35.204 AAI_PORT_8443_TCP_PORT=8443 AAI_PORT_8443_TCP_PROTO=tcp AAI_RESOURCES_PORT=tcp://10.233.58.187:8447 AAI_RESOURCES_PORT_5005_TCP=tcp://10.233.58.187:5005 AAI_RESOURCES_PORT_5005_TCP_ADDR=10.233.58.187 AAI_RESOURCES_PORT_5005_TCP_PORT=5005 AAI_RESOURCES_PORT_5005_TCP_PROTO=tcp AAI_RESOURCES_PORT_8447_TCP=tcp://10.233.58.187:8447 AAI_RESOURCES_PORT_8447_TCP_ADDR=10.233.58.187 AAI_RESOURCES_PORT_8447_TCP_PORT=8447 AAI_RESOURCES_PORT_8447_TCP_PROTO=tcp AAI_RESOURCES_SERVICE_HOST=10.233.58.187 AAI_RESOURCES_SERVICE_PORT=8447 AAI_RESOURCES_SERVICE_PORT_AAI_RESOURCES_5005=5005 AAI_RESOURCES_SERVICE_PORT_AAI_RESOURCES_8447=8447 AAI_SERVICE_HOST=10.233.35.204 AAI_SERVICE_PORT=8443 AAI_SERVICE_PORT_AAI_SSL=8443 AAI_SPARKY_BE_PORT=tcp://10.233.1.34:8000 AAI_SPARKY_BE_PORT_8000_TCP=tcp://10.233.1.34:8000 AAI_SPARKY_BE_PORT_8000_TCP_ADDR=10.233.1.34 AAI_SPARKY_BE_PORT_8000_TCP_PORT=8000 AAI_SPARKY_BE_PORT_8000_TCP_PROTO=tcp AAI_SPARKY_BE_SERVICE_HOST=10.233.1.34 AAI_SPARKY_BE_SERVICE_PORT=8000 AAI_SPARKY_BE_SERVICE_PORT_AAI_SPARKY_BE=8000 AAI_TRAVERSAL_PORT=tcp://10.233.13.43:8446 AAI_TRAVERSAL_PORT_5005_TCP=tcp://10.233.13.43:5005 AAI_TRAVERSAL_PORT_5005_TCP_ADDR=10.233.13.43 AAI_TRAVERSAL_PORT_5005_TCP_PORT=5005 AAI_TRAVERSAL_PORT_5005_TCP_PROTO=tcp AAI_TRAVERSAL_PORT_8446_TCP=tcp://10.233.13.43:8446 
AAI_TRAVERSAL_PORT_8446_TCP_ADDR=10.233.13.43 AAI_TRAVERSAL_PORT_8446_TCP_PORT=8446 AAI_TRAVERSAL_PORT_8446_TCP_PROTO=tcp AAI_TRAVERSAL_SERVICE_HOST=10.233.13.43 AAI_TRAVERSAL_SERVICE_PORT=8446 AAI_TRAVERSAL_SERVICE_PORT_AAI_TRAVERSAL_5005=5005 AAI_TRAVERSAL_SERVICE_PORT_AAI_TRAVERSAL_8446=8446 ALLOW_UNSIGNED=false APPC_ANSIBLE_SERVER_PORT=tcp://10.233.27.7:8000 APPC_ANSIBLE_SERVER_PORT_8000_TCP=tcp://10.233.27.7:8000 APPC_ANSIBLE_SERVER_PORT_8000_TCP_ADDR=10.233.27.7 APPC_ANSIBLE_SERVER_PORT_8000_TCP_PORT=8000 APPC_ANSIBLE_SERVER_PORT_8000_TCP_PROTO=tcp APPC_ANSIBLE_SERVER_SERVICE_HOST=10.233.27.7 APPC_ANSIBLE_SERVER_SERVICE_PORT=8000 APPC_ANSIBLE_SERVER_SERVICE_PORT_APPC_ANSIBLE_SERVER=8000 APPC_CDT_PORT=tcp://10.233.28.69:18080 APPC_CDT_PORT_18080_TCP=tcp://10.233.28.69:18080 APPC_CDT_PORT_18080_TCP_ADDR=10.233.28.69 APPC_CDT_PORT_18080_TCP_PORT=18080 APPC_CDT_PORT_18080_TCP_PROTO=tcp APPC_CDT_SERVICE_HOST=10.233.28.69 APPC_CDT_SERVICE_PORT=18080 APPC_CDT_SERVICE_PORT_APPC_CDT=18080 APPC_DB_PORT=tcp://10.233.29.122:3306 APPC_DB_PORT_3306_TCP=tcp://10.233.29.122:3306 APPC_DB_PORT_3306_TCP_ADDR=10.233.29.122 APPC_DB_PORT_3306_TCP_PORT=3306 APPC_DB_PORT_3306_TCP_PROTO=tcp APPC_DB_SERVICE_HOST=10.233.29.122 APPC_DB_SERVICE_PORT=3306 APPC_DB_SERVICE_PORT_MYSQL=3306 APPC_DGBUILDER_PORT=tcp://10.233.34.92:3000 APPC_DGBUILDER_PORT_3000_TCP=tcp://10.233.34.92:3000 APPC_DGBUILDER_PORT_3000_TCP_ADDR=10.233.34.92 APPC_DGBUILDER_PORT_3000_TCP_PORT=3000 APPC_DGBUILDER_PORT_3000_TCP_PROTO=tcp APPC_DGBUILDER_SERVICE_HOST=10.233.34.92 APPC_DGBUILDER_SERVICE_PORT=3000 APPC_DGBUILDER_SERVICE_PORT_DGBUILDER=3000 APPC_PORT=tcp://10.233.8.251:8443 APPC_PORT_1830_TCP=tcp://10.233.8.251:1830 APPC_PORT_1830_TCP_ADDR=10.233.8.251 APPC_PORT_1830_TCP_PORT=1830 APPC_PORT_1830_TCP_PROTO=tcp APPC_PORT_8443_TCP=tcp://10.233.8.251:8443 APPC_PORT_8443_TCP_ADDR=10.233.8.251 APPC_PORT_8443_TCP_PORT=8443 APPC_PORT_8443_TCP_PROTO=tcp APPC_PORT_9090_TCP=tcp://10.233.8.251:9090 APPC_PORT_9090_TCP_ADDR=10.233.8.251 APPC_PORT_9090_TCP_PORT=9090 APPC_PORT_9090_TCP_PROTO=tcp APPC_SERVICE_HOST=10.233.8.251 APPC_SERVICE_PORT=8443 APPC_SERVICE_PORT_APPC_1830=1830 APPC_SERVICE_PORT_APPC_8443=8443 APPC_SERVICE_PORT_APPC_9090=9090 AWX_POSTGRESQL_PORT=tcp://10.233.17.94:5432 AWX_POSTGRESQL_PORT_5432_TCP=tcp://10.233.17.94:5432 AWX_POSTGRESQL_PORT_5432_TCP_ADDR=10.233.17.94 AWX_POSTGRESQL_PORT_5432_TCP_PORT=5432 AWX_POSTGRESQL_PORT_5432_TCP_PROTO=tcp AWX_POSTGRESQL_SERVICE_HOST=10.233.17.94 AWX_POSTGRESQL_SERVICE_PORT=5432 AWX_POSTGRESQL_SERVICE_PORT_AWX_POSTGRESQL=5432 AWX_RABBITMQ_PORT=tcp://10.233.58.90:15672 AWX_RABBITMQ_PORT_15672_TCP=tcp://10.233.58.90:15672 AWX_RABBITMQ_PORT_15672_TCP_ADDR=10.233.58.90 AWX_RABBITMQ_PORT_15672_TCP_PORT=15672 AWX_RABBITMQ_PORT_15672_TCP_PROTO=tcp AWX_RABBITMQ_PORT_5672_TCP=tcp://10.233.58.90:5672 AWX_RABBITMQ_PORT_5672_TCP_ADDR=10.233.58.90 AWX_RABBITMQ_PORT_5672_TCP_PORT=5672 AWX_RABBITMQ_PORT_5672_TCP_PROTO=tcp AWX_RABBITMQ_SERVICE_HOST=10.233.58.90 AWX_RABBITMQ_SERVICE_PORT=15672 AWX_RABBITMQ_SERVICE_PORT_AMQP=5672 AWX_RABBITMQ_SERVICE_PORT_HTTP=15672 AWX_RMQ_MGMT_PORT=tcp://10.233.10.82:15672 AWX_RMQ_MGMT_PORT_15672_TCP=tcp://10.233.10.82:15672 AWX_RMQ_MGMT_PORT_15672_TCP_ADDR=10.233.10.82 AWX_RMQ_MGMT_PORT_15672_TCP_PORT=15672 AWX_RMQ_MGMT_PORT_15672_TCP_PROTO=tcp AWX_RMQ_MGMT_SERVICE_HOST=10.233.10.82 AWX_RMQ_MGMT_SERVICE_PORT=15672 AWX_RMQ_MGMT_SERVICE_PORT_RMQMGMT=15672 AWX_WEB_PORT=tcp://10.233.14.230:8052 AWX_WEB_PORT_8052_TCP=tcp://10.233.14.230:8052 
AWX_WEB_PORT_8052_TCP_ADDR=10.233.14.230 AWX_WEB_PORT_8052_TCP_PORT=8052 AWX_WEB_PORT_8052_TCP_PROTO=tcp AWX_WEB_SERVICE_HOST=10.233.14.230 AWX_WEB_SERVICE_PORT=8052 AWX_WEB_SERVICE_PORT_WEB=8052 CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT=tcp://10.233.54.75:5701 CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP=tcp://10.233.54.75:5701 CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_ADDR=10.233.54.75 CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PORT=5701 CDS_BLUEPRINTS_PROCESSOR_CLUSTER_PORT_5701_TCP_PROTO=tcp CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_HOST=10.233.54.75 CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT=5701 CDS_BLUEPRINTS_PROCESSOR_CLUSTER_SERVICE_PORT_BLUEPRINTS_PROCESSOR_CLUSTER=5701 CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT=tcp://10.233.2.28:9111 CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP=tcp://10.233.2.28:9111 CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_ADDR=10.233.2.28 CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PORT=9111 CDS_BLUEPRINTS_PROCESSOR_GRPC_PORT_9111_TCP_PROTO=tcp CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_HOST=10.233.2.28 CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT=9111 CDS_BLUEPRINTS_PROCESSOR_GRPC_SERVICE_PORT_BLUEPRINTS_PROCESSOR_GRPC=9111 CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT=tcp://10.233.51.93:8080 CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP=tcp://10.233.51.93:8080 CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_ADDR=10.233.51.93 CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PORT=8080 CDS_BLUEPRINTS_PROCESSOR_HTTP_PORT_8080_TCP_PROTO=tcp CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_HOST=10.233.51.93 CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT=8080 CDS_BLUEPRINTS_PROCESSOR_HTTP_SERVICE_PORT_BLUEPRINTS_PROCESSOR_HTTP=8080 CDS_COMMAND_EXECUTOR_PORT=tcp://10.233.59.62:50051 CDS_COMMAND_EXECUTOR_PORT_50051_TCP=tcp://10.233.59.62:50051 CDS_COMMAND_EXECUTOR_PORT_50051_TCP_ADDR=10.233.59.62 CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PORT=50051 CDS_COMMAND_EXECUTOR_PORT_50051_TCP_PROTO=tcp CDS_COMMAND_EXECUTOR_SERVICE_HOST=10.233.59.62 CDS_COMMAND_EXECUTOR_SERVICE_PORT=50051 CDS_COMMAND_EXECUTOR_SERVICE_PORT_COMMAND_EXECUTOR_GRPC=50051 CDS_DB_PORT=tcp://10.233.31.254:3306 CDS_DB_PORT_3306_TCP=tcp://10.233.31.254:3306 CDS_DB_PORT_3306_TCP_ADDR=10.233.31.254 CDS_DB_PORT_3306_TCP_PORT=3306 CDS_DB_PORT_3306_TCP_PROTO=tcp CDS_DB_SERVICE_HOST=10.233.31.254 CDS_DB_SERVICE_PORT=3306 CDS_DB_SERVICE_PORT_MYSQL=3306 CDS_PY_EXECUTOR_PORT=tcp://10.233.8.52:50052 CDS_PY_EXECUTOR_PORT_50052_TCP=tcp://10.233.8.52:50052 CDS_PY_EXECUTOR_PORT_50052_TCP_ADDR=10.233.8.52 CDS_PY_EXECUTOR_PORT_50052_TCP_PORT=50052 CDS_PY_EXECUTOR_PORT_50052_TCP_PROTO=tcp CDS_PY_EXECUTOR_PORT_50053_TCP=tcp://10.233.8.52:50053 CDS_PY_EXECUTOR_PORT_50053_TCP_ADDR=10.233.8.52 CDS_PY_EXECUTOR_PORT_50053_TCP_PORT=50053 CDS_PY_EXECUTOR_PORT_50053_TCP_PROTO=tcp CDS_PY_EXECUTOR_SERVICE_HOST=10.233.8.52 CDS_PY_EXECUTOR_SERVICE_PORT=50052 CDS_PY_EXECUTOR_SERVICE_PORT_EXECUTOR_GRPC=50052 CDS_PY_EXECUTOR_SERVICE_PORT_MANAGER_GRPC=50053 CDS_SDC_LISTENER_PORT=tcp://10.233.18.90:8080 CDS_SDC_LISTENER_PORT_8080_TCP=tcp://10.233.18.90:8080 CDS_SDC_LISTENER_PORT_8080_TCP_ADDR=10.233.18.90 CDS_SDC_LISTENER_PORT_8080_TCP_PORT=8080 CDS_SDC_LISTENER_PORT_8080_TCP_PROTO=tcp CDS_SDC_LISTENER_SERVICE_HOST=10.233.18.90 CDS_SDC_LISTENER_SERVICE_PORT=8080 CDS_SDC_LISTENER_SERVICE_PORT_CDS_SDC_LISTENER_HTTP=8080 CDS_UI_PORT=tcp://10.233.29.67:3000 CDS_UI_PORT_3000_TCP=tcp://10.233.29.67:3000 CDS_UI_PORT_3000_TCP_ADDR=10.233.29.67 CDS_UI_PORT_3000_TCP_PORT=3000 CDS_UI_PORT_3000_TCP_PROTO=tcp CDS_UI_SERVICE_HOST=10.233.29.67 CDS_UI_SERVICE_PORT=3000 
CDS_UI_SERVICE_PORT_CDS_UI_3000=3000 CLI_PORT=tcp://10.233.20.132:443 CLI_PORT_443_TCP=tcp://10.233.20.132:443 CLI_PORT_443_TCP_ADDR=10.233.20.132 CLI_PORT_443_TCP_PORT=443 CLI_PORT_443_TCP_PROTO=tcp CLI_PORT_9090_TCP=tcp://10.233.20.132:9090 CLI_PORT_9090_TCP_ADDR=10.233.20.132 CLI_PORT_9090_TCP_PORT=9090 CLI_PORT_9090_TCP_PROTO=tcp CLI_SERVICE_HOST=10.233.20.132 CLI_SERVICE_PORT=443 CLI_SERVICE_PORT_CLI443=443 CLI_SERVICE_PORT_CLI9090=9090 CMSO_DB_PORT=tcp://10.233.38.106:3306 CMSO_DB_PORT_3306_TCP=tcp://10.233.38.106:3306 CMSO_DB_PORT_3306_TCP_ADDR=10.233.38.106 CMSO_DB_PORT_3306_TCP_PORT=3306 CMSO_DB_PORT_3306_TCP_PROTO=tcp CMSO_DB_SERVICE_HOST=10.233.38.106 CMSO_DB_SERVICE_PORT=3306 CMSO_DB_SERVICE_PORT_MYSQL=3306 COMPONENT=kafka CONFIG_BINDING_SERVICE_PORT=tcp://10.233.19.216:10000 CONFIG_BINDING_SERVICE_PORT_10000_TCP=tcp://10.233.19.216:10000 CONFIG_BINDING_SERVICE_PORT_10000_TCP_ADDR=10.233.19.216 CONFIG_BINDING_SERVICE_PORT_10000_TCP_PORT=10000 CONFIG_BINDING_SERVICE_PORT_10000_TCP_PROTO=tcp CONFIG_BINDING_SERVICE_PORT_10443_TCP=tcp://10.233.19.216:10443 CONFIG_BINDING_SERVICE_PORT_10443_TCP_ADDR=10.233.19.216 CONFIG_BINDING_SERVICE_PORT_10443_TCP_PORT=10443 CONFIG_BINDING_SERVICE_PORT_10443_TCP_PROTO=tcp CONFIG_BINDING_SERVICE_SERVICE_HOST=10.233.19.216 CONFIG_BINDING_SERVICE_SERVICE_PORT=10000 CONFIG_BINDING_SERVICE_SERVICE_PORT_CONFIG_BINDING_SERVICE_INSECURE=10000 CONFIG_BINDING_SERVICE_SERVICE_PORT_CONFIG_BINDING_SERVICE_SECURE=10443 CONFLUENT_DEB_VERSION=1 CONFLUENT_MAJOR_VERSION=5 CONFLUENT_MINOR_VERSION=3 CONFLUENT_MVN_LABEL= CONFLUENT_PATCH_VERSION=1 CONFLUENT_PLATFORM_LABEL= CONFLUENT_VERSION=5.3.1 CONSUL_SERVER_UI_PORT=tcp://10.233.51.0:8500 CONSUL_SERVER_UI_PORT_8500_TCP=tcp://10.233.51.0:8500 CONSUL_SERVER_UI_PORT_8500_TCP_ADDR=10.233.51.0 CONSUL_SERVER_UI_PORT_8500_TCP_PORT=8500 CONSUL_SERVER_UI_PORT_8500_TCP_PROTO=tcp CONSUL_SERVER_UI_SERVICE_HOST=10.233.51.0 CONSUL_SERVER_UI_SERVICE_PORT=8500 CONSUL_SERVER_UI_SERVICE_PORT_CONSUL_UI=8500 CPS_PG_PRIMARY_PORT=tcp://10.233.23.171:5432 CPS_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.23.171:5432 CPS_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.23.171 CPS_PG_PRIMARY_PORT_5432_TCP_PORT=5432 CPS_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp CPS_PG_PRIMARY_SERVICE_HOST=10.233.23.171 CPS_PG_PRIMARY_SERVICE_PORT=5432 CPS_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432 CPS_PG_REPLICA_PORT=tcp://10.233.3.237:5432 CPS_PG_REPLICA_PORT_5432_TCP=tcp://10.233.3.237:5432 CPS_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.3.237 CPS_PG_REPLICA_PORT_5432_TCP_PORT=5432 CPS_PG_REPLICA_PORT_5432_TCP_PROTO=tcp CPS_PG_REPLICA_SERVICE_HOST=10.233.3.237 CPS_PG_REPLICA_SERVICE_PORT=5432 CPS_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432 CPS_PORT=tcp://10.233.63.157:8080 CPS_PORT_8080_TCP=tcp://10.233.63.157:8080 CPS_PORT_8080_TCP_ADDR=10.233.63.157 CPS_PORT_8080_TCP_PORT=8080 CPS_PORT_8080_TCP_PROTO=tcp CPS_POSTGRES_PORT=tcp://10.233.35.124:5432 CPS_POSTGRES_PORT_5432_TCP=tcp://10.233.35.124:5432 CPS_POSTGRES_PORT_5432_TCP_ADDR=10.233.35.124 CPS_POSTGRES_PORT_5432_TCP_PORT=5432 CPS_POSTGRES_PORT_5432_TCP_PROTO=tcp CPS_POSTGRES_SERVICE_HOST=10.233.35.124 CPS_POSTGRES_SERVICE_PORT=5432 CPS_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432 CPS_SERVICE_HOST=10.233.63.157 CPS_SERVICE_PORT=8080 CPS_SERVICE_PORT_HTTP=8080 CUB_CLASSPATH=/etc/confluent/docker/docker-utils.jar DASHBOARD_PORT=tcp://10.233.55.237:8443 DASHBOARD_PORT_8443_TCP=tcp://10.233.55.237:8443 DASHBOARD_PORT_8443_TCP_ADDR=10.233.55.237 DASHBOARD_PORT_8443_TCP_PORT=8443 DASHBOARD_PORT_8443_TCP_PROTO=tcp DASHBOARD_SERVICE_HOST=10.233.55.237 
DASHBOARD_SERVICE_PORT=8443 DASHBOARD_SERVICE_PORT_DASHBOARD=8443 DBC_PG_PRIMARY_PORT=tcp://10.233.10.1:5432 DBC_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.10.1:5432 DBC_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.10.1 DBC_PG_PRIMARY_PORT_5432_TCP_PORT=5432 DBC_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp DBC_PG_PRIMARY_SERVICE_HOST=10.233.10.1 DBC_PG_PRIMARY_SERVICE_PORT=5432 DBC_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432 DBC_PG_REPLICA_PORT=tcp://10.233.62.191:5432 DBC_PG_REPLICA_PORT_5432_TCP=tcp://10.233.62.191:5432 DBC_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.62.191 DBC_PG_REPLICA_PORT_5432_TCP_PORT=5432 DBC_PG_REPLICA_PORT_5432_TCP_PROTO=tcp DBC_PG_REPLICA_SERVICE_HOST=10.233.62.191 DBC_PG_REPLICA_SERVICE_PORT=5432 DBC_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432 DBC_POSTGRES_PORT=tcp://10.233.32.76:5432 DBC_POSTGRES_PORT_5432_TCP=tcp://10.233.32.76:5432 DBC_POSTGRES_PORT_5432_TCP_ADDR=10.233.32.76 DBC_POSTGRES_PORT_5432_TCP_PORT=5432 DBC_POSTGRES_PORT_5432_TCP_PROTO=tcp DBC_POSTGRES_SERVICE_HOST=10.233.32.76 DBC_POSTGRES_SERVICE_PORT=5432 DBC_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432 DCAEMOD_DESIGNTOOL_PORT=tcp://10.233.51.12:8080 DCAEMOD_DESIGNTOOL_PORT_8080_TCP=tcp://10.233.51.12:8080 DCAEMOD_DESIGNTOOL_PORT_8080_TCP_ADDR=10.233.51.12 DCAEMOD_DESIGNTOOL_PORT_8080_TCP_PORT=8080 DCAEMOD_DESIGNTOOL_PORT_8080_TCP_PROTO=tcp DCAEMOD_DESIGNTOOL_SERVICE_HOST=10.233.51.12 DCAEMOD_DESIGNTOOL_SERVICE_PORT=8080 DCAEMOD_DESIGNTOOL_SERVICE_PORT_HTTP=8080 DCAEMOD_DISTRIBUTOR_API_PORT=tcp://10.233.56.111:8080 DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP=tcp://10.233.56.111:8080 DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_ADDR=10.233.56.111 DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_PORT=8080 DCAEMOD_DISTRIBUTOR_API_PORT_8080_TCP_PROTO=tcp DCAEMOD_DISTRIBUTOR_API_SERVICE_HOST=10.233.56.111 DCAEMOD_DISTRIBUTOR_API_SERVICE_PORT=8080 DCAEMOD_DISTRIBUTOR_API_SERVICE_PORT_HTTP=8080 DCAEMOD_GENPROCESSOR_PORT=tcp://10.233.60.143:8080 DCAEMOD_GENPROCESSOR_PORT_8080_TCP=tcp://10.233.60.143:8080 DCAEMOD_GENPROCESSOR_PORT_8080_TCP_ADDR=10.233.60.143 DCAEMOD_GENPROCESSOR_PORT_8080_TCP_PORT=8080 DCAEMOD_GENPROCESSOR_PORT_8080_TCP_PROTO=tcp DCAEMOD_GENPROCESSOR_SERVICE_HOST=10.233.60.143 DCAEMOD_GENPROCESSOR_SERVICE_PORT=8080 DCAEMOD_GENPROCESSOR_SERVICE_PORT_HTTP=8080 DCAEMOD_HEALTHCHECK_PORT=tcp://10.233.30.46:8080 DCAEMOD_HEALTHCHECK_PORT_8080_TCP=tcp://10.233.30.46:8080 DCAEMOD_HEALTHCHECK_PORT_8080_TCP_ADDR=10.233.30.46 DCAEMOD_HEALTHCHECK_PORT_8080_TCP_PORT=8080 DCAEMOD_HEALTHCHECK_PORT_8080_TCP_PROTO=tcp DCAEMOD_HEALTHCHECK_SERVICE_HOST=10.233.30.46 DCAEMOD_HEALTHCHECK_SERVICE_PORT=8080 DCAEMOD_HEALTHCHECK_SERVICE_PORT_HTTP=8080 DCAEMOD_NIFI_REGISTRY_PORT=tcp://10.233.9.44:18080 DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP=tcp://10.233.9.44:18080 DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_ADDR=10.233.9.44 DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_PORT=18080 DCAEMOD_NIFI_REGISTRY_PORT_18080_TCP_PROTO=tcp DCAEMOD_NIFI_REGISTRY_SERVICE_HOST=10.233.9.44 DCAEMOD_NIFI_REGISTRY_SERVICE_PORT=18080 DCAEMOD_NIFI_REGISTRY_SERVICE_PORT_HTTP=18080 DCAEMOD_ONBOARDING_API_PORT=tcp://10.233.33.132:8080 DCAEMOD_ONBOARDING_API_PORT_8080_TCP=tcp://10.233.33.132:8080 DCAEMOD_ONBOARDING_API_PORT_8080_TCP_ADDR=10.233.33.132 DCAEMOD_ONBOARDING_API_PORT_8080_TCP_PORT=8080 DCAEMOD_ONBOARDING_API_PORT_8080_TCP_PROTO=tcp DCAEMOD_ONBOARDING_API_SERVICE_HOST=10.233.33.132 DCAEMOD_ONBOARDING_API_SERVICE_PORT=8080 DCAEMOD_ONBOARDING_API_SERVICE_PORT_HTTP=8080 DCAEMOD_PG_PRIMARY_PORT=tcp://10.233.6.90:5432 DCAEMOD_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.6.90:5432 
DCAEMOD_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.6.90 DCAEMOD_PG_PRIMARY_PORT_5432_TCP_PORT=5432 DCAEMOD_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp DCAEMOD_PG_PRIMARY_SERVICE_HOST=10.233.6.90 DCAEMOD_PG_PRIMARY_SERVICE_PORT=5432 DCAEMOD_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432 DCAEMOD_PG_REPLICA_PORT=tcp://10.233.11.141:5432 DCAEMOD_PG_REPLICA_PORT_5432_TCP=tcp://10.233.11.141:5432 DCAEMOD_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.11.141 DCAEMOD_PG_REPLICA_PORT_5432_TCP_PORT=5432 DCAEMOD_PG_REPLICA_PORT_5432_TCP_PROTO=tcp DCAEMOD_PG_REPLICA_SERVICE_HOST=10.233.11.141 DCAEMOD_PG_REPLICA_SERVICE_PORT=5432 DCAEMOD_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432 DCAEMOD_POSTGRES_PORT=tcp://10.233.46.97:5432 DCAEMOD_POSTGRES_PORT_5432_TCP=tcp://10.233.46.97:5432 DCAEMOD_POSTGRES_PORT_5432_TCP_ADDR=10.233.46.97 DCAEMOD_POSTGRES_PORT_5432_TCP_PORT=5432 DCAEMOD_POSTGRES_PORT_5432_TCP_PROTO=tcp DCAEMOD_POSTGRES_SERVICE_HOST=10.233.46.97 DCAEMOD_POSTGRES_SERVICE_PORT=5432 DCAEMOD_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432 DCAEMOD_RUNTIME_API_PORT=tcp://10.233.19.22:9090 DCAEMOD_RUNTIME_API_PORT_9090_TCP=tcp://10.233.19.22:9090 DCAEMOD_RUNTIME_API_PORT_9090_TCP_ADDR=10.233.19.22 DCAEMOD_RUNTIME_API_PORT_9090_TCP_PORT=9090 DCAEMOD_RUNTIME_API_PORT_9090_TCP_PROTO=tcp DCAEMOD_RUNTIME_API_SERVICE_HOST=10.233.19.22 DCAEMOD_RUNTIME_API_SERVICE_PORT=9090 DCAEMOD_RUNTIME_API_SERVICE_PORT_HTTP=9090 DCAE_CLOUDIFY_MANAGER_PORT=tcp://10.233.25.169:443 DCAE_CLOUDIFY_MANAGER_PORT_443_TCP=tcp://10.233.25.169:443 DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_ADDR=10.233.25.169 DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_PORT=443 DCAE_CLOUDIFY_MANAGER_PORT_443_TCP_PROTO=tcp DCAE_CLOUDIFY_MANAGER_SERVICE_HOST=10.233.25.169 DCAE_CLOUDIFY_MANAGER_SERVICE_PORT=443 DCAE_CLOUDIFY_MANAGER_SERVICE_PORT_DCAE_CLOUDIFY_MANAGER=443 DCAE_DASHBOARD_PG_PRIMARY_PORT=tcp://10.233.57.14:5432 DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.57.14:5432 DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.57.14 DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_PORT=5432 DCAE_DASHBOARD_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp DCAE_DASHBOARD_PG_PRIMARY_SERVICE_HOST=10.233.57.14 DCAE_DASHBOARD_PG_PRIMARY_SERVICE_PORT=5432 DCAE_DASHBOARD_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432 DCAE_DASHBOARD_PG_REPLICA_PORT=tcp://10.233.14.159:5432 DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP=tcp://10.233.14.159:5432 DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.14.159 DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_PORT=5432 DCAE_DASHBOARD_PG_REPLICA_PORT_5432_TCP_PROTO=tcp DCAE_DASHBOARD_PG_REPLICA_SERVICE_HOST=10.233.14.159 DCAE_DASHBOARD_PG_REPLICA_SERVICE_PORT=5432 DCAE_DASHBOARD_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432 DCAE_DASHBOARD_POSTGRES_PORT=tcp://10.233.56.42:5432 DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP=tcp://10.233.56.42:5432 DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_ADDR=10.233.56.42 DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_PORT=5432 DCAE_DASHBOARD_POSTGRES_PORT_5432_TCP_PROTO=tcp DCAE_DASHBOARD_POSTGRES_SERVICE_HOST=10.233.56.42 DCAE_DASHBOARD_POSTGRES_SERVICE_PORT=5432 DCAE_DASHBOARD_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432 DCAE_HEALTHCHECK_PORT=tcp://10.233.34.235:80 DCAE_HEALTHCHECK_PORT_80_TCP=tcp://10.233.34.235:80 DCAE_HEALTHCHECK_PORT_80_TCP_ADDR=10.233.34.235 DCAE_HEALTHCHECK_PORT_80_TCP_PORT=80 DCAE_HEALTHCHECK_PORT_80_TCP_PROTO=tcp DCAE_HEALTHCHECK_SERVICE_HOST=10.233.34.235 DCAE_HEALTHCHECK_SERVICE_PORT=80 DCAE_HEALTHCHECK_SERVICE_PORT_DCAE_HEALTHCHECK=80 DCAE_HV_VES_COLLECTOR_PORT=tcp://10.233.12.164:6061 DCAE_HV_VES_COLLECTOR_PORT_6061_TCP=tcp://10.233.12.164:6061 
DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_ADDR=10.233.12.164 DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_PORT=6061 DCAE_HV_VES_COLLECTOR_PORT_6061_TCP_PROTO=tcp DCAE_HV_VES_COLLECTOR_SERVICE_HOST=10.233.12.164 DCAE_HV_VES_COLLECTOR_SERVICE_PORT=6061 DCAE_HV_VES_COLLECTOR_SERVICE_PORT_HTTPS_HTTP=6061 DCAE_INV_PG_PRIMARY_PORT=tcp://10.233.44.60:5432 DCAE_INV_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.44.60:5432 DCAE_INV_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.44.60 DCAE_INV_PG_PRIMARY_PORT_5432_TCP_PORT=5432 DCAE_INV_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp DCAE_INV_PG_PRIMARY_SERVICE_HOST=10.233.44.60 DCAE_INV_PG_PRIMARY_SERVICE_PORT=5432 DCAE_INV_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432 DCAE_INV_PG_REPLICA_PORT=tcp://10.233.40.181:5432 DCAE_INV_PG_REPLICA_PORT_5432_TCP=tcp://10.233.40.181:5432 DCAE_INV_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.40.181 DCAE_INV_PG_REPLICA_PORT_5432_TCP_PORT=5432 DCAE_INV_PG_REPLICA_PORT_5432_TCP_PROTO=tcp DCAE_INV_PG_REPLICA_SERVICE_HOST=10.233.40.181 DCAE_INV_PG_REPLICA_SERVICE_PORT=5432 DCAE_INV_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432 DCAE_INV_POSTGRES_PORT=tcp://10.233.17.61:5432 DCAE_INV_POSTGRES_PORT_5432_TCP=tcp://10.233.17.61:5432 DCAE_INV_POSTGRES_PORT_5432_TCP_ADDR=10.233.17.61 DCAE_INV_POSTGRES_PORT_5432_TCP_PORT=5432 DCAE_INV_POSTGRES_PORT_5432_TCP_PROTO=tcp DCAE_INV_POSTGRES_SERVICE_HOST=10.233.17.61 DCAE_INV_POSTGRES_SERVICE_PORT=5432 DCAE_INV_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432 DCAE_MONGOHOST_READ_PORT=tcp://10.233.7.34:27017 DCAE_MONGOHOST_READ_PORT_27017_TCP=tcp://10.233.7.34:27017 DCAE_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.233.7.34 DCAE_MONGOHOST_READ_PORT_27017_TCP_PORT=27017 DCAE_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp DCAE_MONGOHOST_READ_SERVICE_HOST=10.233.7.34 DCAE_MONGOHOST_READ_SERVICE_PORT=27017 DCAE_MONGOHOST_READ_SERVICE_PORT_MONGO=27017 DCAE_MS_HEALTHCHECK_PORT=tcp://10.233.20.60:8080 DCAE_MS_HEALTHCHECK_PORT_8080_TCP=tcp://10.233.20.60:8080 DCAE_MS_HEALTHCHECK_PORT_8080_TCP_ADDR=10.233.20.60 DCAE_MS_HEALTHCHECK_PORT_8080_TCP_PORT=8080 DCAE_MS_HEALTHCHECK_PORT_8080_TCP_PROTO=tcp DCAE_MS_HEALTHCHECK_SERVICE_HOST=10.233.20.60 DCAE_MS_HEALTHCHECK_SERVICE_PORT=8080 DCAE_MS_HEALTHCHECK_SERVICE_PORT_HTTP=8080 DCAE_PG_PRIMARY_PORT=tcp://10.233.63.245:5432 DCAE_PG_PRIMARY_PORT_5432_TCP=tcp://10.233.63.245:5432 DCAE_PG_PRIMARY_PORT_5432_TCP_ADDR=10.233.63.245 DCAE_PG_PRIMARY_PORT_5432_TCP_PORT=5432 DCAE_PG_PRIMARY_PORT_5432_TCP_PROTO=tcp DCAE_PG_PRIMARY_SERVICE_HOST=10.233.63.245 DCAE_PG_PRIMARY_SERVICE_PORT=5432 DCAE_PG_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432 DCAE_PG_REPLICA_PORT=tcp://10.233.38.169:5432 DCAE_PG_REPLICA_PORT_5432_TCP=tcp://10.233.38.169:5432 DCAE_PG_REPLICA_PORT_5432_TCP_ADDR=10.233.38.169 DCAE_PG_REPLICA_PORT_5432_TCP_PORT=5432 DCAE_PG_REPLICA_PORT_5432_TCP_PROTO=tcp DCAE_PG_REPLICA_SERVICE_HOST=10.233.38.169 DCAE_PG_REPLICA_SERVICE_PORT=5432 DCAE_PG_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432 DCAE_POSTGRES_PORT=tcp://10.233.60.152:5432 DCAE_POSTGRES_PORT_5432_TCP=tcp://10.233.60.152:5432 DCAE_POSTGRES_PORT_5432_TCP_ADDR=10.233.60.152 DCAE_POSTGRES_PORT_5432_TCP_PORT=5432 DCAE_POSTGRES_PORT_5432_TCP_PROTO=tcp DCAE_POSTGRES_SERVICE_HOST=10.233.60.152 DCAE_POSTGRES_SERVICE_PORT=5432 DCAE_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432 DCAE_PRH_PORT=tcp://10.233.20.85:8100 DCAE_PRH_PORT_8100_TCP=tcp://10.233.20.85:8100 DCAE_PRH_PORT_8100_TCP_ADDR=10.233.20.85 DCAE_PRH_PORT_8100_TCP_PORT=8100 DCAE_PRH_PORT_8100_TCP_PROTO=tcp DCAE_PRH_SERVICE_HOST=10.233.20.85 DCAE_PRH_SERVICE_PORT=8100 DCAE_PRH_SERVICE_PORT_HTTP=8100 DCAE_TCAGEN2_PORT=tcp://10.233.19.81:9091 
DCAE_TCAGEN2_PORT_9091_TCP=tcp://10.233.19.81:9091 DCAE_TCAGEN2_PORT_9091_TCP_ADDR=10.233.19.81 DCAE_TCAGEN2_PORT_9091_TCP_PORT=9091 DCAE_TCAGEN2_PORT_9091_TCP_PROTO=tcp DCAE_TCAGEN2_SERVICE_HOST=10.233.19.81 DCAE_TCAGEN2_SERVICE_PORT=9091 DCAE_TCAGEN2_SERVICE_PORT_HTTP=9091 DCAE_VES_COLLECTOR_PORT=tcp://10.233.33.115:8443 DCAE_VES_COLLECTOR_PORT_8443_TCP=tcp://10.233.33.115:8443 DCAE_VES_COLLECTOR_PORT_8443_TCP_ADDR=10.233.33.115 DCAE_VES_COLLECTOR_PORT_8443_TCP_PORT=8443 DCAE_VES_COLLECTOR_PORT_8443_TCP_PROTO=tcp DCAE_VES_COLLECTOR_SERVICE_HOST=10.233.33.115 DCAE_VES_COLLECTOR_SERVICE_PORT=8443 DCAE_VES_COLLECTOR_SERVICE_PORT_HTTPS_HTTP=8443 DEPLOYMENT_HANDLER_PORT=tcp://10.233.21.247:8443 DEPLOYMENT_HANDLER_PORT_8443_TCP=tcp://10.233.21.247:8443 DEPLOYMENT_HANDLER_PORT_8443_TCP_ADDR=10.233.21.247 DEPLOYMENT_HANDLER_PORT_8443_TCP_PORT=8443 DEPLOYMENT_HANDLER_PORT_8443_TCP_PROTO=tcp DEPLOYMENT_HANDLER_SERVICE_HOST=10.233.21.247 DEPLOYMENT_HANDLER_SERVICE_PORT=8443 DEPLOYMENT_HANDLER_SERVICE_PORT_DEPLOYMENT_HANDLER=8443 DMAAP_BC_PORT=tcp://10.233.44.99:8443 DMAAP_BC_PORT_8443_TCP=tcp://10.233.44.99:8443 DMAAP_BC_PORT_8443_TCP_ADDR=10.233.44.99 DMAAP_BC_PORT_8443_TCP_PORT=8443 DMAAP_BC_PORT_8443_TCP_PROTO=tcp DMAAP_BC_SERVICE_HOST=10.233.44.99 DMAAP_BC_SERVICE_PORT=8443 DMAAP_BC_SERVICE_PORT_HTTPS_API=8443 DMAAP_DR_DB_PORT=tcp://10.233.48.89:3306 DMAAP_DR_DB_PORT_3306_TCP=tcp://10.233.48.89:3306 DMAAP_DR_DB_PORT_3306_TCP_ADDR=10.233.48.89 DMAAP_DR_DB_PORT_3306_TCP_PORT=3306 DMAAP_DR_DB_PORT_3306_TCP_PROTO=tcp DMAAP_DR_DB_SERVICE_HOST=10.233.48.89 DMAAP_DR_DB_SERVICE_PORT=3306 DMAAP_DR_DB_SERVICE_PORT_MYSQL=3306 DMAAP_DR_NODE_EXTERNAL_PORT=tcp://10.233.11.237:8443 DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP=tcp://10.233.11.237:8443 DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_ADDR=10.233.11.237 DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PORT=8443 DMAAP_DR_NODE_EXTERNAL_PORT_8443_TCP_PROTO=tcp DMAAP_DR_NODE_EXTERNAL_SERVICE_HOST=10.233.11.237 DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT=8443 DMAAP_DR_NODE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443 DMAAP_DR_NODE_PORT=tcp://10.233.28.61:8443 DMAAP_DR_NODE_PORT_8080_TCP=tcp://10.233.28.61:8080 DMAAP_DR_NODE_PORT_8080_TCP_ADDR=10.233.28.61 DMAAP_DR_NODE_PORT_8080_TCP_PORT=8080 DMAAP_DR_NODE_PORT_8080_TCP_PROTO=tcp DMAAP_DR_NODE_PORT_8443_TCP=tcp://10.233.28.61:8443 DMAAP_DR_NODE_PORT_8443_TCP_ADDR=10.233.28.61 DMAAP_DR_NODE_PORT_8443_TCP_PORT=8443 DMAAP_DR_NODE_PORT_8443_TCP_PROTO=tcp DMAAP_DR_NODE_SERVICE_HOST=10.233.28.61 DMAAP_DR_NODE_SERVICE_PORT=8443 DMAAP_DR_NODE_SERVICE_PORT_HTTPS_API=8443 DMAAP_DR_NODE_SERVICE_PORT_HTTP_API=8080 DMAAP_DR_PROV_PORT=tcp://10.233.20.140:443 DMAAP_DR_PROV_PORT_443_TCP=tcp://10.233.20.140:443 DMAAP_DR_PROV_PORT_443_TCP_ADDR=10.233.20.140 DMAAP_DR_PROV_PORT_443_TCP_PORT=443 DMAAP_DR_PROV_PORT_443_TCP_PROTO=tcp DMAAP_DR_PROV_SERVICE_HOST=10.233.20.140 DMAAP_DR_PROV_SERVICE_PORT=443 DMAAP_DR_PROV_SERVICE_PORT_DR_PROV_PORT2=443 EJBCA_PORT=tcp://10.233.23.60:8443 EJBCA_PORT_8080_TCP=tcp://10.233.23.60:8080 EJBCA_PORT_8080_TCP_ADDR=10.233.23.60 EJBCA_PORT_8080_TCP_PORT=8080 EJBCA_PORT_8080_TCP_PROTO=tcp EJBCA_PORT_8443_TCP=tcp://10.233.23.60:8443 EJBCA_PORT_8443_TCP_ADDR=10.233.23.60 EJBCA_PORT_8443_TCP_PORT=8443 EJBCA_PORT_8443_TCP_PROTO=tcp EJBCA_SERVICE_HOST=10.233.23.60 EJBCA_SERVICE_PORT=8443 EJBCA_SERVICE_PORT_HTTPS_API=8443 EJBCA_SERVICE_PORT_HTTP_API=8080 ESR_GUI_PORT=tcp://10.233.63.246:8080 ESR_GUI_PORT_8080_TCP=tcp://10.233.63.246:8080 ESR_GUI_PORT_8080_TCP_ADDR=10.233.63.246 ESR_GUI_PORT_8080_TCP_PORT=8080 ESR_GUI_PORT_8080_TCP_PROTO=tcp 
ESR_GUI_SERVICE_HOST=10.233.63.246 ESR_GUI_SERVICE_PORT=8080 ESR_GUI_SERVICE_PORT_ESR_GUI=8080 ESR_SERVER_PORT=tcp://10.233.25.209:9518 ESR_SERVER_PORT_9518_TCP=tcp://10.233.25.209:9518 ESR_SERVER_PORT_9518_TCP_ADDR=10.233.25.209 ESR_SERVER_PORT_9518_TCP_PORT=9518 ESR_SERVER_PORT_9518_TCP_PROTO=tcp ESR_SERVER_SERVICE_HOST=10.233.25.209 ESR_SERVER_SERVICE_PORT=9518 ESR_SERVER_SERVICE_PORT_ESR_SERVER=9518 HOLMES_ENGINE_MGMT_PORT=tcp://10.233.33.74:9102 HOLMES_ENGINE_MGMT_PORT_9102_TCP=tcp://10.233.33.74:9102 HOLMES_ENGINE_MGMT_PORT_9102_TCP_ADDR=10.233.33.74 HOLMES_ENGINE_MGMT_PORT_9102_TCP_PORT=9102 HOLMES_ENGINE_MGMT_PORT_9102_TCP_PROTO=tcp HOLMES_ENGINE_MGMT_SERVICE_HOST=10.233.33.74 HOLMES_ENGINE_MGMT_SERVICE_PORT=9102 HOLMES_ENGINE_MGMT_SERVICE_PORT_HTTPS_REST=9102 HOLMES_POSTGRES_PORT=tcp://10.233.19.41:5432 HOLMES_POSTGRES_PORT_5432_TCP=tcp://10.233.19.41:5432 HOLMES_POSTGRES_PORT_5432_TCP_ADDR=10.233.19.41 HOLMES_POSTGRES_PORT_5432_TCP_PORT=5432 HOLMES_POSTGRES_PORT_5432_TCP_PROTO=tcp HOLMES_POSTGRES_PRIMARY_PORT=tcp://10.233.36.28:5432 HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP=tcp://10.233.36.28:5432 HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_ADDR=10.233.36.28 HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PORT=5432 HOLMES_POSTGRES_PRIMARY_PORT_5432_TCP_PROTO=tcp HOLMES_POSTGRES_PRIMARY_SERVICE_HOST=10.233.36.28 HOLMES_POSTGRES_PRIMARY_SERVICE_PORT=5432 HOLMES_POSTGRES_PRIMARY_SERVICE_PORT_TCP_POSTGRES=5432 HOLMES_POSTGRES_REPLICA_PORT=tcp://10.233.3.127:5432 HOLMES_POSTGRES_REPLICA_PORT_5432_TCP=tcp://10.233.3.127:5432 HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_ADDR=10.233.3.127 HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PORT=5432 HOLMES_POSTGRES_REPLICA_PORT_5432_TCP_PROTO=tcp HOLMES_POSTGRES_REPLICA_SERVICE_HOST=10.233.3.127 HOLMES_POSTGRES_REPLICA_SERVICE_PORT=5432 HOLMES_POSTGRES_REPLICA_SERVICE_PORT_TCP_POSTGRES=5432 HOLMES_POSTGRES_SERVICE_HOST=10.233.19.41 HOLMES_POSTGRES_SERVICE_PORT=5432 HOLMES_POSTGRES_SERVICE_PORT_TCP_POSTGRES=5432 HOLMES_RULE_MGMT_PORT=tcp://10.233.18.189:9101 HOLMES_RULE_MGMT_PORT_9101_TCP=tcp://10.233.18.189:9101 HOLMES_RULE_MGMT_PORT_9101_TCP_ADDR=10.233.18.189 HOLMES_RULE_MGMT_PORT_9101_TCP_PORT=9101 HOLMES_RULE_MGMT_PORT_9101_TCP_PROTO=tcp HOLMES_RULE_MGMT_PORT_9104_TCP=tcp://10.233.18.189:9104 HOLMES_RULE_MGMT_PORT_9104_TCP_ADDR=10.233.18.189 HOLMES_RULE_MGMT_PORT_9104_TCP_PORT=9104 HOLMES_RULE_MGMT_PORT_9104_TCP_PROTO=tcp HOLMES_RULE_MGMT_SERVICE_HOST=10.233.18.189 HOLMES_RULE_MGMT_SERVICE_PORT=9101 HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_REST=9101 HOLMES_RULE_MGMT_SERVICE_PORT_HTTPS_UI=9104 HOME=/home/mrkafka HOSTNAME=onap-message-router-kafka-1 HOST_IP=10.253.0.28 INVENTORY_PORT=tcp://10.233.1.240:8080 INVENTORY_PORT_8080_TCP=tcp://10.233.1.240:8080 INVENTORY_PORT_8080_TCP_ADDR=10.233.1.240 INVENTORY_PORT_8080_TCP_PORT=8080 INVENTORY_PORT_8080_TCP_PROTO=tcp INVENTORY_SERVICE_HOST=10.233.1.240 INVENTORY_SERVICE_PORT=8080 INVENTORY_SERVICE_PORT_INVENTORY=8080 KAFKA_ADVERTISED_LISTENERS=EXTERNAL_SASL_PLAINTEXT://10.253.0.28:30491,INTERNAL_SASL_PLAINTEXT://:9092 KAFKA_AUTHORIZER_CLASS_NAME=org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer KAFKA_BROKER_ID=1 KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE=false KAFKA_DEFAULT_REPLICATION_FACTOR=3 KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL_SASL_PLAINTEXT KAFKA_JMX_PORT=5555 KAFKA_LISTENERS=EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092 KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT KAFKA_LOG_DIRS=/var/lib/kafka/data 
KAFKA_LOG_RETENTION_HOURS=168 KAFKA_NUM_PARTITIONS=3 KAFKA_NUM_RECOVERY_THREADS_PER_DATA_DIR=5 KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=3 KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf KAFKA_SASL_ENABLED_MECHANISMS=PLAIN KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1 KAFKA_USER=mrkafka KAFKA_VERSION=5.3.1 KAFKA_ZOOKEEPER_CONNECT=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=6000 KAFKA_ZOOKEEPER_SET_ACL=true KUBERNETES_PORT=tcp://10.233.0.1:443 KUBERNETES_PORT_443_TCP=tcp://10.233.0.1:443 KUBERNETES_PORT_443_TCP_ADDR=10.233.0.1 KUBERNETES_PORT_443_TCP_PORT=443 KUBERNETES_PORT_443_TCP_PROTO=tcp KUBERNETES_SERVICE_HOST=10.233.0.1 KUBERNETES_SERVICE_PORT=443 KUBERNETES_SERVICE_PORT_HTTPS=443 LANG=C.UTF-8 MARIADB_GALERA_PORT=tcp://10.233.45.8:3306 MARIADB_GALERA_PORT_3306_TCP=tcp://10.233.45.8:3306 MARIADB_GALERA_PORT_3306_TCP_ADDR=10.233.45.8 MARIADB_GALERA_PORT_3306_TCP_PORT=3306 MARIADB_GALERA_PORT_3306_TCP_PROTO=tcp MARIADB_GALERA_SERVICE_HOST=10.233.45.8 MARIADB_GALERA_SERVICE_PORT=3306 MARIADB_GALERA_SERVICE_PORT_MYSQL=3306 MESSAGE_ROUTER_EXTERNAL_PORT=tcp://10.233.4.95:3905 MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP=tcp://10.233.4.95:3905 MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_ADDR=10.233.4.95 MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PORT=3905 MESSAGE_ROUTER_EXTERNAL_PORT_3905_TCP_PROTO=tcp MESSAGE_ROUTER_EXTERNAL_SERVICE_HOST=10.233.4.95 MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT=3905 MESSAGE_ROUTER_EXTERNAL_SERVICE_PORT_HTTPS_API=3905 MESSAGE_ROUTER_KAFKA_0_PORT=tcp://10.233.2.181:9091 MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP=tcp://10.233.2.181:9091 MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_ADDR=10.233.2.181 MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PORT=9091 MESSAGE_ROUTER_KAFKA_0_PORT_9091_TCP_PROTO=tcp MESSAGE_ROUTER_KAFKA_0_SERVICE_HOST=10.233.2.181 MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT=9091 MESSAGE_ROUTER_KAFKA_0_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_0=9091 MESSAGE_ROUTER_KAFKA_1_PORT=tcp://10.233.31.51:9091 MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP=tcp://10.233.31.51:9091 MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_ADDR=10.233.31.51 MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PORT=9091 MESSAGE_ROUTER_KAFKA_1_PORT_9091_TCP_PROTO=tcp MESSAGE_ROUTER_KAFKA_1_SERVICE_HOST=10.233.31.51 MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT=9091 MESSAGE_ROUTER_KAFKA_1_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_1=9091 MESSAGE_ROUTER_KAFKA_2_PORT=tcp://10.233.1.160:9091 MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP=tcp://10.233.1.160:9091 MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_ADDR=10.233.1.160 MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PORT=9091 MESSAGE_ROUTER_KAFKA_2_PORT_9091_TCP_PROTO=tcp MESSAGE_ROUTER_KAFKA_2_SERVICE_HOST=10.233.1.160 MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT=9091 MESSAGE_ROUTER_KAFKA_2_SERVICE_PORT_MESSAGE_ROUTER_KAFKA_2=9091 MESSAGE_ROUTER_PORT=tcp://10.233.63.102:3905 MESSAGE_ROUTER_PORT_3904_TCP=tcp://10.233.63.102:3904 MESSAGE_ROUTER_PORT_3904_TCP_ADDR=10.233.63.102 MESSAGE_ROUTER_PORT_3904_TCP_PORT=3904 MESSAGE_ROUTER_PORT_3904_TCP_PROTO=tcp MESSAGE_ROUTER_PORT_3905_TCP=tcp://10.233.63.102:3905 MESSAGE_ROUTER_PORT_3905_TCP_ADDR=10.233.63.102 MESSAGE_ROUTER_PORT_3905_TCP_PORT=3905 MESSAGE_ROUTER_PORT_3905_TCP_PROTO=tcp MESSAGE_ROUTER_SERVICE_HOST=10.233.63.102 MESSAGE_ROUTER_SERVICE_PORT=3905 
MESSAGE_ROUTER_SERVICE_PORT_HTTPS_API=3905 MESSAGE_ROUTER_SERVICE_PORT_HTTP_API=3904 MODELING_ETSICATALOG_PORT=tcp://10.233.10.246:8806 MODELING_ETSICATALOG_PORT_8806_TCP=tcp://10.233.10.246:8806 MODELING_ETSICATALOG_PORT_8806_TCP_ADDR=10.233.10.246 MODELING_ETSICATALOG_PORT_8806_TCP_PORT=8806 MODELING_ETSICATALOG_PORT_8806_TCP_PROTO=tcp MODELING_ETSICATALOG_SERVICE_HOST=10.233.10.246 MODELING_ETSICATALOG_SERVICE_PORT=8806 MODELING_ETSICATALOG_SERVICE_PORT_MODELING_ETSICATALOG=8806 MSB_CONSUL_PORT=tcp://10.233.9.140:8500 MSB_CONSUL_PORT_8500_TCP=tcp://10.233.9.140:8500 MSB_CONSUL_PORT_8500_TCP_ADDR=10.233.9.140 MSB_CONSUL_PORT_8500_TCP_PORT=8500 MSB_CONSUL_PORT_8500_TCP_PROTO=tcp MSB_CONSUL_SERVICE_HOST=10.233.9.140 MSB_CONSUL_SERVICE_PORT=8500 MSB_CONSUL_SERVICE_PORT_HTTP_MSB_CONSUL=8500 MSB_DISCOVERY_PORT=tcp://10.233.46.96:10081 MSB_DISCOVERY_PORT_10081_TCP=tcp://10.233.46.96:10081 MSB_DISCOVERY_PORT_10081_TCP_ADDR=10.233.46.96 MSB_DISCOVERY_PORT_10081_TCP_PORT=10081 MSB_DISCOVERY_PORT_10081_TCP_PROTO=tcp MSB_DISCOVERY_SERVICE_HOST=10.233.46.96 MSB_DISCOVERY_SERVICE_PORT=10081 MSB_DISCOVERY_SERVICE_PORT_HTTP_MSB_DISCOVERY=10081 MSB_EAG_PORT=tcp://10.233.28.74:443 MSB_EAG_PORT_443_TCP=tcp://10.233.28.74:443 MSB_EAG_PORT_443_TCP_ADDR=10.233.28.74 MSB_EAG_PORT_443_TCP_PORT=443 MSB_EAG_PORT_443_TCP_PROTO=tcp MSB_EAG_SERVICE_HOST=10.233.28.74 MSB_EAG_SERVICE_PORT=443 MSB_EAG_SERVICE_PORT_HTTPS_MSB_EAG=443 MSB_IAG_PORT=tcp://10.233.63.78:443 MSB_IAG_PORT_443_TCP=tcp://10.233.63.78:443 MSB_IAG_PORT_443_TCP_ADDR=10.233.63.78 MSB_IAG_PORT_443_TCP_PORT=443 MSB_IAG_PORT_443_TCP_PROTO=tcp MSB_IAG_SERVICE_HOST=10.233.63.78 MSB_IAG_SERVICE_PORT=443 MSB_IAG_SERVICE_PORT_HTTPS_MSB_IAG=443 MULTICLOUD_FCAPS_PORT=tcp://10.233.12.190:9011 MULTICLOUD_FCAPS_PORT_9011_TCP=tcp://10.233.12.190:9011 MULTICLOUD_FCAPS_PORT_9011_TCP_ADDR=10.233.12.190 MULTICLOUD_FCAPS_PORT_9011_TCP_PORT=9011 MULTICLOUD_FCAPS_PORT_9011_TCP_PROTO=tcp MULTICLOUD_FCAPS_SERVICE_HOST=10.233.12.190 MULTICLOUD_FCAPS_SERVICE_PORT=9011 MULTICLOUD_FCAPS_SERVICE_PORT_MULTICLOUD_FCAPS=9011 MULTICLOUD_FRAMEWORK_PORT=tcp://10.233.31.168:9001 MULTICLOUD_FRAMEWORK_PORT_9001_TCP=tcp://10.233.31.168:9001 MULTICLOUD_FRAMEWORK_PORT_9001_TCP_ADDR=10.233.31.168 MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PORT=9001 MULTICLOUD_FRAMEWORK_PORT_9001_TCP_PROTO=tcp MULTICLOUD_FRAMEWORK_SERVICE_HOST=10.233.31.168 MULTICLOUD_FRAMEWORK_SERVICE_PORT=9001 MULTICLOUD_FRAMEWORK_SERVICE_PORT_MULTICLOUD_FRAMEWORK=9001 MULTICLOUD_K8S_MONGO_READ_PORT=tcp://10.233.25.8:27017 MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP=tcp://10.233.25.8:27017 MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_ADDR=10.233.25.8 MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PORT=27017 MULTICLOUD_K8S_MONGO_READ_PORT_27017_TCP_PROTO=tcp MULTICLOUD_K8S_MONGO_READ_SERVICE_HOST=10.233.25.8 MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT=27017 MULTICLOUD_K8S_MONGO_READ_SERVICE_PORT_MONGO=27017 MULTICLOUD_K8S_PORT=tcp://10.233.61.233:9015 MULTICLOUD_K8S_PORT_9015_TCP=tcp://10.233.61.233:9015 MULTICLOUD_K8S_PORT_9015_TCP_ADDR=10.233.61.233 MULTICLOUD_K8S_PORT_9015_TCP_PORT=9015 MULTICLOUD_K8S_PORT_9015_TCP_PROTO=tcp MULTICLOUD_K8S_SERVICE_HOST=10.233.61.233 MULTICLOUD_K8S_SERVICE_PORT=9015 MULTICLOUD_PIKE_PORT=tcp://10.233.18.35:9007 MULTICLOUD_PIKE_PORT_9007_TCP=tcp://10.233.18.35:9007 MULTICLOUD_PIKE_PORT_9007_TCP_ADDR=10.233.18.35 MULTICLOUD_PIKE_PORT_9007_TCP_PORT=9007 MULTICLOUD_PIKE_PORT_9007_TCP_PROTO=tcp MULTICLOUD_PIKE_SERVICE_HOST=10.233.18.35 MULTICLOUD_PIKE_SERVICE_PORT=9007 MULTICLOUD_PIKE_SERVICE_PORT_MULTICLOUD_PIKE=9007 
MULTICLOUD_STARLINGX_PORT=tcp://10.233.15.157:9009 MULTICLOUD_STARLINGX_PORT_9009_TCP=tcp://10.233.15.157:9009 MULTICLOUD_STARLINGX_PORT_9009_TCP_ADDR=10.233.15.157 MULTICLOUD_STARLINGX_PORT_9009_TCP_PORT=9009 MULTICLOUD_STARLINGX_PORT_9009_TCP_PROTO=tcp MULTICLOUD_STARLINGX_SERVICE_HOST=10.233.15.157 MULTICLOUD_STARLINGX_SERVICE_PORT=9009 MULTICLOUD_STARLINGX_SERVICE_PORT_MULTICLOUD_STARLINGX=9009 MULTICLOUD_TITANIUMCLOUD_PORT=tcp://10.233.41.54:9005 MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP=tcp://10.233.41.54:9005 MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP_ADDR=10.233.41.54 MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP_PORT=9005 MULTICLOUD_TITANIUMCLOUD_PORT_9005_TCP_PROTO=tcp MULTICLOUD_TITANIUMCLOUD_SERVICE_HOST=10.233.41.54 MULTICLOUD_TITANIUMCLOUD_SERVICE_PORT=9005 MULTICLOUD_TITANIUMCLOUD_SERVICE_PORT_MULTICLOUD_TITANIUMCLOUD=9005 MULTICLOUD_VIO_PORT=tcp://10.233.21.155:9004 MULTICLOUD_VIO_PORT_9004_TCP=tcp://10.233.21.155:9004 MULTICLOUD_VIO_PORT_9004_TCP_ADDR=10.233.21.155 MULTICLOUD_VIO_PORT_9004_TCP_PORT=9004 MULTICLOUD_VIO_PORT_9004_TCP_PROTO=tcp MULTICLOUD_VIO_SERVICE_HOST=10.233.21.155 MULTICLOUD_VIO_SERVICE_PORT=9004 MULTICLOUD_VIO_SERVICE_PORT_MULTICLOUD_VIO=9004 MUSIC_PORT=tcp://10.233.24.180:8443 MUSIC_PORT_8443_TCP=tcp://10.233.24.180:8443 MUSIC_PORT_8443_TCP_ADDR=10.233.24.180 MUSIC_PORT_8443_TCP_PORT=8443 MUSIC_PORT_8443_TCP_PROTO=tcp MUSIC_SERVICE_HOST=10.233.24.180 MUSIC_SERVICE_PORT=8443 MUSIC_SERVICE_PORT_HTTPS_API=8443 NBI_MONGOHOST_READ_PORT=tcp://10.233.20.7:27017 NBI_MONGOHOST_READ_PORT_27017_TCP=tcp://10.233.20.7:27017 NBI_MONGOHOST_READ_PORT_27017_TCP_ADDR=10.233.20.7 NBI_MONGOHOST_READ_PORT_27017_TCP_PORT=27017 NBI_MONGOHOST_READ_PORT_27017_TCP_PROTO=tcp NBI_MONGOHOST_READ_SERVICE_HOST=10.233.20.7 NBI_MONGOHOST_READ_SERVICE_PORT=27017 NBI_MONGOHOST_READ_SERVICE_PORT_MONGO=27017 NBI_PORT=tcp://10.233.45.241:8443 NBI_PORT_8443_TCP=tcp://10.233.45.241:8443 NBI_PORT_8443_TCP_ADDR=10.233.45.241 NBI_PORT_8443_TCP_PORT=8443 NBI_PORT_8443_TCP_PROTO=tcp NBI_SERVICE_HOST=10.233.45.241 NBI_SERVICE_PORT=8443 NBI_SERVICE_PORT_API_8443=8443 NENG_SERV_PORT=tcp://10.233.29.34:8080 NENG_SERV_PORT_8080_TCP=tcp://10.233.29.34:8080 NENG_SERV_PORT_8080_TCP_ADDR=10.233.29.34 NENG_SERV_PORT_8080_TCP_PORT=8080 NENG_SERV_PORT_8080_TCP_PROTO=tcp NENG_SERV_SERVICE_HOST=10.233.29.34 NENG_SERV_SERVICE_PORT=8080 NENG_SERV_SERVICE_PORT_NENG_SERV_PORT=8080 NETBOX_APP_PORT=tcp://10.233.12.6:8001 NETBOX_APP_PORT_8001_TCP=tcp://10.233.12.6:8001 NETBOX_APP_PORT_8001_TCP_ADDR=10.233.12.6 NETBOX_APP_PORT_8001_TCP_PORT=8001 NETBOX_APP_PORT_8001_TCP_PROTO=tcp NETBOX_APP_SERVICE_HOST=10.233.12.6 NETBOX_APP_SERVICE_PORT=8001 NETBOX_APP_SERVICE_PORT_NETBOX_APP=8001 NETBOX_NGINX_PORT=tcp://10.233.60.211:8080 NETBOX_NGINX_PORT_8080_TCP=tcp://10.233.60.211:8080 NETBOX_NGINX_PORT_8080_TCP_ADDR=10.233.60.211 NETBOX_NGINX_PORT_8080_TCP_PORT=8080 NETBOX_NGINX_PORT_8080_TCP_PROTO=tcp NETBOX_NGINX_SERVICE_HOST=10.233.60.211 NETBOX_NGINX_SERVICE_PORT=8080 NETBOX_POSTGRES_PORT=tcp://10.233.56.44:5432 NETBOX_POSTGRES_PORT_5432_TCP=tcp://10.233.56.44:5432 NETBOX_POSTGRES_PORT_5432_TCP_ADDR=10.233.56.44 NETBOX_POSTGRES_PORT_5432_TCP_PORT=5432 NETBOX_POSTGRES_PORT_5432_TCP_PROTO=tcp NETBOX_POSTGRES_SERVICE_HOST=10.233.56.44 NETBOX_POSTGRES_SERVICE_PORT=5432 NETBOX_POSTGRES_SERVICE_PORT_NETBOX_POSTGRES=5432 ONAP_APPC_DB_METRICS_PORT=tcp://10.233.43.130:9104 ONAP_APPC_DB_METRICS_PORT_9104_TCP=tcp://10.233.43.130:9104 ONAP_APPC_DB_METRICS_PORT_9104_TCP_ADDR=10.233.43.130 ONAP_APPC_DB_METRICS_PORT_9104_TCP_PORT=9104 
ONAP_APPC_DB_METRICS_PORT_9104_TCP_PROTO=tcp ONAP_APPC_DB_METRICS_SERVICE_HOST=10.233.43.130 ONAP_APPC_DB_METRICS_SERVICE_PORT=9104 ONAP_APPC_DB_METRICS_SERVICE_PORT_METRICS=9104 ONAP_CDS_DB_METRICS_PORT=tcp://10.233.20.31:9104 ONAP_CDS_DB_METRICS_PORT_9104_TCP=tcp://10.233.20.31:9104 ONAP_CDS_DB_METRICS_PORT_9104_TCP_ADDR=10.233.20.31 ONAP_CDS_DB_METRICS_PORT_9104_TCP_PORT=9104 ONAP_CDS_DB_METRICS_PORT_9104_TCP_PROTO=tcp ONAP_CDS_DB_METRICS_SERVICE_HOST=10.233.20.31 ONAP_CDS_DB_METRICS_SERVICE_PORT=9104 ONAP_CDS_DB_METRICS_SERVICE_PORT_METRICS=9104 ONAP_CMSO_DB_METRICS_PORT=tcp://10.233.56.182:9104 ONAP_CMSO_DB_METRICS_PORT_9104_TCP=tcp://10.233.56.182:9104 ONAP_CMSO_DB_METRICS_PORT_9104_TCP_ADDR=10.233.56.182 ONAP_CMSO_DB_METRICS_PORT_9104_TCP_PORT=9104 ONAP_CMSO_DB_METRICS_PORT_9104_TCP_PROTO=tcp ONAP_CMSO_DB_METRICS_SERVICE_HOST=10.233.56.182 ONAP_CMSO_DB_METRICS_SERVICE_PORT=9104 ONAP_CMSO_DB_METRICS_SERVICE_PORT_METRICS=9104 ONAP_DMAAP_DR_DB_METRICS_PORT=tcp://10.233.25.187:9104 ONAP_DMAAP_DR_DB_METRICS_PORT_9104_TCP=tcp://10.233.25.187:9104 ONAP_DMAAP_DR_DB_METRICS_PORT_9104_TCP_ADDR=10.233.25.187 ONAP_DMAAP_DR_DB_METRICS_PORT_9104_TCP_PORT=9104 ONAP_DMAAP_DR_DB_METRICS_PORT_9104_TCP_PROTO=tcp ONAP_DMAAP_DR_DB_METRICS_SERVICE_HOST=10.233.25.187 ONAP_DMAAP_DR_DB_METRICS_SERVICE_PORT=9104 ONAP_DMAAP_DR_DB_METRICS_SERVICE_PORT_METRICS=9104 ONAP_MARIADB_GALERA_METRICS_PORT=tcp://10.233.28.88:9104 ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP=tcp://10.233.28.88:9104 ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_ADDR=10.233.28.88 ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_PORT=9104 ONAP_MARIADB_GALERA_METRICS_PORT_9104_TCP_PROTO=tcp ONAP_MARIADB_GALERA_METRICS_SERVICE_HOST=10.233.28.88 ONAP_MARIADB_GALERA_METRICS_SERVICE_PORT=9104 ONAP_MARIADB_GALERA_METRICS_SERVICE_PORT_METRICS=9104 ONAP_POLICY_MARIADB_METRICS_PORT=tcp://10.233.51.129:9104 ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP=tcp://10.233.51.129:9104 ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_ADDR=10.233.51.129 ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_PORT=9104 ONAP_POLICY_MARIADB_METRICS_PORT_9104_TCP_PROTO=tcp ONAP_POLICY_MARIADB_METRICS_SERVICE_HOST=10.233.51.129 ONAP_POLICY_MARIADB_METRICS_SERVICE_PORT=9104 ONAP_POLICY_MARIADB_METRICS_SERVICE_PORT_METRICS=9104 OOF_CMSO_OPTIMIZER_PORT=tcp://10.233.28.93:7997 OOF_CMSO_OPTIMIZER_PORT_7997_TCP=tcp://10.233.28.93:7997 OOF_CMSO_OPTIMIZER_PORT_7997_TCP_ADDR=10.233.28.93 OOF_CMSO_OPTIMIZER_PORT_7997_TCP_PORT=7997 OOF_CMSO_OPTIMIZER_PORT_7997_TCP_PROTO=tcp OOF_CMSO_OPTIMIZER_SERVICE_HOST=10.233.28.93 OOF_CMSO_OPTIMIZER_SERVICE_PORT=7997 OOF_CMSO_OPTIMIZER_SERVICE_PORT_CMSO=7997 OOF_CMSO_PORT=tcp://10.233.10.206:8080 OOF_CMSO_PORT_8080_TCP=tcp://10.233.10.206:8080 OOF_CMSO_PORT_8080_TCP_ADDR=10.233.10.206 OOF_CMSO_PORT_8080_TCP_PORT=8080 OOF_CMSO_PORT_8080_TCP_PROTO=tcp OOF_CMSO_SERVICE_HOST=10.233.10.206 OOF_CMSO_SERVICE_PORT=8080 OOF_CMSO_SERVICE_PORT_CMSO=8080 OOF_CMSO_TICKETMGT_PORT=tcp://10.233.60.94:7999 OOF_CMSO_TICKETMGT_PORT_7999_TCP=tcp://10.233.60.94:7999 OOF_CMSO_TICKETMGT_PORT_7999_TCP_ADDR=10.233.60.94 OOF_CMSO_TICKETMGT_PORT_7999_TCP_PORT=7999 OOF_CMSO_TICKETMGT_PORT_7999_TCP_PROTO=tcp OOF_CMSO_TICKETMGT_SERVICE_HOST=10.233.60.94 OOF_CMSO_TICKETMGT_SERVICE_PORT=7999 OOF_CMSO_TICKETMGT_SERVICE_PORT_CMSO_TICKETMGT=7999 OOF_CMSO_TOPOLOGY_PORT=tcp://10.233.35.47:7998 OOF_CMSO_TOPOLOGY_PORT_7998_TCP=tcp://10.233.35.47:7998 OOF_CMSO_TOPOLOGY_PORT_7998_TCP_ADDR=10.233.35.47 OOF_CMSO_TOPOLOGY_PORT_7998_TCP_PORT=7998 OOF_CMSO_TOPOLOGY_PORT_7998_TCP_PROTO=tcp 
OOF_CMSO_TOPOLOGY_SERVICE_HOST=10.233.35.47 OOF_CMSO_TOPOLOGY_SERVICE_PORT=7998 OOF_CMSO_TOPOLOGY_SERVICE_PORT_CMSO_TOPOLOGY=7998 OOF_HAS_API_PORT=tcp://10.233.58.134:8091 OOF_HAS_API_PORT_8091_TCP=tcp://10.233.58.134:8091 OOF_HAS_API_PORT_8091_TCP_ADDR=10.233.58.134 OOF_HAS_API_PORT_8091_TCP_PORT=8091 OOF_HAS_API_PORT_8091_TCP_PROTO=tcp OOF_HAS_API_SERVICE_HOST=10.233.58.134 OOF_HAS_API_SERVICE_PORT=8091 OOF_HAS_API_SERVICE_PORT_OOF_HAS_API=8091 OOF_OSDF_PORT=tcp://10.233.44.68:8698 OOF_OSDF_PORT_8698_TCP=tcp://10.233.44.68:8698 OOF_OSDF_PORT_8698_TCP_ADDR=10.233.44.68 OOF_OSDF_PORT_8698_TCP_PORT=8698 OOF_OSDF_PORT_8698_TCP_PROTO=tcp OOF_OSDF_SERVICE_HOST=10.233.44.68 OOF_OSDF_SERVICE_PORT=8698 OOM_CERT_SERVICE_PORT=tcp://10.233.28.202:8443 OOM_CERT_SERVICE_PORT_8443_TCP=tcp://10.233.28.202:8443 OOM_CERT_SERVICE_PORT_8443_TCP_ADDR=10.233.28.202 OOM_CERT_SERVICE_PORT_8443_TCP_PORT=8443 OOM_CERT_SERVICE_PORT_8443_TCP_PROTO=tcp OOM_CERT_SERVICE_SERVICE_HOST=10.233.28.202 OOM_CERT_SERVICE_SERVICE_PORT=8443 OOM_CERT_SERVICE_SERVICE_PORT_HTTPS_HTTP=8443 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin POLICY_APEX_PDP_PORT=tcp://10.233.4.136:6969 POLICY_APEX_PDP_PORT_6969_TCP=tcp://10.233.4.136:6969 POLICY_APEX_PDP_PORT_6969_TCP_ADDR=10.233.4.136 POLICY_APEX_PDP_PORT_6969_TCP_PORT=6969 POLICY_APEX_PDP_PORT_6969_TCP_PROTO=tcp POLICY_APEX_PDP_SERVICE_HOST=10.233.4.136 POLICY_APEX_PDP_SERVICE_PORT=6969 POLICY_APEX_PDP_SERVICE_PORT_POLICY_APEX_PDP=6969 POLICY_API_PORT=tcp://10.233.13.187:6969 POLICY_API_PORT_6969_TCP=tcp://10.233.13.187:6969 POLICY_API_PORT_6969_TCP_ADDR=10.233.13.187 POLICY_API_PORT_6969_TCP_PORT=6969 POLICY_API_PORT_6969_TCP_PROTO=tcp POLICY_API_SERVICE_HOST=10.233.13.187 POLICY_API_SERVICE_PORT=6969 POLICY_API_SERVICE_PORT_POLICY_API=6969 POLICY_CLAMP_BE_PORT=tcp://10.233.47.144:8443 POLICY_CLAMP_BE_PORT_8443_TCP=tcp://10.233.47.144:8443 POLICY_CLAMP_BE_PORT_8443_TCP_ADDR=10.233.47.144 POLICY_CLAMP_BE_PORT_8443_TCP_PORT=8443 POLICY_CLAMP_BE_PORT_8443_TCP_PROTO=tcp POLICY_CLAMP_BE_SERVICE_HOST=10.233.47.144 POLICY_CLAMP_BE_SERVICE_PORT=8443 POLICY_CLAMP_BE_SERVICE_PORT_POLICY_CLAMP_BE=8443 POLICY_CLAMP_FE_PORT=tcp://10.233.35.201:2443 POLICY_CLAMP_FE_PORT_2443_TCP=tcp://10.233.35.201:2443 POLICY_CLAMP_FE_PORT_2443_TCP_ADDR=10.233.35.201 POLICY_CLAMP_FE_PORT_2443_TCP_PORT=2443 POLICY_CLAMP_FE_PORT_2443_TCP_PROTO=tcp POLICY_CLAMP_FE_SERVICE_HOST=10.233.35.201 POLICY_CLAMP_FE_SERVICE_PORT=2443 POLICY_CLAMP_FE_SERVICE_PORT_POLICY_CLAMP_FE=2443 POLICY_DISTRIBUTION_PORT=tcp://10.233.48.132:6969 POLICY_DISTRIBUTION_PORT_6969_TCP=tcp://10.233.48.132:6969 POLICY_DISTRIBUTION_PORT_6969_TCP_ADDR=10.233.48.132 POLICY_DISTRIBUTION_PORT_6969_TCP_PORT=6969 POLICY_DISTRIBUTION_PORT_6969_TCP_PROTO=tcp POLICY_DISTRIBUTION_SERVICE_HOST=10.233.48.132 POLICY_DISTRIBUTION_SERVICE_PORT=6969 POLICY_DISTRIBUTION_SERVICE_PORT_POLICY_DISTRIBUTION=6969 POLICY_DROOLS_PDP_PORT=tcp://10.233.61.114:6969 POLICY_DROOLS_PDP_PORT_6969_TCP=tcp://10.233.61.114:6969 POLICY_DROOLS_PDP_PORT_6969_TCP_ADDR=10.233.61.114 POLICY_DROOLS_PDP_PORT_6969_TCP_PORT=6969 POLICY_DROOLS_PDP_PORT_6969_TCP_PROTO=tcp POLICY_DROOLS_PDP_PORT_9696_TCP=tcp://10.233.61.114:9696 POLICY_DROOLS_PDP_PORT_9696_TCP_ADDR=10.233.61.114 POLICY_DROOLS_PDP_PORT_9696_TCP_PORT=9696 POLICY_DROOLS_PDP_PORT_9696_TCP_PROTO=tcp POLICY_DROOLS_PDP_SERVICE_HOST=10.233.61.114 POLICY_DROOLS_PDP_SERVICE_PORT=6969 POLICY_DROOLS_PDP_SERVICE_PORT_POLICY_DROOLS_PDP_6969=6969 POLICY_DROOLS_PDP_SERVICE_PORT_POLICY_DROOLS_PDP_9696=9696 
POLICY_HANDLER_PORT=tcp://10.233.63.19:80 POLICY_HANDLER_PORT_80_TCP=tcp://10.233.63.19:80 POLICY_HANDLER_PORT_80_TCP_ADDR=10.233.63.19 POLICY_HANDLER_PORT_80_TCP_PORT=80 POLICY_HANDLER_PORT_80_TCP_PROTO=tcp POLICY_HANDLER_SERVICE_HOST=10.233.63.19 POLICY_HANDLER_SERVICE_PORT=80 POLICY_HANDLER_SERVICE_PORT_POLICY_HANDLER=80 POLICY_MARIADB_PORT=tcp://10.233.49.190:3306 POLICY_MARIADB_PORT_3306_TCP=tcp://10.233.49.190:3306 POLICY_MARIADB_PORT_3306_TCP_ADDR=10.233.49.190 POLICY_MARIADB_PORT_3306_TCP_PORT=3306 POLICY_MARIADB_PORT_3306_TCP_PROTO=tcp POLICY_MARIADB_SERVICE_HOST=10.233.49.190 POLICY_MARIADB_SERVICE_PORT=3306 POLICY_MARIADB_SERVICE_PORT_MYSQL=3306 POLICY_PAP_PORT=tcp://10.233.51.13:6969 POLICY_PAP_PORT_6969_TCP=tcp://10.233.51.13:6969 POLICY_PAP_PORT_6969_TCP_ADDR=10.233.51.13 POLICY_PAP_PORT_6969_TCP_PORT=6969 POLICY_PAP_PORT_6969_TCP_PROTO=tcp POLICY_PAP_SERVICE_HOST=10.233.51.13 POLICY_PAP_SERVICE_PORT=6969 POLICY_PAP_SERVICE_PORT_HTTP_API=6969 POLICY_XACML_PDP_PORT=tcp://10.233.53.112:6969 POLICY_XACML_PDP_PORT_6969_TCP=tcp://10.233.53.112:6969 POLICY_XACML_PDP_PORT_6969_TCP_ADDR=10.233.53.112 POLICY_XACML_PDP_PORT_6969_TCP_PORT=6969 POLICY_XACML_PDP_PORT_6969_TCP_PROTO=tcp POLICY_XACML_PDP_SERVICE_HOST=10.233.53.112 POLICY_XACML_PDP_SERVICE_PORT=6969 POLICY_XACML_PDP_SERVICE_PORT_POLICY_XACML_PDP=6969 PORTAL_APP_PORT=tcp://10.233.34.254:8443 PORTAL_APP_PORT_8443_TCP=tcp://10.233.34.254:8443 PORTAL_APP_PORT_8443_TCP_ADDR=10.233.34.254 PORTAL_APP_PORT_8443_TCP_PORT=8443 PORTAL_APP_PORT_8443_TCP_PROTO=tcp PORTAL_APP_SERVICE_HOST=10.233.34.254 PORTAL_APP_SERVICE_PORT=8443 PORTAL_APP_SERVICE_PORT_PORTAL_APP4=8443 PORTAL_CASSANDRA_PORT=tcp://10.233.35.48:9160 PORTAL_CASSANDRA_PORT_7000_TCP=tcp://10.233.35.48:7000 PORTAL_CASSANDRA_PORT_7000_TCP_ADDR=10.233.35.48 PORTAL_CASSANDRA_PORT_7000_TCP_PORT=7000 PORTAL_CASSANDRA_PORT_7000_TCP_PROTO=tcp PORTAL_CASSANDRA_PORT_7001_TCP=tcp://10.233.35.48:7001 PORTAL_CASSANDRA_PORT_7001_TCP_ADDR=10.233.35.48 PORTAL_CASSANDRA_PORT_7001_TCP_PORT=7001 PORTAL_CASSANDRA_PORT_7001_TCP_PROTO=tcp PORTAL_CASSANDRA_PORT_7199_TCP=tcp://10.233.35.48:7199 PORTAL_CASSANDRA_PORT_7199_TCP_ADDR=10.233.35.48 PORTAL_CASSANDRA_PORT_7199_TCP_PORT=7199 PORTAL_CASSANDRA_PORT_7199_TCP_PROTO=tcp PORTAL_CASSANDRA_PORT_9042_TCP=tcp://10.233.35.48:9042 PORTAL_CASSANDRA_PORT_9042_TCP_ADDR=10.233.35.48 PORTAL_CASSANDRA_PORT_9042_TCP_PORT=9042 PORTAL_CASSANDRA_PORT_9042_TCP_PROTO=tcp PORTAL_CASSANDRA_PORT_9160_TCP=tcp://10.233.35.48:9160 PORTAL_CASSANDRA_PORT_9160_TCP_ADDR=10.233.35.48 PORTAL_CASSANDRA_PORT_9160_TCP_PORT=9160 PORTAL_CASSANDRA_PORT_9160_TCP_PROTO=tcp PORTAL_CASSANDRA_SERVICE_HOST=10.233.35.48 PORTAL_CASSANDRA_SERVICE_PORT=9160 PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA2=7000 PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA3=7001 PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA4=7199 PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA5=9042 PORTAL_CASSANDRA_SERVICE_PORT_PORTAL_CASSANDRA=9160 PORTAL_DB_PORT=tcp://10.233.15.19:3306 PORTAL_DB_PORT_3306_TCP=tcp://10.233.15.19:3306 PORTAL_DB_PORT_3306_TCP_ADDR=10.233.15.19 PORTAL_DB_PORT_3306_TCP_PORT=3306 PORTAL_DB_PORT_3306_TCP_PROTO=tcp PORTAL_DB_SERVICE_HOST=10.233.15.19 PORTAL_DB_SERVICE_PORT=3306 PORTAL_DB_SERVICE_PORT_PORTAL_DB=3306 PORTAL_SDK_PORT=tcp://10.233.31.199:8443 PORTAL_SDK_PORT_8443_TCP=tcp://10.233.31.199:8443 PORTAL_SDK_PORT_8443_TCP_ADDR=10.233.31.199 PORTAL_SDK_PORT_8443_TCP_PORT=8443 PORTAL_SDK_PORT_8443_TCP_PROTO=tcp PORTAL_SDK_SERVICE_HOST=10.233.31.199 PORTAL_SDK_SERVICE_PORT=8443 
PORTAL_SDK_SERVICE_PORT_PORTAL_SDK=8443 PORTAL_WIDGET_PORT=tcp://10.233.22.113:8082 PORTAL_WIDGET_PORT_8082_TCP=tcp://10.233.22.113:8082 PORTAL_WIDGET_PORT_8082_TCP_ADDR=10.233.22.113 PORTAL_WIDGET_PORT_8082_TCP_PORT=8082 PORTAL_WIDGET_PORT_8082_TCP_PROTO=tcp PORTAL_WIDGET_SERVICE_HOST=10.233.22.113 PORTAL_WIDGET_SERVICE_PORT=8082 PORTAL_WIDGET_SERVICE_PORT_PORTAL_WIDGET=8082 PWD=/ PYTHON_PIP_VERSION=8.1.2 PYTHON_VERSION=2.7.9-1 ROBOT_PORT=tcp://10.233.62.181:443 ROBOT_PORT_443_TCP=tcp://10.233.62.181:443 ROBOT_PORT_443_TCP_ADDR=10.233.62.181 ROBOT_PORT_443_TCP_PORT=443 ROBOT_PORT_443_TCP_PROTO=tcp ROBOT_SERVICE_HOST=10.233.62.181 ROBOT_SERVICE_PORT=443 ROBOT_SERVICE_PORT_HTTPD=443 SCALA_VERSION=2.12 SDC_BE_EXTERNAL_PORT=tcp://10.233.47.198:8443 SDC_BE_EXTERNAL_PORT_8443_TCP=tcp://10.233.47.198:8443 SDC_BE_EXTERNAL_PORT_8443_TCP_ADDR=10.233.47.198 SDC_BE_EXTERNAL_PORT_8443_TCP_PORT=8443 SDC_BE_EXTERNAL_PORT_8443_TCP_PROTO=tcp SDC_BE_EXTERNAL_SERVICE_HOST=10.233.47.198 SDC_BE_EXTERNAL_SERVICE_PORT=8443 SDC_BE_EXTERNAL_SERVICE_PORT_HTTPS_API=8443 SDC_BE_PORT=tcp://10.233.56.194:8443 SDC_BE_PORT_8080_TCP=tcp://10.233.56.194:8080 SDC_BE_PORT_8080_TCP_ADDR=10.233.56.194 SDC_BE_PORT_8080_TCP_PORT=8080 SDC_BE_PORT_8080_TCP_PROTO=tcp SDC_BE_PORT_8443_TCP=tcp://10.233.56.194:8443 SDC_BE_PORT_8443_TCP_ADDR=10.233.56.194 SDC_BE_PORT_8443_TCP_PORT=8443 SDC_BE_PORT_8443_TCP_PROTO=tcp SDC_BE_SERVICE_HOST=10.233.56.194 SDC_BE_SERVICE_PORT=8443 SDC_BE_SERVICE_PORT_HTTPS_API=8443 SDC_BE_SERVICE_PORT_HTTP_API=8080 SDC_FE_PORT=tcp://10.233.38.57:9443 SDC_FE_PORT_9443_TCP=tcp://10.233.38.57:9443 SDC_FE_PORT_9443_TCP_ADDR=10.233.38.57 SDC_FE_PORT_9443_TCP_PORT=9443 SDC_FE_PORT_9443_TCP_PROTO=tcp SDC_FE_SERVICE_HOST=10.233.38.57 SDC_FE_SERVICE_PORT=9443 SDC_FE_SERVICE_PORT_SDC_FE2=9443 SDC_ONBOARDING_BE_PORT=tcp://10.233.26.191:8445 SDC_ONBOARDING_BE_PORT_8081_TCP=tcp://10.233.26.191:8081 SDC_ONBOARDING_BE_PORT_8081_TCP_ADDR=10.233.26.191 SDC_ONBOARDING_BE_PORT_8081_TCP_PORT=8081 SDC_ONBOARDING_BE_PORT_8081_TCP_PROTO=tcp SDC_ONBOARDING_BE_PORT_8445_TCP=tcp://10.233.26.191:8445 SDC_ONBOARDING_BE_PORT_8445_TCP_ADDR=10.233.26.191 SDC_ONBOARDING_BE_PORT_8445_TCP_PORT=8445 SDC_ONBOARDING_BE_PORT_8445_TCP_PROTO=tcp SDC_ONBOARDING_BE_SERVICE_HOST=10.233.26.191 SDC_ONBOARDING_BE_SERVICE_PORT=8445 SDC_ONBOARDING_BE_SERVICE_PORT_SDC_ONBOARDING_BE2=8081 SDC_ONBOARDING_BE_SERVICE_PORT_SDC_ONBOARDING_BE=8445 SDC_WFD_BE_PORT=tcp://10.233.48.174:8443 SDC_WFD_BE_PORT_8443_TCP=tcp://10.233.48.174:8443 SDC_WFD_BE_PORT_8443_TCP_ADDR=10.233.48.174 SDC_WFD_BE_PORT_8443_TCP_PORT=8443 SDC_WFD_BE_PORT_8443_TCP_PROTO=tcp SDC_WFD_BE_SERVICE_HOST=10.233.48.174 SDC_WFD_BE_SERVICE_PORT=8443 SDC_WFD_BE_SERVICE_PORT_SDC_WFD_BE=8443 SDC_WFD_FE_PORT=tcp://10.233.12.116:8443 SDC_WFD_FE_PORT_8443_TCP=tcp://10.233.12.116:8443 SDC_WFD_FE_PORT_8443_TCP_ADDR=10.233.12.116 SDC_WFD_FE_PORT_8443_TCP_PORT=8443 SDC_WFD_FE_PORT_8443_TCP_PROTO=tcp SDC_WFD_FE_SERVICE_HOST=10.233.12.116 SDC_WFD_FE_SERVICE_PORT=8443 SDC_WFD_FE_SERVICE_PORT_SDC_WFD_FE=8443 SDNC_ANSIBLE_SERVER_PORT=tcp://10.233.26.82:8000 SDNC_ANSIBLE_SERVER_PORT_8000_TCP=tcp://10.233.26.82:8000 SDNC_ANSIBLE_SERVER_PORT_8000_TCP_ADDR=10.233.26.82 SDNC_ANSIBLE_SERVER_PORT_8000_TCP_PORT=8000 SDNC_ANSIBLE_SERVER_PORT_8000_TCP_PROTO=tcp SDNC_ANSIBLE_SERVER_SERVICE_HOST=10.233.26.82 SDNC_ANSIBLE_SERVER_SERVICE_PORT=8000 SDNC_ANSIBLE_SERVER_SERVICE_PORT_SDNC_ANSIBLE_SERVER=8000 SDNC_DGBUILDER_PORT=tcp://10.233.41.81:3000 SDNC_DGBUILDER_PORT_3000_TCP=tcp://10.233.41.81:3000 
SDNC_DGBUILDER_PORT_3000_TCP_ADDR=10.233.41.81 SDNC_DGBUILDER_PORT_3000_TCP_PORT=3000 SDNC_DGBUILDER_PORT_3000_TCP_PROTO=tcp SDNC_DGBUILDER_SERVICE_HOST=10.233.41.81 SDNC_DGBUILDER_SERVICE_PORT=3000 SDNC_DGBUILDER_SERVICE_PORT_DGBUILDER=3000 SDNC_OAM_PORT=tcp://10.233.34.134:8282 SDNC_OAM_PORT_8202_TCP=tcp://10.233.34.134:8202 SDNC_OAM_PORT_8202_TCP_ADDR=10.233.34.134 SDNC_OAM_PORT_8202_TCP_PORT=8202 SDNC_OAM_PORT_8202_TCP_PROTO=tcp SDNC_OAM_PORT_8282_TCP=tcp://10.233.34.134:8282 SDNC_OAM_PORT_8282_TCP_ADDR=10.233.34.134 SDNC_OAM_PORT_8282_TCP_PORT=8282 SDNC_OAM_PORT_8282_TCP_PROTO=tcp SDNC_OAM_SERVICE_HOST=10.233.34.134 SDNC_OAM_SERVICE_PORT=8282 SDNC_OAM_SERVICE_PORT_SDNC_KARAF=8202 SDNC_OAM_SERVICE_PORT_SDNC_RESTCONF_ALT=8282 SDNC_PORT=tcp://10.233.33.97:8443 SDNC_PORT_8443_TCP=tcp://10.233.33.97:8443 SDNC_PORT_8443_TCP_ADDR=10.233.33.97 SDNC_PORT_8443_TCP_PORT=8443 SDNC_PORT_8443_TCP_PROTO=tcp SDNC_SERVICE_HOST=10.233.33.97 SDNC_SERVICE_PORT=8443 SDNC_SERVICE_PORT_SDNC_RESTCONF=8443 SDNC_WEB_SERVICE_PORT=tcp://10.233.34.64:8443 SDNC_WEB_SERVICE_PORT_8443_TCP=tcp://10.233.34.64:8443 SDNC_WEB_SERVICE_PORT_8443_TCP_ADDR=10.233.34.64 SDNC_WEB_SERVICE_PORT_8443_TCP_PORT=8443 SDNC_WEB_SERVICE_PORT_8443_TCP_PROTO=tcp SDNC_WEB_SERVICE_SERVICE_HOST=10.233.34.64 SDNC_WEB_SERVICE_SERVICE_PORT=8443 SDNC_WEB_SERVICE_SERVICE_PORT_SDNC_WEB=8443 SDNRDB_PORT=tcp://10.233.38.68:9200 SDNRDB_PORT_9200_TCP=tcp://10.233.38.68:9200 SDNRDB_PORT_9200_TCP_ADDR=10.233.38.68 SDNRDB_PORT_9200_TCP_PORT=9200 SDNRDB_PORT_9200_TCP_PROTO=tcp SDNRDB_SERVICE_HOST=10.233.38.68 SDNRDB_SERVICE_PORT=9200 SDNRDB_SERVICE_PORT_9300_TCP=tcp://10.233.18.123:9300 SDNRDB_SERVICE_PORT_9300_TCP_ADDR=10.233.18.123 SDNRDB_SERVICE_PORT_9300_TCP_PORT=9300 SDNRDB_SERVICE_PORT_9300_TCP_PROTO=tcp SDNRDB_SERVICE_PORT_ELASTICSEARCH=9200 SDNRDB_SERVICE_SERVICE_HOST=10.233.18.123 SDNRDB_SERVICE_SERVICE_PORT=9300 SDNRDB_SERVICE_SERVICE_PORT_HTTP_TRANSPORT=9300 SHLVL=1 ZULU_OPENJDK_VERSION=8=8.38.0.13 _=/usr/bin/env aaf_locate_url=https://aaf-locate.onap:8095 enableCadi=true ===> User uid=1000(mrkafka) gid=0(root) groups=0(root) ===> Configuring ... SASL is enabled. ===> Running preflight checks ... ===> Check if /var/lib/kafka/data is writable ... ===> Check if Zookeeper is healthy ... [main] INFO io.confluent.admin.utils.ClusterStatus - SASL is enabled. java.security.auth.login.config=/etc/kafka/secrets/jaas/kafka_server_jaas.conf [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:host.name=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.version=1.8.0_212 [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.vendor=Azul Systems, Inc. 
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.class.path=/etc/confluent/docker/docker-utils.jar [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.io.tmpdir=/tmp [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.compiler= [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.name=Linux [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.arch=amd64 [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.version=4.19.0-13-cloud-amd64 [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.name=mrkafka [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.home=/home/mrkafka [main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.dir=/ [main] INFO org.apache.zookeeper.ZooKeeper - Initiating client connection, connectString=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=40000 watcher=io.confluent.admin.utils.ZookeeperConnectionWatcher@30dae81 [main-SendThread(onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.Login - Client successfully logged in. [main-SendThread(onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.client.ZooKeeperSaslClient - Client will use DIGEST-MD5 as SASL mechanism. [main-SendThread(onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Opening socket connection to server onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.16:2181. Will attempt to SASL-authenticate using Login Context section 'Client' [main-SendThread(onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Socket connection established to onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.16:2181, initiating session [main-SendThread(onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181)] INFO org.apache.zookeeper.ClientCnxn - Session establishment complete on server onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local/10.233.76.16:2181, sessionid = 0x200007e2cc00000, negotiated timeout = 40000 [main] INFO org.apache.zookeeper.ZooKeeper - Session: 0x200007e2cc00000 closed [main-EventThread] INFO org.apache.zookeeper.ClientCnxn - EventThread shut down for session: 0x200007e2cc00000 ===> Launching ... ===> Launching kafka ... SLF4J: Class path contains multiple SLF4J bindings. SLF4J: Found binding in [jar:file:/usr/share/java/kafka/slf4j-log4j12-1.7.26.jar!/org/slf4j/impl/StaticLoggerBinder.class] SLF4J: Found binding in [jar:file:/usr/share/java/kafka/kafka11aaf-jar-with-dependencies.jar!/org/slf4j/impl/StaticLoggerBinder.class] SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation. 
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory] [2021-06-17 23:52:06,249] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) [2021-06-17 23:52:06,530] INFO KafkaConfig values: advertised.host.name = null advertised.listeners = EXTERNAL_SASL_PLAINTEXT://10.253.0.28:30491,INTERNAL_SASL_PLAINTEXT://:9092 advertised.port = null alter.config.policy.class.name = null alter.log.dirs.replication.quota.window.num = 11 alter.log.dirs.replication.quota.window.size.seconds = 1 authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer auto.create.topics.enable = true auto.leader.rebalance.enable = true background.threads = 10 broker.id = 1 broker.id.generation.enable = true broker.rack = null client.quota.callback.class = null compression.type = producer connection.failed.authentication.delay.ms = 100 connections.max.idle.ms = 600000 connections.max.reauth.ms = 0 control.plane.listener.name = null controlled.shutdown.enable = true controlled.shutdown.max.retries = 3 controlled.shutdown.retry.backoff.ms = 5000 controller.socket.timeout.ms = 30000 create.topic.policy.class.name = null default.replication.factor = 3 delegation.token.expiry.check.interval.ms = 3600000 delegation.token.expiry.time.ms = 86400000 delegation.token.master.key = null delegation.token.max.lifetime.ms = 604800000 delete.records.purgatory.purge.interval.requests = 1 delete.topic.enable = true fetch.purgatory.purge.interval.requests = 1000 group.initial.rebalance.delay.ms = 3000 group.max.session.timeout.ms = 1800000 group.max.size = 2147483647 group.min.session.timeout.ms = 6000 host.name = inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT inter.broker.protocol.version = 2.3-IV1 kafka.metrics.polling.interval.secs = 10 kafka.metrics.reporters = [] leader.imbalance.check.interval.seconds = 300 leader.imbalance.per.broker.percentage = 10 listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092 log.cleaner.backoff.ms = 15000 log.cleaner.dedupe.buffer.size = 134217728 log.cleaner.delete.retention.ms = 86400000 log.cleaner.enable = true log.cleaner.io.buffer.load.factor = 0.9 log.cleaner.io.buffer.size = 524288 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 log.cleaner.max.compaction.lag.ms = 9223372036854775807 log.cleaner.min.cleanable.ratio = 0.5 log.cleaner.min.compaction.lag.ms = 0 log.cleaner.threads = 1 log.cleanup.policy = [delete] log.dir = /tmp/kafka-logs log.dirs = /var/lib/kafka/data log.flush.interval.messages = 9223372036854775807 log.flush.interval.ms = null log.flush.offset.checkpoint.interval.ms = 60000 log.flush.scheduler.interval.ms = 9223372036854775807 log.flush.start.offset.checkpoint.interval.ms = 60000 log.index.interval.bytes = 4096 log.index.size.max.bytes = 10485760 log.message.downconversion.enable = true log.message.format.version = 2.3-IV1 log.message.timestamp.difference.max.ms = 9223372036854775807 log.message.timestamp.type = CreateTime log.preallocate = false log.retention.bytes = -1 log.retention.check.interval.ms = 300000 log.retention.hours = 168 log.retention.minutes = null log.retention.ms = null log.roll.hours = 168 log.roll.jitter.hours = 0 log.roll.jitter.ms = null log.roll.ms = null log.segment.bytes = 1073741824 log.segment.delete.delay.ms = 60000 max.connections = 2147483647 max.connections.per.ip = 2147483647 max.connections.per.ip.overrides = 
max.incremental.fetch.session.cache.slots = 1000 message.max.bytes = 1000012 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 min.insync.replicas = 1 num.io.threads = 8 num.network.threads = 3 num.partitions = 3 num.recovery.threads.per.data.dir = 5 num.replica.alter.log.dirs.threads = null num.replica.fetchers = 1 offset.metadata.max.bytes = 4096 offsets.commit.required.acks = -1 offsets.commit.timeout.ms = 5000 offsets.load.buffer.size = 5242880 offsets.retention.check.interval.ms = 600000 offsets.retention.minutes = 10080 offsets.topic.compression.codec = 0 offsets.topic.num.partitions = 50 offsets.topic.replication.factor = 3 offsets.topic.segment.bytes = 104857600 password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding password.encoder.iterations = 4096 password.encoder.key.length = 128 password.encoder.keyfactory.algorithm = null password.encoder.old.secret = null password.encoder.secret = null port = 9092 principal.builder.class = null producer.purgatory.purge.interval.requests = 1000 queued.max.request.bytes = -1 queued.max.requests = 500 quota.consumer.default = 9223372036854775807 quota.producer.default = 9223372036854775807 quota.window.num = 11 quota.window.size.seconds = 1 replica.fetch.backoff.ms = 1000 replica.fetch.max.bytes = 1048576 replica.fetch.min.bytes = 1 replica.fetch.response.max.bytes = 10485760 replica.fetch.wait.max.ms = 500 replica.high.watermark.checkpoint.interval.ms = 5000 replica.lag.time.max.ms = 10000 replica.socket.receive.buffer.bytes = 65536 replica.socket.timeout.ms = 30000 replication.quota.window.num = 11 replication.quota.window.size.seconds = 1 request.timeout.ms = 30000 reserved.broker.max.id = 1000 sasl.client.callback.handler.class = null sasl.enabled.mechanisms = [PLAIN] sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.principal.to.local.rules = [DEFAULT] sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.login.callback.handler.class = null sasl.login.class = null sasl.login.refresh.buffer.seconds = 300 sasl.login.refresh.min.period.seconds = 60 sasl.login.refresh.window.factor = 0.8 sasl.login.refresh.window.jitter = 0.05 sasl.mechanism.inter.broker.protocol = PLAIN sasl.server.callback.handler.class = null security.inter.broker.protocol = PLAINTEXT socket.receive.buffer.bytes = 102400 socket.request.max.bytes = 104857600 socket.send.buffer.bytes = 102400 ssl.cipher.suites = [] ssl.client.auth = none ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] ssl.endpoint.identification.algorithm = https ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.principal.mapping.rules = [DEFAULT] ssl.protocol = TLS ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000 transaction.max.timeout.ms = 900000 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 transaction.state.log.load.buffer.size = 5242880 transaction.state.log.min.isr = 1 transaction.state.log.num.partitions = 50 transaction.state.log.replication.factor = 1 transaction.state.log.segment.bytes = 104857600 transactional.id.expiration.ms = 604800000 unclean.leader.election.enable = false 
zookeeper.connect = onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 zookeeper.connection.timeout.ms = 6000 zookeeper.max.in.flight.requests = 10 zookeeper.session.timeout.ms = 6000 zookeeper.set.acl = true zookeeper.sync.time.ms = 2000 (kafka.server.KafkaConfig) [2021-06-17 23:52:06,611] WARN The package io.confluent.support.metrics.collectors.FullCollector for collecting the full set of support metrics could not be loaded, so we are reverting to anonymous, basic metric collection. If you are a Confluent customer, please refer to the Confluent Platform documentation, section Proactive Support, on how to activate full metrics collection. (io.confluent.support.metrics.KafkaSupportConfig) [2021-06-17 23:52:06,612] WARN The support metrics collection feature ("Metrics") of Proactive Support is disabled. (io.confluent.support.metrics.SupportedServerStartable) [2021-06-17 23:52:06,614] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler) [2021-06-17 23:52:06,615] INFO starting (kafka.server.KafkaServer) [2021-06-17 23:52:06,616] INFO Connecting to zookeeper on onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 (kafka.server.KafkaServer) [2021-06-17 23:52:06,642] INFO [ZooKeeperClient Kafka server] Initializing a new session to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181. (kafka.zookeeper.ZooKeeperClient) [2021-06-17 23:52:06,648] INFO Client environment:zookeeper.version=3.4.14-4c25d480e66aadd371de8bd2fd8da255ac140bcf, built on 03/06/2019 16:18 GMT (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,648] INFO Client environment:host.name=onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,648] INFO Client environment:java.version=1.8.0_212 (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,648] INFO Client environment:java.vendor=Azul Systems, Inc. 
(org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,648] INFO Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,649] INFO Client environment:java.class.path=/usr/bin/../share/java/kafka/activation-1.1.1.jar:/usr/bin/../share/java/kafka/aopalliance-repackaged-2.5.0.jar:/usr/bin/../share/java/kafka/argparse4j-0.7.0.jar:/usr/bin/../share/java/kafka/audience-annotations-0.5.0.jar:/usr/bin/../share/java/kafka/avro-1.8.1.jar:/usr/bin/../share/java/kafka/commons-codec-1.11.jar:/usr/bin/../share/java/kafka/commons-compress-1.8.1.jar:/usr/bin/../share/java/kafka/commons-lang3-3.8.1.jar:/usr/bin/../share/java/kafka/commons-logging-1.2.jar:/usr/bin/../share/java/kafka/connect-api-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-basic-auth-extension-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-file-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-json-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-runtime-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/connect-transforms-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/guava-20.0.jar:/usr/bin/../share/java/kafka/hk2-api-2.5.0.jar:/usr/bin/../share/java/kafka/hk2-locator-2.5.0.jar:/usr/bin/../share/java/kafka/hk2-utils-2.5.0.jar:/usr/bin/../share/java/kafka/httpclient-4.5.7.jar:/usr/bin/../share/java/kafka/httpcore-4.4.11.jar:/usr/bin/../share/java/kafka/httpmime-4.5.7.jar:/usr/bin/../share/java/kafka/jackson-annotations-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-core-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-core-asl-1.9.13.jar:/usr/bin/../share/java/kafka/jackson-databind-2.9.9.3.jar:/usr/bin/../share/java/kafka/jackson-dataformat-csv-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-datatype-jdk8-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-base-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-json-provider-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-mapper-asl-1.9.13.jar:/usr/bin/../share/java/kafka/jackson-module-jaxb-annotations-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-module-paranamer-2.9.9.jar:/usr/bin/../share/java/kafka/jackson-module-scala_2.11-2.9.9.jar:/usr/bin/../share/java/kafka/jakarta.annotation-api-1.3.4.jar:/usr/bin/../share/java/kafka/jakarta.inject-2.5.0.jar:/usr/bin/../share/java/kafka/jakarta.ws.rs-api-2.1.5.jar:/usr/bin/../share/java/kafka/javassist-3.22.0-CR2.jar:/usr/bin/../share/java/kafka/javax.servlet-api-3.1.0.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.1.1.jar:/usr/bin/../share/java/kafka/jaxb-api-2.3.0.jar:/usr/bin/../share/java/kafka/jersey-client-2.28.jar:/usr/bin/../share/java/kafka/jersey-common-2.28.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-2.28.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-core-2.28.jar:/usr/bin/../share/java/kafka/jersey-hk2-2.28.jar:/usr/bin/../share/java/kafka/jersey-media-jaxb-2.28.jar:/usr/bin/../share/java/kafka/jersey-server-2.28.jar:/usr/bin/../share/java/kafka/jetty-client-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-continuation-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-http-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-io-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-security-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-server-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-servlet-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-servlets-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jetty-util-9.4.18.v20190429.jar:/usr/bin/../share/java/kafka/jopt-simple-5.0.4.jar:/usr/bin/../share/java
/kafka/jsr305-3.0.2.jar:/usr/bin/../share/java/kafka/kafka-clients-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-log4j-appender-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-examples-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-scala_2.11-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-streams-test-utils-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka-tools-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/kafka.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-javadoc.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-scaladoc.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-sources.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-test-sources.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs-test.jar:/usr/bin/../share/java/kafka/kafka_2.11-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/log4j-1.2.17.jar:/usr/bin/../share/java/kafka/lz4-java-1.6.0.jar:/usr/bin/../share/java/kafka/maven-artifact-3.6.1.jar:/usr/bin/../share/java/kafka/metrics-core-2.2.0.jar:/usr/bin/../share/java/kafka/osgi-resource-locator-1.0.1.jar:/usr/bin/../share/java/kafka/paranamer-2.7.jar:/usr/bin/../share/java/kafka/paranamer-2.8.jar:/usr/bin/../share/java/kafka/plexus-utils-3.2.0.jar:/usr/bin/../share/java/kafka/reflections-0.9.11.jar:/usr/bin/../share/java/kafka/rocksdbjni-5.18.3.jar:/usr/bin/../share/java/kafka/scala-library-2.11.12.jar:/usr/bin/../share/java/kafka/scala-logging_2.11-3.9.0.jar:/usr/bin/../share/java/kafka/scala-reflect-2.11.12.jar:/usr/bin/../share/java/kafka/slf4j-api-1.7.26.jar:/usr/bin/../share/java/kafka/slf4j-log4j12-1.7.26.jar:/usr/bin/../share/java/kafka/snappy-java-1.1.7.3.jar:/usr/bin/../share/java/kafka/spotbugs-annotations-3.1.9.jar:/usr/bin/../share/java/kafka/support-metrics-client-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/support-metrics-common-5.3.1-ccs.jar:/usr/bin/../share/java/kafka/validation-api-2.0.1.Final.jar:/usr/bin/../share/java/kafka/xz-1.5.jar:/usr/bin/../share/java/kafka/zkclient-0.11.jar:/usr/bin/../share/java/kafka/zookeeper-3.4.14.jar:/usr/bin/../share/java/kafka/zstd-jni-1.4.0-1.jar:/usr/bin/../share/java/kafka/kafka11aaf-jar-with-dependencies.jar:/usr/bin/../support-metrics-client/build/dependant-libs-2.12/*:/usr/bin/../support-metrics-client/build/libs/*:/usr/share/java/support-metrics-client/* (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,649] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,649] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,649] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,649] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,649] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,649] INFO Client environment:os.version=4.19.0-13-cloud-amd64 (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,649] INFO Client environment:user.name=mrkafka (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,649] INFO Client environment:user.home=/home/mrkafka (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,649] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,651] INFO Initiating client connection, 
connectString=onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 sessionTimeout=6000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@28701274 (org.apache.zookeeper.ZooKeeper) [2021-06-17 23:52:06,665] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) [2021-06-17 23:52:06,674] INFO Client successfully logged in. (org.apache.zookeeper.Login) [2021-06-17 23:52:06,676] INFO Client will use DIGEST-MD5 as SASL mechanism. (org.apache.zookeeper.client.ZooKeeperSaslClient) [2021-06-17 23:52:06,698] INFO Opening socket connection to server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.69.176:2181. Will attempt to SASL-authenticate using Login Context section 'Client' (org.apache.zookeeper.ClientCnxn) [2021-06-17 23:52:06,704] INFO Socket connection established to onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.69.176:2181, initiating session (org.apache.zookeeper.ClientCnxn) [2021-06-17 23:52:06,723] INFO Session establishment complete on server onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local/10.233.69.176:2181, sessionid = 0x100007a552b0000, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn) [2021-06-17 23:52:06,739] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient) [2021-06-17 23:52:07,065] INFO Cluster ID = P3Yzo-7gQzaXtwUehK5FWw (kafka.server.KafkaServer) [2021-06-17 23:52:07,069] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint) [2021-06-17 23:52:07,124] INFO KafkaConfig values: advertised.host.name = null advertised.listeners = EXTERNAL_SASL_PLAINTEXT://10.253.0.28:30491,INTERNAL_SASL_PLAINTEXT://:9092 advertised.port = null alter.config.policy.class.name = null alter.log.dirs.replication.quota.window.num = 11 alter.log.dirs.replication.quota.window.size.seconds = 1 authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer auto.create.topics.enable = true auto.leader.rebalance.enable = true background.threads = 10 broker.id = 1 broker.id.generation.enable = true broker.rack = null client.quota.callback.class = null compression.type = producer connection.failed.authentication.delay.ms = 100 connections.max.idle.ms = 600000 connections.max.reauth.ms = 0 control.plane.listener.name = null controlled.shutdown.enable = true controlled.shutdown.max.retries = 3 controlled.shutdown.retry.backoff.ms = 5000 controller.socket.timeout.ms = 30000 create.topic.policy.class.name = null default.replication.factor = 3 delegation.token.expiry.check.interval.ms = 3600000 delegation.token.expiry.time.ms = 86400000 delegation.token.master.key = null delegation.token.max.lifetime.ms = 604800000 delete.records.purgatory.purge.interval.requests = 1 delete.topic.enable = true fetch.purgatory.purge.interval.requests = 1000 group.initial.rebalance.delay.ms = 3000 group.max.session.timeout.ms = 1800000 group.max.size = 2147483647 group.min.session.timeout.ms = 6000 host.name = inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT inter.broker.protocol.version = 2.3-IV1 kafka.metrics.polling.interval.secs = 10 kafka.metrics.reporters = [] leader.imbalance.check.interval.seconds = 300 leader.imbalance.per.broker.percentage = 10 
listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092 log.cleaner.backoff.ms = 15000 log.cleaner.dedupe.buffer.size = 134217728 log.cleaner.delete.retention.ms = 86400000 log.cleaner.enable = true log.cleaner.io.buffer.load.factor = 0.9 log.cleaner.io.buffer.size = 524288 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 log.cleaner.max.compaction.lag.ms = 9223372036854775807 log.cleaner.min.cleanable.ratio = 0.5 log.cleaner.min.compaction.lag.ms = 0 log.cleaner.threads = 1 log.cleanup.policy = [delete] log.dir = /tmp/kafka-logs log.dirs = /var/lib/kafka/data log.flush.interval.messages = 9223372036854775807 log.flush.interval.ms = null log.flush.offset.checkpoint.interval.ms = 60000 log.flush.scheduler.interval.ms = 9223372036854775807 log.flush.start.offset.checkpoint.interval.ms = 60000 log.index.interval.bytes = 4096 log.index.size.max.bytes = 10485760 log.message.downconversion.enable = true log.message.format.version = 2.3-IV1 log.message.timestamp.difference.max.ms = 9223372036854775807 log.message.timestamp.type = CreateTime log.preallocate = false log.retention.bytes = -1 log.retention.check.interval.ms = 300000 log.retention.hours = 168 log.retention.minutes = null log.retention.ms = null log.roll.hours = 168 log.roll.jitter.hours = 0 log.roll.jitter.ms = null log.roll.ms = null log.segment.bytes = 1073741824 log.segment.delete.delay.ms = 60000 max.connections = 2147483647 max.connections.per.ip = 2147483647 max.connections.per.ip.overrides = max.incremental.fetch.session.cache.slots = 1000 message.max.bytes = 1000012 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 min.insync.replicas = 1 num.io.threads = 8 num.network.threads = 3 num.partitions = 3 num.recovery.threads.per.data.dir = 5 num.replica.alter.log.dirs.threads = null num.replica.fetchers = 1 offset.metadata.max.bytes = 4096 offsets.commit.required.acks = -1 offsets.commit.timeout.ms = 5000 offsets.load.buffer.size = 5242880 offsets.retention.check.interval.ms = 600000 offsets.retention.minutes = 10080 offsets.topic.compression.codec = 0 offsets.topic.num.partitions = 50 offsets.topic.replication.factor = 3 offsets.topic.segment.bytes = 104857600 password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding password.encoder.iterations = 4096 password.encoder.key.length = 128 password.encoder.keyfactory.algorithm = null password.encoder.old.secret = null password.encoder.secret = null port = 9092 principal.builder.class = null producer.purgatory.purge.interval.requests = 1000 queued.max.request.bytes = -1 queued.max.requests = 500 quota.consumer.default = 9223372036854775807 quota.producer.default = 9223372036854775807 quota.window.num = 11 quota.window.size.seconds = 1 replica.fetch.backoff.ms = 1000 replica.fetch.max.bytes = 1048576 replica.fetch.min.bytes = 1 replica.fetch.response.max.bytes = 10485760 replica.fetch.wait.max.ms = 500 replica.high.watermark.checkpoint.interval.ms = 5000 replica.lag.time.max.ms = 10000 replica.socket.receive.buffer.bytes = 65536 replica.socket.timeout.ms = 30000 replication.quota.window.num = 11 replication.quota.window.size.seconds = 1 request.timeout.ms = 30000 reserved.broker.max.id = 1000 sasl.client.callback.handler.class = null sasl.enabled.mechanisms = [PLAIN] sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 
sasl.kerberos.principal.to.local.rules = [DEFAULT] sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.login.callback.handler.class = null sasl.login.class = null sasl.login.refresh.buffer.seconds = 300 sasl.login.refresh.min.period.seconds = 60 sasl.login.refresh.window.factor = 0.8 sasl.login.refresh.window.jitter = 0.05 sasl.mechanism.inter.broker.protocol = PLAIN sasl.server.callback.handler.class = null security.inter.broker.protocol = PLAINTEXT socket.receive.buffer.bytes = 102400 socket.request.max.bytes = 104857600 socket.send.buffer.bytes = 102400 ssl.cipher.suites = [] ssl.client.auth = none ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] ssl.endpoint.identification.algorithm = https ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.principal.mapping.rules = [DEFAULT] ssl.protocol = TLS ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000 transaction.max.timeout.ms = 900000 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 transaction.state.log.load.buffer.size = 5242880 transaction.state.log.min.isr = 1 transaction.state.log.num.partitions = 50 transaction.state.log.replication.factor = 1 transaction.state.log.segment.bytes = 104857600 transactional.id.expiration.ms = 604800000 unclean.leader.election.enable = false zookeeper.connect = onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 zookeeper.connection.timeout.ms = 6000 zookeeper.max.in.flight.requests = 10 zookeeper.session.timeout.ms = 6000 zookeeper.set.acl = true zookeeper.sync.time.ms = 2000 (kafka.server.KafkaConfig) [2021-06-17 23:52:07,134] INFO KafkaConfig values: advertised.host.name = null advertised.listeners = EXTERNAL_SASL_PLAINTEXT://10.253.0.28:30491,INTERNAL_SASL_PLAINTEXT://:9092 advertised.port = null alter.config.policy.class.name = null alter.log.dirs.replication.quota.window.num = 11 alter.log.dirs.replication.quota.window.size.seconds = 1 authorizer.class.name = org.onap.dmaap.kafkaAuthorize.KafkaCustomAuthorizer auto.create.topics.enable = true auto.leader.rebalance.enable = true background.threads = 10 broker.id = 1 broker.id.generation.enable = true broker.rack = null client.quota.callback.class = null compression.type = producer connection.failed.authentication.delay.ms = 100 connections.max.idle.ms = 600000 connections.max.reauth.ms = 0 control.plane.listener.name = null controlled.shutdown.enable = true controlled.shutdown.max.retries = 3 controlled.shutdown.retry.backoff.ms = 5000 controller.socket.timeout.ms = 30000 create.topic.policy.class.name = null default.replication.factor = 3 delegation.token.expiry.check.interval.ms = 3600000 delegation.token.expiry.time.ms = 86400000 delegation.token.master.key = null delegation.token.max.lifetime.ms = 604800000 delete.records.purgatory.purge.interval.requests = 1 delete.topic.enable = true fetch.purgatory.purge.interval.requests = 1000 group.initial.rebalance.delay.ms = 3000 group.max.session.timeout.ms = 1800000 group.max.size = 2147483647 group.min.session.timeout.ms 
= 6000 host.name = inter.broker.listener.name = INTERNAL_SASL_PLAINTEXT inter.broker.protocol.version = 2.3-IV1 kafka.metrics.polling.interval.secs = 10 kafka.metrics.reporters = [] leader.imbalance.check.interval.seconds = 300 leader.imbalance.per.broker.percentage = 10 listener.security.protocol.map = INTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT,EXTERNAL_SASL_PLAINTEXT:SASL_PLAINTEXT listeners = EXTERNAL_SASL_PLAINTEXT://0.0.0.0:9091,INTERNAL_SASL_PLAINTEXT://0.0.0.0:9092 log.cleaner.backoff.ms = 15000 log.cleaner.dedupe.buffer.size = 134217728 log.cleaner.delete.retention.ms = 86400000 log.cleaner.enable = true log.cleaner.io.buffer.load.factor = 0.9 log.cleaner.io.buffer.size = 524288 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 log.cleaner.max.compaction.lag.ms = 9223372036854775807 log.cleaner.min.cleanable.ratio = 0.5 log.cleaner.min.compaction.lag.ms = 0 log.cleaner.threads = 1 log.cleanup.policy = [delete] log.dir = /tmp/kafka-logs log.dirs = /var/lib/kafka/data log.flush.interval.messages = 9223372036854775807 log.flush.interval.ms = null log.flush.offset.checkpoint.interval.ms = 60000 log.flush.scheduler.interval.ms = 9223372036854775807 log.flush.start.offset.checkpoint.interval.ms = 60000 log.index.interval.bytes = 4096 log.index.size.max.bytes = 10485760 log.message.downconversion.enable = true log.message.format.version = 2.3-IV1 log.message.timestamp.difference.max.ms = 9223372036854775807 log.message.timestamp.type = CreateTime log.preallocate = false log.retention.bytes = -1 log.retention.check.interval.ms = 300000 log.retention.hours = 168 log.retention.minutes = null log.retention.ms = null log.roll.hours = 168 log.roll.jitter.hours = 0 log.roll.jitter.ms = null log.roll.ms = null log.segment.bytes = 1073741824 log.segment.delete.delay.ms = 60000 max.connections = 2147483647 max.connections.per.ip = 2147483647 max.connections.per.ip.overrides = max.incremental.fetch.session.cache.slots = 1000 message.max.bytes = 1000012 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 min.insync.replicas = 1 num.io.threads = 8 num.network.threads = 3 num.partitions = 3 num.recovery.threads.per.data.dir = 5 num.replica.alter.log.dirs.threads = null num.replica.fetchers = 1 offset.metadata.max.bytes = 4096 offsets.commit.required.acks = -1 offsets.commit.timeout.ms = 5000 offsets.load.buffer.size = 5242880 offsets.retention.check.interval.ms = 600000 offsets.retention.minutes = 10080 offsets.topic.compression.codec = 0 offsets.topic.num.partitions = 50 offsets.topic.replication.factor = 3 offsets.topic.segment.bytes = 104857600 password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding password.encoder.iterations = 4096 password.encoder.key.length = 128 password.encoder.keyfactory.algorithm = null password.encoder.old.secret = null password.encoder.secret = null port = 9092 principal.builder.class = null producer.purgatory.purge.interval.requests = 1000 queued.max.request.bytes = -1 queued.max.requests = 500 quota.consumer.default = 9223372036854775807 quota.producer.default = 9223372036854775807 quota.window.num = 11 quota.window.size.seconds = 1 replica.fetch.backoff.ms = 1000 replica.fetch.max.bytes = 1048576 replica.fetch.min.bytes = 1 replica.fetch.response.max.bytes = 10485760 replica.fetch.wait.max.ms = 500 replica.high.watermark.checkpoint.interval.ms = 5000 replica.lag.time.max.ms = 10000 replica.socket.receive.buffer.bytes = 65536 replica.socket.timeout.ms = 30000 replication.quota.window.num = 11 
replication.quota.window.size.seconds = 1 request.timeout.ms = 30000 reserved.broker.max.id = 1000 sasl.client.callback.handler.class = null sasl.enabled.mechanisms = [PLAIN] sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.principal.to.local.rules = [DEFAULT] sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.login.callback.handler.class = null sasl.login.class = null sasl.login.refresh.buffer.seconds = 300 sasl.login.refresh.min.period.seconds = 60 sasl.login.refresh.window.factor = 0.8 sasl.login.refresh.window.jitter = 0.05 sasl.mechanism.inter.broker.protocol = PLAIN sasl.server.callback.handler.class = null security.inter.broker.protocol = PLAINTEXT socket.receive.buffer.bytes = 102400 socket.request.max.bytes = 104857600 socket.send.buffer.bytes = 102400 ssl.cipher.suites = [] ssl.client.auth = none ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] ssl.endpoint.identification.algorithm = https ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.principal.mapping.rules = [DEFAULT] ssl.protocol = TLS ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000 transaction.max.timeout.ms = 900000 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 transaction.state.log.load.buffer.size = 5242880 transaction.state.log.min.isr = 1 transaction.state.log.num.partitions = 50 transaction.state.log.replication.factor = 1 transaction.state.log.segment.bytes = 104857600 transactional.id.expiration.ms = 604800000 unclean.leader.election.enable = false zookeeper.connect = onap-message-router-zookeeper-0.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-1.message-router-zookeeper.onap.svc.cluster.local:2181,onap-message-router-zookeeper-2.message-router-zookeeper.onap.svc.cluster.local:2181 zookeeper.connection.timeout.ms = 6000 zookeeper.max.in.flight.requests = 10 zookeeper.session.timeout.ms = 6000 zookeeper.set.acl = true zookeeper.sync.time.ms = 2000 (kafka.server.KafkaConfig) [2021-06-17 23:52:07,161] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2021-06-17 23:52:07,161] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2021-06-17 23:52:07,163] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) [2021-06-17 23:52:07,198] INFO Loading logs. (kafka.log.LogManager) [2021-06-17 23:52:07,209] INFO Logs loading complete in 11 ms. (kafka.log.LogManager) [2021-06-17 23:52:07,226] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager) [2021-06-17 23:52:07,229] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager) [2021-06-17 23:52:07,231] INFO Starting the log cleaner (kafka.log.LogCleaner) [2021-06-17 23:52:07,294] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner) [2021-06-17 23:52:07,642] INFO Awaiting socket connections on 0.0.0.0:9091. (kafka.network.Acceptor) [2021-06-17 23:52:07,661] INFO Successfully logged in. 
(org.apache.kafka.common.security.authenticator.AbstractLogin) [2021-06-17 23:52:07,685] INFO [SocketServer brokerId=1] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9091,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT) (kafka.network.SocketServer) [2021-06-17 23:52:07,686] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor) [2021-06-17 23:52:07,713] INFO [SocketServer brokerId=1] Created data-plane acceptor and processors for endpoint : EndPoint(0.0.0.0,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT) (kafka.network.SocketServer) [2021-06-17 23:52:07,714] INFO [SocketServer brokerId=1] Started 2 acceptor threads for data-plane (kafka.network.SocketServer) [2021-06-17 23:52:07,735] INFO [ExpirationReaper-1-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2021-06-17 23:52:07,736] INFO [ExpirationReaper-1-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2021-06-17 23:52:07,737] INFO [ExpirationReaper-1-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2021-06-17 23:52:07,738] INFO [ExpirationReaper-1-ElectPreferredLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2021-06-17 23:52:07,750] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler) [2021-06-17 23:52:07,816] INFO Creating /brokers/ids/1 (is it secure? true) (kafka.zk.KafkaZkClient) [2021-06-17 23:52:07,836] INFO Stat of the created znode at /brokers/ids/1 is: 12884901932,12884901932,1623973927824,1623973927824,1,0,0,72058119452819456,365,0,12884901932 (kafka.zk.KafkaZkClient) [2021-06-17 23:52:07,837] INFO Registered broker 1 at path /brokers/ids/1 with addresses: ArrayBuffer(EndPoint(10.253.0.28,30491,ListenerName(EXTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT), EndPoint(onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local,9092,ListenerName(INTERNAL_SASL_PLAINTEXT),SASL_PLAINTEXT)), czxid (broker epoch): 12884901932 (kafka.zk.KafkaZkClient) [2021-06-17 23:52:07,838] WARN No meta.properties file under dir /var/lib/kafka/data/meta.properties (kafka.server.BrokerMetadataCheckpoint) [2021-06-17 23:52:07,884] INFO [ControllerEventThread controllerId=1] Starting (kafka.controller.ControllerEventManager$ControllerEventThread) [2021-06-17 23:52:07,887] INFO [ExpirationReaper-1-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2021-06-17 23:52:07,891] INFO [ExpirationReaper-1-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2021-06-17 23:52:07,890] INFO [ExpirationReaper-1-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2021-06-17 23:52:07,906] DEBUG [Controller id=1] Broker 2 has been elected as the controller, so stopping the election process. (kafka.controller.KafkaController) [2021-06-17 23:52:07,911] INFO [GroupCoordinator 1]: Starting up. (kafka.coordinator.group.GroupCoordinator) [2021-06-17 23:52:07,929] INFO [GroupCoordinator 1]: Startup complete. (kafka.coordinator.group.GroupCoordinator) [2021-06-17 23:52:07,930] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-17 23:52:07,947] INFO [ProducerId Manager 1]: Acquired new producerId block (brokerId:1,blockStartProducerId:10000,blockEndProducerId:10999) by writing to Zk with path version 11 (kafka.coordinator.transaction.ProducerIdManager) [2021-06-17 23:52:07,968] INFO [TransactionCoordinator id=1] Starting up. (kafka.coordinator.transaction.TransactionCoordinator) [2021-06-17 23:52:07,969] INFO [Transaction Marker Channel Manager 1]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager) [2021-06-17 23:52:07,969] INFO [TransactionCoordinator id=1] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator) [2021-06-17 23:52:08,001] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) [2021-06-17 23:52:08,021] INFO [SocketServer brokerId=1] Started data-plane processors for 2 acceptors (kafka.network.SocketServer) [2021-06-17 23:52:08,024] INFO Kafka version: 5.3.1-ccs (org.apache.kafka.common.utils.AppInfoParser) [2021-06-17 23:52:08,024] INFO Kafka commitId: 03799faf9878a999 (org.apache.kafka.common.utils.AppInfoParser) [2021-06-17 23:52:08,024] INFO Kafka startTimeMs: 1623973928022 (org.apache.kafka.common.utils.AppInfoParser) [2021-06-17 23:52:08,025] INFO [KafkaServer id=1] started (kafka.server.KafkaServer) 2021-06-17T23:53:49.469+0000 INIT [cadi] Loading CADI Properties from /opt/app/osaaf/local/org.onap.dmaap.mr.location.props 2021-06-17T23:53:49.469+0000 INIT [cadi] Loading CADI Properties from /opt/app/osaaf/local/org.onap.dmaap.mr.cred.props 2021-06-17T23:53:49.472+0000 INIT [cadi] cadi_keyfile points to /opt/app/osaaf/local/org.onap.dmaap.mr.keyfile 2021-06-17T23:53:50.212+0000 INIT [cadi] cadi_protocols is set to TLSv1.1,TLSv1.2 2021-06-17T23:53:50.373+0000 INIT [cadi] AAFLocator for https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1 could not be created. 
java.net.URISyntaxException: Malformed escape pair at index 36: https://aaf-locate.onap:8095/locate/%CNS.%AAF_NS.service:2.1
2021-06-17T23:53:50.373+0000 ERROR [cadi] Null Locator passed [Ljava.lang.Object;@53e064c2
org.onap.aaf.cadi.LocatorException: Null Locator passed
	at org.onap.aaf.cadi.http.HMangr.<init>(HMangr.java:53)
	at org.onap.aaf.cadi.aaf.v2_0.AAFConHttp.<init>(AAFConHttp.java:54)
	at org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider.setup(Cadi3AAFProvider.java:141)
	at org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider.<init>(Cadi3AAFProvider.java:111)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at java.lang.Class.newInstance(Class.java:442)
	at java.util.ServiceLoader$LazyIterator.nextService(ServiceLoader.java:380)
	at java.util.ServiceLoader$LazyIterator.next(ServiceLoader.java:404)
	at java.util.ServiceLoader$1.next(ServiceLoader.java:480)
	at org.onap.dmaap.commonauth.kafka.base.authorization.AuthorizationProviderFactory.<init>(AuthorizationProviderFactory.java:34)
	at org.onap.dmaap.commonauth.kafka.base.authorization.AuthorizationProviderFactory.<clinit>(AuthorizationProviderFactory.java:29)
	at org.onap.dmaap.kafkaAuthorize.PlainSaslServer1.evaluateResponse(PlainSaslServer1.java:106)
	at org.apache.kafka.common.security.authenticator.SaslServerAuthenticator.handleSaslToken(SaslServerAuthenticator.java:451)
	at org.apache.kafka.common.security.authenticator.SaslServerAuthenticator.authenticate(SaslServerAuthenticator.java:291)
	at org.apache.kafka.common.network.KafkaChannel.prepare(KafkaChannel.java:173)
	at org.apache.kafka.common.network.Selector.pollSelectionKeys(Selector.java:547)
	at org.apache.kafka.common.network.Selector.poll(Selector.java:483)
	at kafka.network.Processor.poll(SocketServer.scala:863)
	at kafka.network.Processor.run(SocketServer.scala:762)
	at java.lang.Thread.run(Thread.java:748)
[2021-06-17 23:53:50,375] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-17 23:53:50,375] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:02:07,928] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 0 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:02:59,702] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 2 from controller 2 epoch 8 for partition POLICY-PDP-PAP-1 (state.change.logger) [2021-06-18 00:02:59,702] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 2 from controller 2 epoch 8 for partition POLICY-PDP-PAP-2 (state.change.logger) [2021-06-18 00:02:59,702] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 2 from controller 2 epoch 8 for partition POLICY-PDP-PAP-0 (state.change.logger) [2021-06-18 00:02:59,712] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 2 from controller 2 epoch 8 starting the become-leader transition for partition POLICY-PDP-PAP-0 (state.change.logger) [2021-06-18 00:02:59,714] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(POLICY-PDP-PAP-0) (kafka.server.ReplicaFetcherManager) [2021-06-18 00:02:59,789] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:02:59,799] INFO [Log partition=POLICY-PDP-PAP-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 66 ms (kafka.log.Log) [2021-06-18 00:02:59,802] INFO Created log for partition POLICY-PDP-PAP-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:02:59,804] INFO [Partition POLICY-PDP-PAP-0 broker=1] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-0 (kafka.cluster.Partition) [2021-06-18 00:02:59,806] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:02:59,807] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:02:59,807] INFO Replica loaded for partition POLICY-PDP-PAP-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:02:59,809] INFO [Partition POLICY-PDP-PAP-0 broker=1] POLICY-PDP-PAP-0 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:02:59,825] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 2 for partition POLICY-PDP-PAP-0 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:02:59,826] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 2 from controller 2 epoch 8 for the become-leader transition for partition POLICY-PDP-PAP-0 (state.change.logger) [2021-06-18 00:02:59,827] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 2 from controller 2 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-1 with leader 2 (state.change.logger) [2021-06-18 00:02:59,827] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 2 from controller 2 epoch 8 starting the become-follower transition for partition POLICY-PDP-PAP-2 with leader 0 (state.change.logger) [2021-06-18 00:02:59,829] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:02:59,830] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:02:59,842] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:02:59,845] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:02:59,846] INFO Created log for partition POLICY-PDP-PAP-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:02:59,847] INFO [Partition POLICY-PDP-PAP-1 broker=1] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-1 (kafka.cluster.Partition) [2021-06-18 00:02:59,847] INFO Replica loaded for partition POLICY-PDP-PAP-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:02:59,848] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:02:59,860] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:02:59,862] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:02:59,863] INFO Created log for partition POLICY-PDP-PAP-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:02:59,864] INFO [Partition POLICY-PDP-PAP-2 broker=1] No checkpointed highwatermark is found for partition POLICY-PDP-PAP-2 (kafka.cluster.Partition) [2021-06-18 00:02:59,864] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:02:59,864] INFO Replica loaded for partition POLICY-PDP-PAP-2 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:02:59,865] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(POLICY-PDP-PAP-2, POLICY-PDP-PAP-1) (kafka.server.ReplicaFetcherManager) [2021-06-18 00:02:59,866] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 2 for partition POLICY-PDP-PAP-2 with leader 0 (state.change.logger) [2021-06-18 00:02:59,866] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 2 for partition POLICY-PDP-PAP-1 with leader 2 (state.change.logger) [2021-06-18 00:02:59,869] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-2 as part of become-follower request with correlation id 2 from controller 2 epoch 8 with leader 0 (state.change.logger) [2021-06-18 00:02:59,869] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition POLICY-PDP-PAP-1 as part of become-follower request with correlation id 2 from controller 2 epoch 8 with leader 2 (state.change.logger) [2021-06-18 00:02:59,892] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread) [2021-06-18 00:02:59,895] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker BrokerEndPoint(id=2, 
host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-PDP-PAP-1 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager) [2021-06-18 00:02:59,899] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-PDP-PAP-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager) [2021-06-18 00:02:59,900] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Starting (kafka.server.ReplicaFetcherThread) [2021-06-18 00:02:59,901] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 2 for partition POLICY-PDP-PAP-2 with leader 0 (state.change.logger) [2021-06-18 00:02:59,902] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 2 for partition POLICY-PDP-PAP-1 with leader 2 (state.change.logger) [2021-06-18 00:02:59,903] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 2 from controller 2 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-1 with leader 2 (state.change.logger) [2021-06-18 00:02:59,903] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition POLICY-PDP-PAP-1 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:02:59,903] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 2 from controller 2 epoch 8 for the become-follower transition for partition POLICY-PDP-PAP-2 with leader 0 (state.change.logger) [2021-06-18 00:02:59,903] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition POLICY-PDP-PAP-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:02:59,908] INFO [Log partition=POLICY-PDP-PAP-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:02:59,907] INFO [Log partition=POLICY-PDP-PAP-1, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:02:59,929] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-PDP-PAP-1 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 3 (state.change.logger) [2021-06-18 00:02:59,929] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-PDP-PAP-2 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 3 (state.change.logger) [2021-06-18 00:02:59,929] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-PDP-PAP-0 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 3 (state.change.logger) [2021-06-18 00:02:59,961] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition POLICY-PDP-PAP-2 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. 
[2021-06-18 00:03:00,042] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:00,043] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:00,900] INFO Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:00,900] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:05,848] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-13 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-46 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-9 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-42 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-21 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-17 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-30 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-26 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-5 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-38 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request
PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-1 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-34 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-16 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-45 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-12 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-41 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-24 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-20 (state.change.logger) [2021-06-18 00:03:05,849] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-49 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-0 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-29 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-25 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, 
isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-8 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-37 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-4 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-33 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-15 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-48 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-11 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-44 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-23 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-19 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-32 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-28 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-7 
(state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-40 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-3 (state.change.logger) [2021-06-18 00:03:05,850] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-36 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-47 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-14 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-43 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-10 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,0,2, zkVersion=0, replicas=1,0,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-22 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-18 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-31 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-27 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,2,1, zkVersion=0, replicas=0,2,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-39 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request 
PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-6 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,1,0, zkVersion=0, replicas=2,1,0, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-35 (state.change.logger) [2021-06-18 00:03:05,851] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 4 from controller 2 epoch 8 for partition __consumer_offsets-2 (state.change.logger) [2021-06-18 00:03:05,890] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-10 (state.change.logger) [2021-06-18 00:03:05,890] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-7 (state.change.logger) [2021-06-18 00:03:05,890] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-4 (state.change.logger) [2021-06-18 00:03:05,890] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-1 (state.change.logger) [2021-06-18 00:03:05,890] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-49 (state.change.logger) [2021-06-18 00:03:05,890] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-46 (state.change.logger) [2021-06-18 00:03:05,890] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-43 (state.change.logger) [2021-06-18 00:03:05,891] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-40 (state.change.logger) [2021-06-18 00:03:05,891] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-37 (state.change.logger) [2021-06-18 00:03:05,891] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-34 (state.change.logger) [2021-06-18 00:03:05,891] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-31 (state.change.logger) [2021-06-18 00:03:05,891] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-19 (state.change.logger) [2021-06-18 00:03:05,891] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-28 
(state.change.logger) [2021-06-18 00:03:05,891] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-16 (state.change.logger) [2021-06-18 00:03:05,891] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-25 (state.change.logger) [2021-06-18 00:03:05,891] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-22 (state.change.logger) [2021-06-18 00:03:05,891] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-leader transition for partition __consumer_offsets-13 (state.change.logger) [2021-06-18 00:03:05,892] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(__consumer_offsets-22, __consumer_offsets-4, __consumer_offsets-7, __consumer_offsets-46, __consumer_offsets-25, __consumer_offsets-49, __consumer_offsets-16, __consumer_offsets-28, __consumer_offsets-31, __consumer_offsets-37, __consumer_offsets-19, __consumer_offsets-13, __consumer_offsets-43, __consumer_offsets-1, __consumer_offsets-34, __consumer_offsets-10, __consumer_offsets-40) (kafka.server.ReplicaFetcherManager) [2021-06-18 00:03:05,906] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:05,909] INFO [Log partition=__consumer_offsets-10, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:03:05,910] INFO Created log for partition __consumer_offsets-10 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:05,912] INFO [Partition __consumer_offsets-10 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-10 (kafka.cluster.Partition) [2021-06-18 00:03:05,913] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,913] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,913] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,913] INFO [Partition __consumer_offsets-10 broker=1] __consumer_offsets-10 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:05,916] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-10 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:05,928] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:05,931] INFO [Log partition=__consumer_offsets-7, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log) [2021-06-18 00:03:05,932] INFO Created log for partition __consumer_offsets-7 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:05,932] INFO [Partition __consumer_offsets-7 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-7 (kafka.cluster.Partition) [2021-06-18 00:03:05,932] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,932] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,932] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,933] INFO [Partition __consumer_offsets-7 broker=1] __consumer_offsets-7 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:05,935] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-7 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:05,950] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:05,954] INFO [Log partition=__consumer_offsets-4, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 16 ms (kafka.log.Log) [2021-06-18 00:03:05,955] INFO Created log for partition __consumer_offsets-4 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:05,955] INFO [Partition __consumer_offsets-4 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-4 (kafka.cluster.Partition) [2021-06-18 00:03:05,955] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,956] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,956] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,956] INFO [Partition __consumer_offsets-4 broker=1] __consumer_offsets-4 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:05,960] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-4 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:05,974] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:05,977] INFO [Log partition=__consumer_offsets-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log) [2021-06-18 00:03:05,978] INFO Created log for partition __consumer_offsets-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:05,978] INFO [Partition __consumer_offsets-1 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-1 (kafka.cluster.Partition) [2021-06-18 00:03:05,978] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,978] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,978] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:05,979] INFO [Partition __consumer_offsets-1 broker=1] __consumer_offsets-1 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:05,982] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-1 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:05,996] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,000] INFO [Log partition=__consumer_offsets-49, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log) [2021-06-18 00:03:06,000] INFO Created log for partition __consumer_offsets-49 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,001] INFO [Partition __consumer_offsets-49 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-49 (kafka.cluster.Partition) [2021-06-18 00:03:06,001] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,001] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,001] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,002] INFO [Partition __consumer_offsets-49 broker=1] __consumer_offsets-49 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,004] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-49 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,017] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,020] INFO [Log partition=__consumer_offsets-46, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log) [2021-06-18 00:03:06,021] INFO Created log for partition __consumer_offsets-46 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,022] INFO [Partition __consumer_offsets-46 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-46 (kafka.cluster.Partition) [2021-06-18 00:03:06,022] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,022] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,022] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,022] INFO [Partition __consumer_offsets-46 broker=1] __consumer_offsets-46 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,025] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-46 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,042] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,046] INFO [Log partition=__consumer_offsets-43, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 17 ms (kafka.log.Log) [2021-06-18 00:03:06,046] INFO Created log for partition __consumer_offsets-43 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,047] INFO [Partition __consumer_offsets-43 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-43 (kafka.cluster.Partition) [2021-06-18 00:03:06,047] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,047] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,047] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,048] INFO [Partition __consumer_offsets-43 broker=1] __consumer_offsets-43 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,050] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-43 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,064] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,067] INFO [Log partition=__consumer_offsets-40, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log) [2021-06-18 00:03:06,068] INFO Created log for partition __consumer_offsets-40 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,068] INFO [Partition __consumer_offsets-40 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-40 (kafka.cluster.Partition) [2021-06-18 00:03:06,069] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,069] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,069] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,069] INFO [Partition __consumer_offsets-40 broker=1] __consumer_offsets-40 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,072] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-40 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,085] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,089] INFO [Log partition=__consumer_offsets-37, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log) [2021-06-18 00:03:06,090] INFO Created log for partition __consumer_offsets-37 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,090] INFO [Partition __consumer_offsets-37 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-37 (kafka.cluster.Partition) [2021-06-18 00:03:06,090] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,091] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,091] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,091] INFO [Partition __consumer_offsets-37 broker=1] __consumer_offsets-37 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,094] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-37 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,107] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,110] INFO [Log partition=__consumer_offsets-34, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log) [2021-06-18 00:03:06,111] INFO Created log for partition __consumer_offsets-34 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,112] INFO [Partition __consumer_offsets-34 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-34 (kafka.cluster.Partition) [2021-06-18 00:03:06,112] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,112] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,112] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,112] INFO [Partition __consumer_offsets-34 broker=1] __consumer_offsets-34 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,114] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-34 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,128] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,131] INFO [Log partition=__consumer_offsets-31, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log) [2021-06-18 00:03:06,132] INFO Created log for partition __consumer_offsets-31 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,132] INFO [Partition __consumer_offsets-31 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-31 (kafka.cluster.Partition) [2021-06-18 00:03:06,132] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,132] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,133] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,133] INFO [Partition __consumer_offsets-31 broker=1] __consumer_offsets-31 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,135] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-31 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,148] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,152] INFO [Log partition=__consumer_offsets-19, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log) [2021-06-18 00:03:06,154] INFO Created log for partition __consumer_offsets-19 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,155] INFO [Partition __consumer_offsets-19 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-19 (kafka.cluster.Partition) [2021-06-18 00:03:06,155] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,156] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,156] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,157] INFO [Partition __consumer_offsets-19 broker=1] __consumer_offsets-19 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,159] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-19 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,175] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,178] INFO [Log partition=__consumer_offsets-28, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log) [2021-06-18 00:03:06,178] INFO Created log for partition __consumer_offsets-28 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,179] INFO [Partition __consumer_offsets-28 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-28 (kafka.cluster.Partition) [2021-06-18 00:03:06,179] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,179] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,179] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,179] INFO [Partition __consumer_offsets-28 broker=1] __consumer_offsets-28 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,181] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-28 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,194] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,197] INFO [Log partition=__consumer_offsets-16, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:03:06,198] INFO Created log for partition __consumer_offsets-16 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,198] INFO [Partition __consumer_offsets-16 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-16 (kafka.cluster.Partition) [2021-06-18 00:03:06,199] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,199] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,199] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,199] INFO [Partition __consumer_offsets-16 broker=1] __consumer_offsets-16 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,201] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-16 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,213] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,215] INFO [Log partition=__consumer_offsets-25, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 11 ms (kafka.log.Log) [2021-06-18 00:03:06,216] INFO Created log for partition __consumer_offsets-25 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,216] INFO [Partition __consumer_offsets-25 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-25 (kafka.cluster.Partition) [2021-06-18 00:03:06,216] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,216] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,216] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,217] INFO [Partition __consumer_offsets-25 broker=1] __consumer_offsets-25 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,219] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-25 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,231] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,234] INFO [Log partition=__consumer_offsets-22, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log) [2021-06-18 00:03:06,235] INFO Created log for partition __consumer_offsets-22 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,235] INFO [Partition __consumer_offsets-22 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-22 (kafka.cluster.Partition) [2021-06-18 00:03:06,235] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,235] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,235] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,235] INFO [Partition __consumer_offsets-22 broker=1] __consumer_offsets-22 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,237] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-22 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,249] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,252] INFO [Log partition=__consumer_offsets-13, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:03:06,252] INFO Created log for partition __consumer_offsets-13 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,253] INFO [Partition __consumer_offsets-13 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-13 (kafka.cluster.Partition) [2021-06-18 00:03:06,253] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,253] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,253] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,253] INFO [Partition __consumer_offsets-13 broker=1] __consumer_offsets-13 starts at Leader Epoch 0 from offset 0. 
Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-13 (last update controller epoch 8) (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-10 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-7 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-4 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-1 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-49 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-46 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-43 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-40 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-37 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-34 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-31 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-19 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-28 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-16 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-25 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 
8 for the become-leader transition for partition __consumer_offsets-22 (state.change.logger) [2021-06-18 00:03:06,255] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-leader transition for partition __consumer_offsets-13 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-0 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-29 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-48 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-45 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-26 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-42 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-23 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-20 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-39 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-17 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-36 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-14 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-33 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-11 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE 
[Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-30 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-27 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-8 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-24 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-5 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-21 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-2 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-18 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-15 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-12 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-9 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-47 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-38 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-35 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-44 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the 
become-follower transition for partition __consumer_offsets-6 with leader 0 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-41 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-32 with leader 2 (state.change.logger) [2021-06-18 00:03:06,256] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 4 from controller 2 epoch 8 starting the become-follower transition for partition __consumer_offsets-3 with leader 0 (state.change.logger) [2021-06-18 00:03:06,257] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,270] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,272] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log) [2021-06-18 00:03:06,273] INFO Created log for partition __consumer_offsets-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,273] INFO [Partition __consumer_offsets-0 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-0 (kafka.cluster.Partition) [2021-06-18 00:03:06,273] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,274] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,274] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,285] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,288] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 11 ms (kafka.log.Log) [2021-06-18 00:03:06,289] INFO Created log for partition __consumer_offsets-29 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,289] INFO [Partition __consumer_offsets-29 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-29 (kafka.cluster.Partition) [2021-06-18 00:03:06,289] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,289] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,289] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,301] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,305] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log) [2021-06-18 00:03:06,305] INFO Created log for partition __consumer_offsets-48 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,306] INFO [Partition __consumer_offsets-48 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-48 (kafka.cluster.Partition) [2021-06-18 00:03:06,306] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,306] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,306] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,306] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,318] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,321] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:03:06,321] INFO Created log for partition __consumer_offsets-45 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,322] INFO [Partition __consumer_offsets-45 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-45 (kafka.cluster.Partition) [2021-06-18 00:03:06,322] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,322] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,322] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,335] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,338] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log) [2021-06-18 00:03:06,338] INFO Created log for partition __consumer_offsets-26 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,338] INFO [Partition __consumer_offsets-26 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-26 (kafka.cluster.Partition) [2021-06-18 00:03:06,339] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,339] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,349] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,352] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 11 ms (kafka.log.Log) [2021-06-18 00:03:06,353] INFO Created log for partition __consumer_offsets-42 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,353] INFO [Partition __consumer_offsets-42 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-42 (kafka.cluster.Partition) [2021-06-18 00:03:06,353] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,353] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,353] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,365] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,368] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:03:06,369] INFO Created log for partition __consumer_offsets-23 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, 
message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,369] INFO [Partition __consumer_offsets-23 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-23 (kafka.cluster.Partition) [2021-06-18 00:03:06,369] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,369] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,369] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,369] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,381] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,384] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:03:06,385] INFO Created log for partition __consumer_offsets-20 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,385] INFO [Partition __consumer_offsets-20 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-20 (kafka.cluster.Partition) [2021-06-18 00:03:06,385] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,385] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,385] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,397] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,400] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:03:06,401] INFO Created log for partition __consumer_offsets-39 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,401] INFO [Partition __consumer_offsets-39 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-39 (kafka.cluster.Partition) [2021-06-18 00:03:06,401] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,401] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,413] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,416] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:03:06,417] INFO Created log for partition __consumer_offsets-17 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,417] INFO [Partition __consumer_offsets-17 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-17 (kafka.cluster.Partition) [2021-06-18 00:03:06,417] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,417] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,418] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,431] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,435] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log) [2021-06-18 00:03:06,436] INFO Created log for partition __consumer_offsets-36 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, 
message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,436] INFO [Partition __consumer_offsets-36 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-36 (kafka.cluster.Partition) [2021-06-18 00:03:06,436] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,436] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,437] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,437] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,452] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,455] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log) [2021-06-18 00:03:06,455] INFO Created log for partition __consumer_offsets-14 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,456] INFO [Partition __consumer_offsets-14 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-14 (kafka.cluster.Partition) [2021-06-18 00:03:06,456] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,456] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,456] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,474] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,477] INFO [Log partition=__consumer_offsets-33, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 18 ms (kafka.log.Log) [2021-06-18 00:03:06,478] INFO Created log for partition __consumer_offsets-33 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,478] INFO [Partition __consumer_offsets-33 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-33 (kafka.cluster.Partition) [2021-06-18 00:03:06,478] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,478] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,491] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,494] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log) [2021-06-18 00:03:06,494] INFO Created log for partition __consumer_offsets-11 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,495] INFO [Partition __consumer_offsets-11 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-11 (kafka.cluster.Partition) [2021-06-18 00:03:06,495] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,495] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,495] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,507] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,510] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:03:06,510] INFO Created log for partition __consumer_offsets-30 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, 
message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,511] INFO [Partition __consumer_offsets-30 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-30 (kafka.cluster.Partition) [2021-06-18 00:03:06,511] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,511] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,511] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,511] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,525] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,528] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log) [2021-06-18 00:03:06,529] INFO Created log for partition __consumer_offsets-27 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,529] INFO [Partition __consumer_offsets-27 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-27 (kafka.cluster.Partition) [2021-06-18 00:03:06,529] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,530] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,530] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,543] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,546] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log) [2021-06-18 00:03:06,546] INFO Created log for partition __consumer_offsets-8 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager) [2021-06-18 00:03:06,547] INFO [Partition __consumer_offsets-8 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-8 (kafka.cluster.Partition) [2021-06-18 00:03:06,547] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,547] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,560] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,562] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log) [2021-06-18 00:03:06,563] INFO Created log for partition __consumer_offsets-24 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,563] INFO [Partition __consumer_offsets-24 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-24 (kafka.cluster.Partition) [2021-06-18 00:03:06,563] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,563] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,563] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,576] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,579] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log) [2021-06-18 00:03:06,579] INFO Created log for partition __consumer_offsets-5 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, 
message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager) [2021-06-18 00:03:06,580] INFO [Partition __consumer_offsets-5 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-5 (kafka.cluster.Partition) [2021-06-18 00:03:06,580] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,580] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,580] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,580] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica) [2021-06-18 00:03:06,619] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log) [2021-06-18 00:03:06,622] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 39 ms (kafka.log.Log) [2021-06-18 00:03:06,623] INFO Created log for partition __consumer_offsets-21 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. 
(kafka.log.LogManager)
[2021-06-18 00:03:06,624] INFO [Partition __consumer_offsets-21 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-21 (kafka.cluster.Partition)
[2021-06-18 00:03:06,624] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,624] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,624] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,636] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,640] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-06-18 00:03:06,640] INFO Created log for partition __consumer_offsets-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,640] INFO [Partition __consumer_offsets-2 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-2 (kafka.cluster.Partition)
[2021-06-18 00:03:06,641] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,641] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,654] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,657] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 14 ms (kafka.log.Log)
[2021-06-18 00:03:06,657] INFO Created log for partition __consumer_offsets-18 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,657] INFO [Partition __consumer_offsets-18 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-18 (kafka.cluster.Partition)
[2021-06-18 00:03:06,657] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,658] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,658] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,658] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,670] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,673] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-06-18 00:03:06,674] INFO Created log for partition __consumer_offsets-15 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,674] INFO [Partition __consumer_offsets-15 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-15 (kafka.cluster.Partition)
[2021-06-18 00:03:06,674] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,674] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,686] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,689] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:03:06,689] INFO Created log for partition __consumer_offsets-12 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,690] INFO [Partition __consumer_offsets-12 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-12 (kafka.cluster.Partition)
[2021-06-18 00:03:06,690] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,690] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,690] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,690] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,701] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,704] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:03:06,705] INFO Created log for partition __consumer_offsets-9 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,705] INFO [Partition __consumer_offsets-9 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-9 (kafka.cluster.Partition)
[2021-06-18 00:03:06,705] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,706] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,717] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,720] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:03:06,720] INFO Created log for partition __consumer_offsets-47 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,721] INFO [Partition __consumer_offsets-47 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-47 (kafka.cluster.Partition)
[2021-06-18 00:03:06,721] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,721] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,721] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,721] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,733] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,737] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-06-18 00:03:06,737] INFO Created log for partition __consumer_offsets-38 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,738] INFO [Partition __consumer_offsets-38 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-38 (kafka.cluster.Partition)
[2021-06-18 00:03:06,738] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,738] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,749] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,752] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:03:06,752] INFO Created log for partition __consumer_offsets-35 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,753] INFO [Partition __consumer_offsets-35 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-35 (kafka.cluster.Partition)
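Note on the recurring "No checkpointed highwatermark is found" entries: the broker persists the last known high watermark per partition in the replication-offset-checkpoint file inside each log directory, and for these freshly created __consumer_offsets partitions no entry exists yet, so the high watermark starts at 0. A minimal sketch for inspecting that checkpoint, assuming the plain-text layout Kafka uses (a version line, an entry count, then one "topic partition offset" triple per line) and the data directory shown in the dir= fields above:

    // Sketch only: dump high-watermark checkpoint entries from a Kafka log dir.
    // The path comes from the dir=/var/lib/kafka/data entries above; the
    // three-field line layout is an assumption about the file format.
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.List;

    public class ReadHighWatermarkCheckpoint {
        public static void main(String[] args) throws Exception {
            List<String> lines = Files.readAllLines(
                Paths.get("/var/lib/kafka/data/replication-offset-checkpoint"));
            // lines 0 and 1 hold the format version and the entry count
            for (String entry : lines.subList(2, lines.size())) {
                String[] f = entry.split(" "); // topic, partition, offset
                System.out.printf("%s-%s highWatermark=%s%n", f[0], f[1], f[2]);
            }
        }
    }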
[2021-06-18 00:03:06,753] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,753] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,753] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,753] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,764] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,767] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:03:06,768] INFO Created log for partition __consumer_offsets-44 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,768] INFO [Partition __consumer_offsets-44 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-44 (kafka.cluster.Partition)
[2021-06-18 00:03:06,768] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,768] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,780] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,783] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:03:06,784] INFO Created log for partition __consumer_offsets-6 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,784] INFO [Partition __consumer_offsets-6 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-6 (kafka.cluster.Partition)
[2021-06-18 00:03:06,784] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,784] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,784] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,797] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,799] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:03:06,800] INFO Created log for partition __consumer_offsets-41 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,800] INFO [Partition __consumer_offsets-41 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-41 (kafka.cluster.Partition)
[2021-06-18 00:03:06,800] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,800] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,800] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,801] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,812] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,815] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:03:06,816] INFO Created log for partition __consumer_offsets-32 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,816] INFO [Partition __consumer_offsets-32 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-32 (kafka.cluster.Partition)
[2021-06-18 00:03:06,816] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,817] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,817] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:03:06,828] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:03:06,831] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:03:06,832] INFO Created log for partition __consumer_offsets-3 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.bytes -> 104857600, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:03:06,832] INFO [Partition __consumer_offsets-3 broker=1] No checkpointed highwatermark is found for partition __consumer_offsets-3 (kafka.cluster.Partition)
[2021-06-18 00:03:06,832] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
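The property dump repeated in every "Created log" entry above is the effective per-topic configuration of __consumer_offsets: a compacted topic (cleanup.policy -> compact) with 100 MiB segments (segment.bytes -> 104857600) and unclean leader election disabled. As a hedged sketch, the same values can be read back with the Kafka AdminClient; the bootstrap address below reuses a broker host name that appears later in this log, and this SASL-secured deployment would additionally need the matching client security properties:

    // Sketch only: read the __consumer_offsets topic configuration back from
    // the cluster. Bootstrap address and omitted SASL settings are assumptions.
    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.Config;
    import org.apache.kafka.common.config.ConfigResource;

    public class DescribeOffsetsTopicConfig {
        public static void main(String[] args) throws Exception {
            Properties p = new Properties();
            p.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                  "onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092");
            try (AdminClient admin = AdminClient.create(p)) {
                ConfigResource topic =
                    new ConfigResource(ConfigResource.Type.TOPIC, "__consumer_offsets");
                Config cfg = admin.describeConfigs(Collections.singleton(topic))
                                  .all().get().get(topic);
                // Expect cleanup.policy=compact, segment.bytes=104857600, etc.
                cfg.entries().forEach(e ->
                    System.out.println(e.name() + " = " + e.value()));
            }
        }
    }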
[2021-06-18 00:03:06,833] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(__consumer_offsets-6, __consumer_offsets-32, __consumer_offsets-14, __consumer_offsets-36, __consumer_offsets-18, __consumer_offsets-0, __consumer_offsets-41, __consumer_offsets-26, __consumer_offsets-23, __consumer_offsets-45, __consumer_offsets-8, __consumer_offsets-27, __consumer_offsets-12, __consumer_offsets-9, __consumer_offsets-35, __consumer_offsets-17, __consumer_offsets-39, __consumer_offsets-21, __consumer_offsets-47, __consumer_offsets-44, __consumer_offsets-3, __consumer_offsets-29, __consumer_offsets-48, __consumer_offsets-11, __consumer_offsets-30, __consumer_offsets-33, __consumer_offsets-15, __consumer_offsets-38, __consumer_offsets-20, __consumer_offsets-42, __consumer_offsets-5, __consumer_offsets-2, __consumer_offsets-24) (kafka.server.ReplicaFetcherManager)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-41 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-44 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-47 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-2 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-5 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-11 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-14 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-17 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-20 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-23 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-26 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-29 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-32 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-35 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-38 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-0 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-3 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-6 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-9 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-12 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-15 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-18 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-21 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-24 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-27 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-30 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-33 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-36 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-39 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-42 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-45 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,833] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-48 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-41 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-44 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-47 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-2 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-5 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-8 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-11 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-14 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-17 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-20 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-23 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-26 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-29 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-32 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-35 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-38 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-0 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-3 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-6 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-9 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-12 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-15 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-18 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-21 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-24 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-27 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-30 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-33 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-36 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-39 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-42 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-45 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,834] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition __consumer_offsets-48 as part of become-follower request with correlation id 4 from controller 2 epoch 8 with leader 0 (state.change.logger)
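At this point broker 1 has stopped its old fetchers and truncated the affected logs; next it registers replica fetchers against the new leaders (brokers 2 and 0, per the BrokerEndPoint entries that follow). A small sketch for checking the resulting leader and ISR assignment per partition, assuming an AdminClient built as in the previous sketch:

    // Sketch only: print leader and ISR for every __consumer_offsets partition,
    // which should match the become-follower assignments in this log section.
    import java.util.Collections;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.TopicDescription;

    public class ShowOffsetsTopicLeaders {
        static void printLeaders(AdminClient admin) throws Exception {
            TopicDescription td = admin
                .describeTopics(Collections.singleton("__consumer_offsets"))
                .all().get().get("__consumer_offsets");
            td.partitions().forEach(pi ->
                System.out.printf("partition %d leader=%s isr=%s%n",
                    pi.partition(), pi.leader(), pi.isr()));
        }
    }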
[2021-06-18 00:03:06,838] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-8 -> (offset=0, leaderEpoch=0), __consumer_offsets-35 -> (offset=0, leaderEpoch=0), __consumer_offsets-41 -> (offset=0, leaderEpoch=0), __consumer_offsets-23 -> (offset=0, leaderEpoch=0), __consumer_offsets-47 -> (offset=0, leaderEpoch=0), __consumer_offsets-38 -> (offset=0, leaderEpoch=0), __consumer_offsets-17 -> (offset=0, leaderEpoch=0), __consumer_offsets-11 -> (offset=0, leaderEpoch=0), __consumer_offsets-2 -> (offset=0, leaderEpoch=0), __consumer_offsets-14 -> (offset=0, leaderEpoch=0), __consumer_offsets-20 -> (offset=0, leaderEpoch=0), __consumer_offsets-44 -> (offset=0, leaderEpoch=0), __consumer_offsets-5 -> (offset=0, leaderEpoch=0), __consumer_offsets-26 -> (offset=0, leaderEpoch=0), __consumer_offsets-29 -> (offset=0, leaderEpoch=0), __consumer_offsets-32 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-18 00:03:06,838] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(__consumer_offsets-30 -> (offset=0, leaderEpoch=0), __consumer_offsets-21 -> (offset=0, leaderEpoch=0), __consumer_offsets-27 -> (offset=0, leaderEpoch=0), __consumer_offsets-9 -> (offset=0, leaderEpoch=0), __consumer_offsets-33 -> (offset=0, leaderEpoch=0), __consumer_offsets-36 -> (offset=0, leaderEpoch=0), __consumer_offsets-42 -> (offset=0, leaderEpoch=0), __consumer_offsets-3 -> (offset=0, leaderEpoch=0), __consumer_offsets-18 -> (offset=0, leaderEpoch=0), __consumer_offsets-15 -> (offset=0, leaderEpoch=0), __consumer_offsets-24 -> (offset=0, leaderEpoch=0), __consumer_offsets-48 -> (offset=0, leaderEpoch=0), __consumer_offsets-6 -> (offset=0, leaderEpoch=0), __consumer_offsets-0 -> (offset=0, leaderEpoch=0), __consumer_offsets-39 -> (offset=0, leaderEpoch=0), __consumer_offsets-12 -> (offset=0, leaderEpoch=0), __consumer_offsets-45 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-41 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-44 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-47 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-2 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-5 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-11 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-14 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-17 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-20 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-23 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-26 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-29 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-32 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-35 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-38 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-0 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-3 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-6 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-9 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-12 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-15 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-18 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-21 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-24 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-27 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-30 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-33 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-36 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-39 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-42 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-45 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 4 for partition __consumer_offsets-48 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-0 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-29 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-48 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-45 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-26 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-42 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-23 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-20 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-39 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-17 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-36 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-14 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-33 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-11 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-30 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,839] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-27 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-8 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-24 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-5 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-21 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-2 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-18 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-15 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-12 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-9 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-47 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-38 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-35 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-44 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-6 with leader 0 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-41 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-32 with leader 2 (state.change.logger)
[2021-06-18 00:03:06,840] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 4 from controller 2 epoch 8 for the become-follower transition for partition __consumer_offsets-3 with leader 0 (state.change.logger)
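The GroupMetadataManager entries that follow show the group coordinator side of the same transition: broker 1 schedules loading of group offsets for the __consumer_offsets partitions it now leads and unloading of the ones it just handed over. Which of the 50 partitions (0 through 49 in this log) stores a given consumer group is determined by hashing the group id; a sketch mirroring that rule, with a hypothetical group id:

    // Sketch only: locate the __consumer_offsets partition for a consumer group.
    // Kafka maps a group to abs(groupId.hashCode) % offsets.topic.num.partitions;
    // the bit mask below avoids the Integer.MIN_VALUE pitfall of Math.abs.
    public class CoordinatorPartition {
        static int coordinatorPartition(String groupId, int numOffsetsPartitions) {
            return (groupId.hashCode() & 0x7fffffff) % numOffsetsPartitions;
        }
        public static void main(String[] args) {
            // "example-group" is a hypothetical group id; 50 matches this cluster
            System.out.println(coordinatorPartition("example-group", 50));
        }
    }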
[2021-06-18 00:03:06,841] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,842] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,843] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,843] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,844] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,845] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,845] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,845] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,845] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,845] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,845] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,845] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,845] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,845] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,845] INFO [GroupMetadataManager brokerId=1] Scheduling unloading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:03:06,850] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-13 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-46 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-9 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-42 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-21 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-17 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-30 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger)
[2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-26 in response to UpdateMetadata request sent by controller
2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-5 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-38 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-1 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-34 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-16 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-45 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-12 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-41 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-24 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-20 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, 
leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-49 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-0 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-29 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-25 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-8 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-37 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-4 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-33 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-15 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-48 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-11 in response to UpdateMetadata request sent by 
controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-44 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-23 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-19 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,851] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-32 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-28 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-7 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-40 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-3 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-36 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-47 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, 
leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-14 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-43 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-10 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 0, 2], zkVersion=0, replicas=[1, 0, 2], offlineReplicas=[]) for partition __consumer_offsets-22 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-18 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition __consumer_offsets-31 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-27 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 2, 1], zkVersion=0, replicas=[0, 2, 1], offlineReplicas=[]) for partition __consumer_offsets-39 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition __consumer_offsets-6 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 1, 0], zkVersion=0, replicas=[2, 1, 0], offlineReplicas=[]) for partition __consumer_offsets-35 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition __consumer_offsets-2 in response to UpdateMetadata request 
sent by controller 2 epoch 8 with correlation id 5 (state.change.logger) [2021-06-18 00:03:06,853] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-22 in 11 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,854] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-25 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,854] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-28 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,854] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-31 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,854] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-34 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,854] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-37 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,855] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-40 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,855] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-43 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,855] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-46 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,855] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-49 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,855] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-1 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,855] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-4 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,856] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-7 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,856] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-10 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,856] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-13 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,856] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-16 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,856] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-19 in 0 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,858] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-41. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,858] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-44. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,858] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-47. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,858] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-2. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-5. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-8. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-11. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-14. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-17. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-20. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-23. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-26. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-29. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-32. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-35. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,859] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-38. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-0. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-3. 
Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-6. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-9. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-12. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-15. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-18. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-21. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-24. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-27. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-30. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,860] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-33. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,861] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-36. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,861] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-39. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,861] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-42. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,861] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-45. Removed 0 cached offsets and 0 cached groups. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,861] INFO [GroupMetadataManager brokerId=1] Finished unloading __consumer_offsets-48. Removed 0 cached offsets and 0 cached groups. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 00:03:06,906] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:06,907] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:06,936] INFO [GroupCoordinator 1]: Preparing to rebalance group 248c9cd2-3695-441f-81c9-b61f334a46d2--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-7) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-6876513b-bcd0-4f71-a1d0-0fdfd514473a with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 00:03:07,087] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-6 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,087] INFO [Log partition=__consumer_offsets-6, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,087] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-39 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,087] INFO [Log partition=__consumer_offsets-39, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,087] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-21 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,087] INFO [Log partition=__consumer_offsets-21, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,088] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-36 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,088] INFO [Log partition=__consumer_offsets-36, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,088] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-3 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,088] INFO [Log partition=__consumer_offsets-3, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,088] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-18 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,088] INFO [Log partition=__consumer_offsets-18, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,088] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-48 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,088] INFO [Log partition=__consumer_offsets-48, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,088] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-33 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,088] INFO [Log partition=__consumer_offsets-33, 
dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,088] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-30 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,088] INFO [Log partition=__consumer_offsets-30, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,089] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,089] INFO [Log partition=__consumer_offsets-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,089] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-15 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,089] INFO [Log partition=__consumer_offsets-15, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,089] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-45 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,089] INFO [Log partition=__consumer_offsets-45, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,089] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-27 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,089] INFO [Log partition=__consumer_offsets-27, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,089] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-12 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,089] INFO [Log partition=__consumer_offsets-12, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,089] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-9 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,089] INFO [Log partition=__consumer_offsets-9, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,090] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-42 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,090] INFO [Log partition=__consumer_offsets-42, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,090] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition __consumer_offsets-24 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,090] INFO [Log partition=__consumer_offsets-24, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,177] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-17 
to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,177] INFO [Log partition=__consumer_offsets-17, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,177] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-32 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,177] INFO [Log partition=__consumer_offsets-32, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,177] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-47 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,177] INFO [Log partition=__consumer_offsets-47, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,177] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-14 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,177] INFO [Log partition=__consumer_offsets-14, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,177] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-44 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,177] INFO [Log partition=__consumer_offsets-44, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,177] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-29 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,177] INFO [Log partition=__consumer_offsets-29, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,177] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-11 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,177] INFO [Log partition=__consumer_offsets-11, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,177] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-41 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,177] INFO [Log partition=__consumer_offsets-41, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,177] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-26 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,178] INFO [Log partition=__consumer_offsets-26, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,178] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-23 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,178] INFO [Log partition=__consumer_offsets-23, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 
(kafka.log.Log) [2021-06-18 00:03:07,178] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-8 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,178] INFO [Log partition=__consumer_offsets-8, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,178] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-38 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,178] INFO [Log partition=__consumer_offsets-38, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,178] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-20 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,178] INFO [Log partition=__consumer_offsets-20, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,178] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-5 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,178] INFO [Log partition=__consumer_offsets-5, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,178] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-35 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,178] INFO [Log partition=__consumer_offsets-35, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,178] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition __consumer_offsets-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread) [2021-06-18 00:03:07,178] INFO [Log partition=__consumer_offsets-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log) [2021-06-18 00:03:07,598] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-30 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,598] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-21 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,598] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-27 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-9 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. 
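
The paired "Truncating partition ... to local high watermark 0" / "Truncating to 0 has no effect as the largest offset in the log is -1" INFO entries above are the normal become-follower step: before a replica starts fetching from its new leader, it truncates its local log back to a safe offset so it can never diverge from the leader's history. All of these __consumer_offsets partitions were only just created, so each local log is still empty (largest stored offset -1) and the truncation is a no-op. The guard can be sketched as below; this is an illustrative reconstruction only, not Kafka's actual kafka.log.Log implementation, and FollowerLog/deleteSegmentsAfter are hypothetical names:

    // Illustrative sketch of the no-op truncation guard reported in the log above.
    // FollowerLog and deleteSegmentsAfter are hypothetical; Kafka's real Log class
    // tracks far more state (segments, leader epochs, high watermark, etc.).
    final class FollowerLog {
        // An empty log has logEndOffset 0, i.e. its largest stored offset is -1.
        private long logEndOffset = 0L;

        void truncateTo(long targetOffset) {
            if (targetOffset >= logEndOffset) {
                // Matches "Truncating to 0 has no effect as the largest offset
                // in the log is -1": nothing past targetOffset exists to delete.
                return;
            }
            deleteSegmentsAfter(targetOffset);
            logEndOffset = targetOffset;
        }

        private void deleteSegmentsAfter(long offset) {
            // Drop segments/records with offsets >= 'offset' (elided).
        }
    }
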
[2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-33 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-36 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-42 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-3 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-18 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-15 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-24 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-48 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-6 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-0 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-39 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-12 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. 
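
The ERROR burst from ReplicaFetcherThread above looks alarming but reflects a startup race, not data loss: broker 1 begins fetching the partitions it was just told to follow before leader 0 has finished applying its own LeaderAndIsr request, so the leader answers with the retriable UnknownTopicOrPartitionException. The fetcher keeps retrying, the errors stop once every replica has processed the new metadata, and consistent with that the POLICY-PDP-PAP group stabilizes a few seconds later. Once the cluster settles, leadership and ISR can be double-checked from outside with the Kafka AdminClient. The sketch below is a minimal example, assuming the broker address logged by ReplicaFetcherManager is reachable; the SASL settings this deployment's listeners require are deliberately omitted:

    import java.util.Properties;
    import java.util.Set;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.TopicDescription;

    public class VerifyOffsetsTopic {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Broker address as logged by ReplicaFetcherManager; adjust for your cluster.
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                    "onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092");
            // NOTE: this cluster uses SASL listeners; the required security.protocol
            // and sasl.* properties are omitted here and must be supplied in practice.
            try (Admin admin = Admin.create(props)) {
                TopicDescription td = admin.describeTopics(Set.of("__consumer_offsets"))
                        .all().get().get("__consumer_offsets");
                // Print leader and in-sync replicas per partition, mirroring the
                // PartitionState entries cached in the log above.
                td.partitions().forEach(p -> System.out.printf(
                        "partition %d leader=%s isr=%s%n",
                        p.partition(), p.leader(), p.isr()));
            }
        }
    }
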
[2021-06-18 00:03:07,599] ERROR [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Error for partition __consumer_offsets-45 at offset 0 (kafka.server.ReplicaFetcherThread) org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server does not host this topic-partition. [2021-06-18 00:03:09,946] INFO [GroupCoordinator 1]: Stabilized group 248c9cd2-3695-441f-81c9-b61f334a46d2--POLICY-PDP-PAP generation 1 (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 00:03:09,958] INFO [GroupCoordinator 1]: Assignment received from leader for group 248c9cd2-3695-441f-81c9-b61f334a46d2--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 00:03:10,172] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:10,172] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:13,146] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:13,146] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:13,151] INFO [GroupCoordinator 1]: Preparing to rebalance group 5a8d784e-b3b1-4ea6-a037-3c97fa3cd3ab--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-31) (reason: Adding new member onap-policy-apex-pdp-0-3c1e2981-c3d0-4bce-952e-3bdd3ebd32ca with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 00:03:16,153] INFO [GroupCoordinator 1]: Stabilized group 5a8d784e-b3b1-4ea6-a037-3c97fa3cd3ab--POLICY-PDP-PAP generation 1 (__consumer_offsets-31) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 00:03:16,156] INFO [GroupCoordinator 1]: Assignment received from leader for group 5a8d784e-b3b1-4ea6-a037-3c97fa3cd3ab--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 00:03:16,280] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:16,280] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:56,281] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:56,281] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:59,494] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:03:59,494] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 00:04:03,426] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=0,1,2, zkVersion=0, replicas=0,1,2, isNew=true) correlation id 6 from controller 2 epoch 8 for partition POLICY-NOTIFICATION-0 (state.change.logger) [2021-06-18 00:04:03,426] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1,2,0, zkVersion=0, replicas=1,2,0, isNew=true) correlation id 6 from controller 2 epoch 8 for partition POLICY-NOTIFICATION-1 (state.change.logger) [2021-06-18 00:04:03,426] TRACE [Broker id=1] Received 
LeaderAndIsr request PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=2,0,1, zkVersion=0, replicas=2,0,1, isNew=true) correlation id 6 from controller 2 epoch 8 for partition POLICY-NOTIFICATION-2 (state.change.logger)
[2021-06-18 00:04:03,428] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 2 epoch 8 starting the become-leader transition for partition POLICY-NOTIFICATION-1 (state.change.logger)
[2021-06-18 00:04:03,428] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(POLICY-NOTIFICATION-1) (kafka.server.ReplicaFetcherManager)
[2021-06-18 00:04:03,443] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:04:03,446] INFO [Log partition=POLICY-NOTIFICATION-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 15 ms (kafka.log.Log)
[2021-06-18 00:04:03,447] INFO Created log for partition POLICY-NOTIFICATION-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:04:03,450] INFO [Partition POLICY-NOTIFICATION-1 broker=1] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-1 (kafka.cluster.Partition)
[2021-06-18 00:04:03,450] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:04:03,450] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:04:03,450] INFO Replica loaded for partition POLICY-NOTIFICATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:04:03,450] INFO [Partition POLICY-NOTIFICATION-1 broker=1] POLICY-NOTIFICATION-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-18 00:04:03,452] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 6 for partition POLICY-NOTIFICATION-1 (last update controller epoch 8) (state.change.logger)
[2021-06-18 00:04:03,452] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 2 epoch 8 for the become-leader transition for partition POLICY-NOTIFICATION-1 (state.change.logger)
[2021-06-18 00:04:03,452] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 2 epoch 8 starting the become-follower transition for partition POLICY-NOTIFICATION-0 with leader 0 (state.change.logger)
[2021-06-18 00:04:03,452] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 6 from controller 2 epoch 8 starting the become-follower transition for partition POLICY-NOTIFICATION-2 with leader 2 (state.change.logger)
[2021-06-18 00:04:03,452] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:04:03,464] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:04:03,467] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:04:03,468] INFO Created log for partition POLICY-NOTIFICATION-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:04:03,469] INFO [Partition POLICY-NOTIFICATION-0 broker=1] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-0 (kafka.cluster.Partition)
[2021-06-18 00:04:03,469] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:04:03,469] INFO Replica loaded for partition POLICY-NOTIFICATION-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:04:03,469] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:04:03,469] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:04:03,480] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:04:03,484] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:04:03,484] INFO Created log for partition POLICY-NOTIFICATION-2 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:04:03,485] INFO [Partition POLICY-NOTIFICATION-2 broker=1] No checkpointed highwatermark is found for partition POLICY-NOTIFICATION-2 (kafka.cluster.Partition)
[2021-06-18 00:04:03,485] INFO Replica loaded for partition POLICY-NOTIFICATION-2 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:04:03,485] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(POLICY-NOTIFICATION-2, POLICY-NOTIFICATION-0) (kafka.server.ReplicaFetcherManager)
[2021-06-18 00:04:03,485] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 6 for partition POLICY-NOTIFICATION-2 with leader 2 (state.change.logger)
[2021-06-18 00:04:03,485] TRACE [Broker id=1] Stopped fetchers as part of become-follower request from controller 2 epoch 8 with correlation id 6 for partition POLICY-NOTIFICATION-0 with leader 0 (state.change.logger)
[2021-06-18 00:04:03,485] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition POLICY-NOTIFICATION-2 as part of become-follower request with correlation id 6 from controller 2 epoch 8 with leader 2 (state.change.logger)
[2021-06-18 00:04:03,485] TRACE [Broker id=1] Truncated logs and checkpointed recovery boundaries for partition POLICY-NOTIFICATION-0 as part of become-follower request with correlation id 6 from controller 2 epoch 8 with leader 0 (state.change.logger)
[2021-06-18 00:04:03,486] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker BrokerEndPoint(id=2, host=onap-message-router-kafka-2.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-NOTIFICATION-2 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-18 00:04:03,486] INFO [ReplicaFetcherManager on broker 1] Added fetcher to broker BrokerEndPoint(id=0, host=onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092) for partitions Map(POLICY-NOTIFICATION-0 -> (offset=0, leaderEpoch=0)) (kafka.server.ReplicaFetcherManager)
[2021-06-18 00:04:03,486] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 6 for partition POLICY-NOTIFICATION-2 with leader 2 (state.change.logger)
[2021-06-18 00:04:03,486] TRACE [Broker id=1] Started fetcher to new leader as part of become-follower request from controller 2 epoch 8 with correlation id 6 for partition POLICY-NOTIFICATION-0 with leader 0 (state.change.logger)
[2021-06-18 00:04:03,486] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 2 epoch 8 for the become-follower transition for partition POLICY-NOTIFICATION-0 with leader 0 (state.change.logger)
[2021-06-18 00:04:03,486] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 6 from controller 2 epoch 8 for the become-follower transition for partition POLICY-NOTIFICATION-2 with leader 2 (state.change.logger)
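The three "Created log ... with properties {...}" records above show the topic-level configuration the broker applied when materializing each POLICY-NOTIFICATION replica: seven-day retention.ms and segment.ms, 1 GiB segments, and unclean leader election disabled. For reference, here is a minimal Java AdminClient sketch that would declare a topic with the same 3-partition / 3-replica layout and the logged retention overrides; the bootstrap hostname is copied from the BrokerEndPoint entries in the fetcher records above, and everything else (class name, the exact set of overrides passed at creation time) is illustrative, not the command this deployment actually ran.

    import java.util.List;
    import java.util.Map;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.NewTopic;

    public class CreatePolicyNotificationTopicSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Hostname taken from the BrokerEndPoint records in this log.
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                    "onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092");
            try (AdminClient admin = AdminClient.create(props)) {
                // 3 partitions, replication factor 3, matching replicas=[0, 1, 2] above.
                NewTopic topic = new NewTopic("POLICY-NOTIFICATION", 3, (short) 3)
                        .configs(Map.of(
                                "retention.ms", "604800000",        // 7 days, as logged
                                "segment.ms", "604800000",
                                "delete.retention.ms", "86400000",
                                "min.insync.replicas", "1"));
                admin.createTopics(List.of(topic)).all().get(); // block until the controller accepts it
            }
        }
    }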
[2021-06-18 00:04:03,489] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0, 1, 2], zkVersion=0, replicas=[0, 1, 2], offlineReplicas=[]) for partition POLICY-NOTIFICATION-0 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 7 (state.change.logger)
[2021-06-18 00:04:03,489] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1, 2, 0], zkVersion=0, replicas=[1, 2, 0], offlineReplicas=[]) for partition POLICY-NOTIFICATION-1 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 7 (state.change.logger)
[2021-06-18 00:04:03,489] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2, 0, 1], zkVersion=0, replicas=[2, 0, 1], offlineReplicas=[]) for partition POLICY-NOTIFICATION-2 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 7 (state.change.logger)
[2021-06-18 00:04:03,751] INFO [ReplicaFetcher replicaId=1, leaderId=2, fetcherId=0] Truncating partition POLICY-NOTIFICATION-2 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-18 00:04:03,751] INFO [Log partition=POLICY-NOTIFICATION-2, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-18 00:04:03,754] INFO [ReplicaFetcher replicaId=1, leaderId=0, fetcherId=0] Truncating partition POLICY-NOTIFICATION-0 to local high watermark 0 (kafka.server.ReplicaFetcherThread)
[2021-06-18 00:04:03,754] INFO [Log partition=POLICY-NOTIFICATION-0, dir=/var/lib/kafka/data] Truncating to 0 has no effect as the largest offset in the log is -1 (kafka.log.Log)
[2021-06-18 00:04:35,553] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:04:35,553] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:05:37,622] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:05:37,622] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:06:31,770] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:06:31,771] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:08:44,245] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:08:44,246] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:08:47,385] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:08:47,385] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
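The recurring Cadi3AAFProvider pairs ("^Event received with username admin" / "by passes the authentication for the admin admin", wording as logged) show the DMaaP authorization shim short-circuiting full AAF authentication for the built-in admin identity on each new client connection. A minimal sketch of the client-side SASL settings that would open such a connection follows, assuming the stock PLAIN login module over the SASL_PLAINTEXT listener; the password is a placeholder, since the real deployment sources the secret from its CADI configuration.

    import java.util.Properties;
    import org.apache.kafka.clients.CommonClientConfigs;
    import org.apache.kafka.common.config.SaslConfigs;

    public class AdminSaslSettingsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Host taken from the fetcher records in this log; 9092 is the internal SASL listener.
            props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
                    "onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092");
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
            props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
            // Placeholder secret, for illustration only.
            props.put(SaslConfigs.SASL_JAAS_CONFIG,
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                            + "username=\"admin\" password=\"<placeholder>\";");
            props.forEach((k, v) -> System.out.println(k + "=" + v));
        }
    }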
[2021-06-18 00:10:07,141] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=0, leaderEpoch=0, isr=[0], zkVersion=0, replicas=[0], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-0 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 8 (state.change.logger)
[2021-06-18 00:10:07,141] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_READY-1 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 8 (state.change.logger)
[2021-06-18 00:10:08,828] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1, zkVersion=0, replicas=1, isNew=true) correlation id 9 from controller 2 epoch 8 for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (state.change.logger)
[2021-06-18 00:10:08,829] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 9 from controller 2 epoch 8 starting the become-leader transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (state.change.logger)
[2021-06-18 00:10:08,829] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(org.onap.dmaap.mr.PNF_REGISTRATION-1) (kafka.server.ReplicaFetcherManager)
[2021-06-18 00:10:08,842] INFO [Log partition=org.onap.dmaap.mr.PNF_REGISTRATION-1, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:10:08,845] INFO [Log partition=org.onap.dmaap.mr.PNF_REGISTRATION-1, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:10:08,845] INFO Created log for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:10:08,848] INFO [Partition org.onap.dmaap.mr.PNF_REGISTRATION-1 broker=1] No checkpointed highwatermark is found for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (kafka.cluster.Partition)
[2021-06-18 00:10:08,848] INFO Replica loaded for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:10:08,848] INFO [Partition org.onap.dmaap.mr.PNF_REGISTRATION-1 broker=1] org.onap.dmaap.mr.PNF_REGISTRATION-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-18 00:10:08,850] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 9 for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (last update controller epoch 8) (state.change.logger)
[2021-06-18 00:10:08,850] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 9 from controller 2 epoch 8 for the become-leader transition for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 (state.change.logger)
[2021-06-18 00:10:08,852] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-1 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 10 (state.change.logger)
[2021-06-18 00:10:08,853] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition org.onap.dmaap.mr.PNF_REGISTRATION-0 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 10 (state.change.logger)
[2021-06-18 00:10:10,115] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1, zkVersion=0, replicas=1, isNew=true) correlation id 11 from controller 2 epoch 8 for partition org.onap.dmaap.mr.mirrormakeragent-0 (state.change.logger)
[2021-06-18 00:10:10,116] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 11 from controller 2 epoch 8 starting the become-leader transition for partition org.onap.dmaap.mr.mirrormakeragent-0 (state.change.logger)
[2021-06-18 00:10:10,116] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(org.onap.dmaap.mr.mirrormakeragent-0) (kafka.server.ReplicaFetcherManager)
[2021-06-18 00:10:10,129] INFO [Log partition=org.onap.dmaap.mr.mirrormakeragent-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:10:10,132] INFO [Log partition=org.onap.dmaap.mr.mirrormakeragent-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2021-06-18 00:10:10,133] INFO Created log for partition org.onap.dmaap.mr.mirrormakeragent-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:10:10,136] INFO [Partition org.onap.dmaap.mr.mirrormakeragent-0 broker=1] No checkpointed highwatermark is found for partition org.onap.dmaap.mr.mirrormakeragent-0 (kafka.cluster.Partition)
[2021-06-18 00:10:10,136] INFO Replica loaded for partition org.onap.dmaap.mr.mirrormakeragent-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:10:10,136] INFO [Partition org.onap.dmaap.mr.mirrormakeragent-0 broker=1] org.onap.dmaap.mr.mirrormakeragent-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-18 00:10:10,138] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 11 for partition org.onap.dmaap.mr.mirrormakeragent-0 (last update controller epoch 8) (state.change.logger)
[2021-06-18 00:10:10,138] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 11 from controller 2 epoch 8 for the become-leader transition for partition org.onap.dmaap.mr.mirrormakeragent-0 (state.change.logger)
[2021-06-18 00:10:10,140] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition org.onap.dmaap.mr.mirrormakeragent-0 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 12 (state.change.logger)
[2021-06-18 00:10:22,317] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-6876513b-bcd0-4f71-a1d0-0fdfd514473a in group 248c9cd2-3695-441f-81c9-b61f334a46d2--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:10:22,318] INFO [GroupCoordinator 1]: Preparing to rebalance group 248c9cd2-3695-441f-81c9-b61f334a46d2--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-7) (reason: removing member onap-policy-pap-5d9d484bff-z2229-6876513b-bcd0-4f71-a1d0-0fdfd514473a on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:10:22,319] INFO [GroupCoordinator 1]: Group 248c9cd2-3695-441f-81c9-b61f334a46d2--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:12:07,933] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 5 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
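The GroupCoordinator records at 00:10:22 close out one complete lifecycle of a single-member consumer group: a member had joined (a generation was stabilized and the leader's assignment accepted), and when the client shut down cleanly its LeaveGroup left the group empty at the next generation. A minimal consumer whose subscribe/poll/close sequence would produce exactly this join-stabilize-assign-leave pattern is sketched below; the "<uuid>--POLICY-PDP-PAP" group id format mirrors the log, but the concrete id and the poll duration are illustrative assumptions.

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class PdpPapConsumerLifecycleSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                    "onap-message-router-kafka-0.message-router-kafka.onap.svc.cluster.local:9092");
            // Group ids in this log look like "<uuid>--POLICY-PDP-PAP"; this one is made up.
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "00000000-0000-0000-0000-000000000000--POLICY-PDP-PAP");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.StringDeserializer");
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.StringDeserializer");
            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(List.of("POLICY-PDP-PAP"));
                // The first poll() joins the group: coordinator logs "Preparing to rebalance"
                // (Adding new member ...), then "Stabilized" and "Assignment received".
                consumer.poll(Duration.ofSeconds(5));
            } // close() sends LeaveGroup: "has left, removing it from the group" / "now empty"
        }
    }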
[2021-06-18 00:14:23,228] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:14:23,228] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:14:23,239] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:14:23,239] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:14:23,242] INFO [GroupCoordinator 1]: Preparing to rebalance group eda2fc80-ae9d-46f3-85fd-5366275760ff--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-22) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-f3cb2572-f8ad-46a7-9eca-ea569384f87c with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:14:26,244] INFO [GroupCoordinator 1]: Stabilized group eda2fc80-ae9d-46f3-85fd-5366275760ff--POLICY-PDP-PAP generation 1 (__consumer_offsets-22) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:14:26,247] INFO [GroupCoordinator 1]: Assignment received from leader for group eda2fc80-ae9d-46f3-85fd-5366275760ff--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:14:26,366] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:14:26,366] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:15:34,862] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=2, leaderEpoch=0, isr=[2], zkVersion=0, replicas=[2], offlineReplicas=[]) for partition SDC-DISTR-NOTIF-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 13 (state.change.logger)
[2021-06-18 00:15:36,245] TRACE [Broker id=1] Received LeaderAndIsr request PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=1, zkVersion=0, replicas=1, isNew=true) correlation id 14 from controller 2 epoch 8 for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2021-06-18 00:15:36,246] TRACE [Broker id=1] Handling LeaderAndIsr request correlationId 14 from controller 2 epoch 8 starting the become-leader transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2021-06-18 00:15:36,247] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set(SDC-DISTR-STATUS-TOPIC-AUTO-0) (kafka.server.ReplicaFetcherManager)
[2021-06-18 00:15:36,259] INFO [Log partition=SDC-DISTR-STATUS-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Loading producer state till offset 0 with message format version 2 (kafka.log.Log)
[2021-06-18 00:15:36,262] INFO [Log partition=SDC-DISTR-STATUS-TOPIC-AUTO-0, dir=/var/lib/kafka/data] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 12 ms (kafka.log.Log)
[2021-06-18 00:15:36,262] INFO Created log for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in /var/lib/kafka/data with properties {compression.type -> producer, message.downconversion.enable -> true, min.insync.replicas -> 1, segment.jitter.ms -> 0, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.bytes -> 1073741824, retention.ms -> 604800000, flush.messages -> 9223372036854775807, message.format.version -> 2.3-IV1, file.delete.delay.ms -> 60000, max.compaction.lag.ms -> 9223372036854775807, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, segment.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760}. (kafka.log.LogManager)
[2021-06-18 00:15:36,264] INFO [Partition SDC-DISTR-STATUS-TOPIC-AUTO-0 broker=1] No checkpointed highwatermark is found for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (kafka.cluster.Partition)
[2021-06-18 00:15:36,264] INFO Replica loaded for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 with initial high watermark 0 (kafka.cluster.Replica)
[2021-06-18 00:15:36,264] INFO [Partition SDC-DISTR-STATUS-TOPIC-AUTO-0 broker=1] SDC-DISTR-STATUS-TOPIC-AUTO-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2021-06-18 00:15:36,266] TRACE [Broker id=1] Stopped fetchers as part of become-leader request from controller 2 epoch 8 with correlation id 14 for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (last update controller epoch 8) (state.change.logger)
[2021-06-18 00:15:36,266] TRACE [Broker id=1] Completed LeaderAndIsr request correlationId 14 from controller 2 epoch 8 for the become-leader transition for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 (state.change.logger)
[2021-06-18 00:15:36,268] TRACE [Broker id=1] Cached leader info PartitionState(controllerEpoch=8, leader=1, leaderEpoch=0, isr=[1], zkVersion=0, replicas=[1], offlineReplicas=[]) for partition SDC-DISTR-STATUS-TOPIC-AUTO-0 in response to UpdateMetadata request sent by controller 2 epoch 8 with correlation id 15 (state.change.logger)
[2021-06-18 00:15:40,110] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:15:40,110] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:17:28,627] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:17:28,627] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:17:28,654] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:17:28,654] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:17:28,661] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:17:28,661] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:17:28,664] INFO [GroupCoordinator 1]: Preparing to rebalance group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-46) (reason: Adding new member policy-id-40571818-8c9d-4c34-872a-0311dbf7b143 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:17:31,665] INFO [GroupCoordinator 1]: Stabilized group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:17:31,667] INFO [GroupCoordinator 1]: Assignment received from leader for group policy-group--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:17:54,670] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:17:54,670] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:18:26,378] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:18:26,378] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:18:26,382] INFO [GroupCoordinator 1]: Preparing to rebalance group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-19) (reason: Adding new member ves-openapi-manager-258c682c-3337-48de-9797-f4634145b8cb with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:18:29,382] INFO [GroupCoordinator 1]: Stabilized group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:18:29,384] INFO [GroupCoordinator 1]: Assignment received from leader for group ves-openapi-manager--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:18:33,209] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:18:33,209] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:19:40,378] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:19:40,378] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:19:40,381] INFO [GroupCoordinator 1]: Preparing to rebalance group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-1) (reason: Adding new member clamp-afdca24f-ee18-451b-a7b3-1b05d4b6d3e7 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:19:43,382] INFO [GroupCoordinator 1]: Stabilized group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:19:43,384] INFO [GroupCoordinator 1]: Assignment received from leader for group clamp--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:19:55,030] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:19:55,030] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:19:55,038] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:19:55,038] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:19:55,041] INFO [GroupCoordinator 1]: Preparing to rebalance group 8ef4854d-e52f-4121-b62d-e4195f5166cb--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-28) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-6e4f4cea-0c80-43e3-921a-1fcda12dd6d9 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:19:58,043] INFO [GroupCoordinator 1]: Stabilized group 8ef4854d-e52f-4121-b62d-e4195f5166cb--POLICY-PDP-PAP generation 1 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:19:58,044] INFO [GroupCoordinator 1]: Assignment received from leader for group 8ef4854d-e52f-4121-b62d-e4195f5166cb--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:19:58,169] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:19:58,170] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:20:37,760] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:20:37,760] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:21:37,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-f3cb2572-f8ad-46a7-9eca-ea569384f87c in group eda2fc80-ae9d-46f3-85fd-5366275760ff--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:21:37,312] INFO [GroupCoordinator 1]: Preparing to rebalance group eda2fc80-ae9d-46f3-85fd-5366275760ff--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-22) (reason: removing member onap-policy-pap-5d9d484bff-z2229-f3cb2572-f8ad-46a7-9eca-ea569384f87c on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:21:37,312] INFO [GroupCoordinator 1]: Group eda2fc80-ae9d-46f3-85fd-5366275760ff--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-22) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:22:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:25:25,582] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:25:25,582] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:27:07,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-6e4f4cea-0c80-43e3-921a-1fcda12dd6d9 in group 8ef4854d-e52f-4121-b62d-e4195f5166cb--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:27:07,313] INFO [GroupCoordinator 1]: Preparing to rebalance group 8ef4854d-e52f-4121-b62d-e4195f5166cb--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-28) (reason: removing member onap-policy-pap-5d9d484bff-z2229-6e4f4cea-0c80-43e3-921a-1fcda12dd6d9 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:27:07,313] INFO [GroupCoordinator 1]: Group 8ef4854d-e52f-4121-b62d-e4195f5166cb--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:27:15,521] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:27:15,522] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:27:15,528] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:27:15,528] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:27:15,531] INFO [GroupCoordinator 1]: Preparing to rebalance group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO in state PreparingRebalance with old generation 0 (__consumer_offsets-28) (reason: Adding new member sdc-COpenSource-Env11-sdnc-dockero-31f8e5f6-cd00-4bc0-b047-027e44750573 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:27:18,532] INFO [GroupCoordinator 1]: Stabilized group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO generation 1 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:27:18,534] INFO [GroupCoordinator 1]: Assignment received from leader for group sdc-OpenSource-Env1-sdnc-dockero--SDC-DISTR-NOTIF-TOPIC-AUTO for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:30:55,575] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:30:55,575] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:32:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:35:38,023] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:35:38,023] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:36:25,586] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:36:25,586] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:41:54,372] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:41:54,372] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:42:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:45:38,241] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:45:38,241] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:47:24,524] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:47:24,524] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:47:24,531] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:47:24,531] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:47:24,534] INFO [GroupCoordinator 1]: Preparing to rebalance group 166651f2-a128-4ad8-9fd2-ba57da9def88--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-16) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-63351546-0ec0-4895-90bc-ab3f01aff949 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:47:27,535] INFO [GroupCoordinator 1]: Stabilized group 166651f2-a128-4ad8-9fd2-ba57da9def88--POLICY-PDP-PAP generation 1 (__consumer_offsets-16) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:47:27,537] INFO [GroupCoordinator 1]: Assignment received from leader for group 166651f2-a128-4ad8-9fd2-ba57da9def88--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:47:27,652] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:47:27,652] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:52:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 00:52:55,865] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:52:55,865] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:54:37,311] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-63351546-0ec0-4895-90bc-ab3f01aff949 in group 166651f2-a128-4ad8-9fd2-ba57da9def88--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:54:37,312] INFO [GroupCoordinator 1]: Preparing to rebalance group 166651f2-a128-4ad8-9fd2-ba57da9def88--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-16) (reason: removing member onap-policy-pap-5d9d484bff-z2229-63351546-0ec0-4895-90bc-ab3f01aff949 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:54:37,312] INFO [GroupCoordinator 1]: Group 166651f2-a128-4ad8-9fd2-ba57da9def88--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-16) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 00:58:22,936] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:58:22,936] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:58:26,073] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 00:58:26,073] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:02:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
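The "Removed 0 expired offsets" records from GroupMetadataManager recur on a fixed ten-minute cadence (00:12, 00:22, ... 01:02), consistent with the broker's offset-cleanup task, whose period is offsets.retention.check.interval.ms (600000 ms by default); a zero count simply means no committed offsets had outlived the offsets retention window yet. The sketch below inspects the relevant broker settings with the AdminClient; note that the broker-1 hostname is inferred from the broker-0/broker-2 naming pattern in the fetcher records above, so treat it as an assumption.

    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.Config;
    import org.apache.kafka.common.config.ConfigResource;

    public class InspectOffsetRetentionSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Assumed hostname for broker 1, by analogy with brokers 0 and 2 in this log.
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                    "onap-message-router-kafka-1.message-router-kafka.onap.svc.cluster.local:9092");
            try (AdminClient admin = AdminClient.create(props)) {
                ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "1");
                Config cfg = admin.describeConfigs(List.of(broker)).all().get().get(broker);
                // How long committed offsets are retained after a group becomes empty,
                // and how often the cleanup task that writes the records above runs.
                System.out.println(cfg.get("offsets.retention.minutes"));
                System.out.println(cfg.get("offsets.retention.check.interval.ms"));
            }
        }
    }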
[2021-06-18 01:04:02,635] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:04:02,635] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:04:02,638] INFO [GroupCoordinator 1]: Preparing to rebalance group f6064c30-57b1-45a7-9a13-b1b0efd51f6d--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-1) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-e687d589-4006-42c4-acf3-e29312f0c0a3 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:04:05,639] INFO [GroupCoordinator 1]: Stabilized group f6064c30-57b1-45a7-9a13-b1b0efd51f6d--POLICY-PDP-PAP generation 1 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:04:05,641] INFO [GroupCoordinator 1]: Assignment received from leader for group f6064c30-57b1-45a7-9a13-b1b0efd51f6d--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:04:05,755] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:04:05,755] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:09:32,631] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:09:32,632] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:09:32,638] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:09:32,638] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:09:32,640] INFO [GroupCoordinator 1]: Preparing to rebalance group c1601874-ce3a-48b6-b7ef-1e203b4a8fa7--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-19) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-c44ad93c-f367-43d1-8f9e-159aea8cb848 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:09:35,641] INFO [GroupCoordinator 1]: Stabilized group c1601874-ce3a-48b6-b7ef-1e203b4a8fa7--POLICY-PDP-PAP generation 1 (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:09:35,643] INFO [GroupCoordinator 1]: Assignment received from leader for group c1601874-ce3a-48b6-b7ef-1e203b4a8fa7--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:09:35,756] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:09:35,756] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:11:22,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-e687d589-4006-42c4-acf3-e29312f0c0a3 in group f6064c30-57b1-45a7-9a13-b1b0efd51f6d--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:11:22,312] INFO [GroupCoordinator 1]: Preparing to rebalance group f6064c30-57b1-45a7-9a13-b1b0efd51f6d--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-1) (reason: removing member onap-policy-pap-5d9d484bff-z2229-e687d589-4006-42c4-acf3-e29312f0c0a3 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:11:22,313] INFO [GroupCoordinator 1]: Group f6064c30-57b1-45a7-9a13-b1b0efd51f6d--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:12:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 01:15:02,814] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:15:02,814] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:15:05,946] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:15:05,946] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:16:52,311] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-c44ad93c-f367-43d1-8f9e-159aea8cb848 in group c1601874-ce3a-48b6-b7ef-1e203b4a8fa7--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:16:52,312] INFO [GroupCoordinator 1]: Preparing to rebalance group c1601874-ce3a-48b6-b7ef-1e203b4a8fa7--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-19) (reason: removing member onap-policy-pap-5d9d484bff-z2229-c44ad93c-f367-43d1-8f9e-159aea8cb848 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:16:52,312] INFO [GroupCoordinator 1]: Group c1601874-ce3a-48b6-b7ef-1e203b4a8fa7--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-19) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:20:32,741] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:20:32,741] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:20:35,880] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:20:35,880] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:22:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 01:26:02,829] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:26:02,829] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:26:02,832] INFO [GroupCoordinator 1]: Preparing to rebalance group 04bc75dd-a715-4e2d-b5f0-2f44960ce844--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-49) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-a7883e20-91d9-4007-ba30-5559b6c8404f with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:26:05,833] INFO [GroupCoordinator 1]: Stabilized group 04bc75dd-a715-4e2d-b5f0-2f44960ce844--POLICY-PDP-PAP generation 1 (__consumer_offsets-49) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:26:05,834] INFO [GroupCoordinator 1]: Assignment received from leader for group 04bc75dd-a715-4e2d-b5f0-2f44960ce844--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:26:05,950] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:26:05,950] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:30:39,006] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:30:39,006] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:31:34,057] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:31:34,057] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:32:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 01:33:07,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-a7883e20-91d9-4007-ba30-5559b6c8404f in group 04bc75dd-a715-4e2d-b5f0-2f44960ce844--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:33:07,312] INFO [GroupCoordinator 1]: Preparing to rebalance group 04bc75dd-a715-4e2d-b5f0-2f44960ce844--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-49) (reason: removing member onap-policy-pap-5d9d484bff-z2229-a7883e20-91d9-4007-ba30-5559b6c8404f on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:33:07,312] INFO [GroupCoordinator 1]: Group 04bc75dd-a715-4e2d-b5f0-2f44960ce844--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-49) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:37:04,752] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:37:04,752] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:37:04,755] INFO [GroupCoordinator 1]: Preparing to rebalance group df69ab47-af4e-4a35-8046-5d56afca0859--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-28) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-c738fdd9-b6d8-4cbf-8b48-d8caa551e897 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:37:07,757] INFO [GroupCoordinator 1]: Stabilized group df69ab47-af4e-4a35-8046-5d56afca0859--POLICY-PDP-PAP generation 1 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:37:07,758] INFO [GroupCoordinator 1]: Assignment received from leader for group df69ab47-af4e-4a35-8046-5d56afca0859--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:37:07,871] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:37:07,871] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:42:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 01:42:38,544] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:42:38,544] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:44:22,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-c738fdd9-b6d8-4cbf-8b48-d8caa551e897 in group df69ab47-af4e-4a35-8046-5d56afca0859--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:44:22,312] INFO [GroupCoordinator 1]: Preparing to rebalance group df69ab47-af4e-4a35-8046-5d56afca0859--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-28) (reason: removing member onap-policy-pap-5d9d484bff-z2229-c738fdd9-b6d8-4cbf-8b48-d8caa551e897 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:44:22,312] INFO [GroupCoordinator 1]: Group df69ab47-af4e-4a35-8046-5d56afca0859--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 01:45:39,252] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:45:39,252] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:48:05,020] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:48:05,021] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:48:08,151] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:48:08,151] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:52:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 01:53:45,088] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 01:53:45,088] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:02:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 02:04:30,114] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:04:30,115] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:04:33,250] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:04:33,250] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:05:39,633] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:05:39,633] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:10:02,336] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:10:02,337] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:10:05,474] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:10:05,474] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:12:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 02:15:38,308] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:15:38,308] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:21:03,523] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:21:03,524] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:21:03,527] INFO [GroupCoordinator 1]: Preparing to rebalance group 0859e064-eec0-4c2f-b0f2-dd75614d6814--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-46) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-f107886e-78bf-4c80-a984-781b223c9d07 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:21:06,528] INFO [GroupCoordinator 1]: Stabilized group 0859e064-eec0-4c2f-b0f2-dd75614d6814--POLICY-PDP-PAP generation 1 (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:21:06,530] INFO [GroupCoordinator 1]: Assignment received from leader for group 0859e064-eec0-4c2f-b0f2-dd75614d6814--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:21:06,643] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:21:06,643] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:22:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 02:26:34,294] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:26:34,294] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:28:07,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-f107886e-78bf-4c80-a984-781b223c9d07 in group 0859e064-eec0-4c2f-b0f2-dd75614d6814--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:28:07,312] INFO [GroupCoordinator 1]: Preparing to rebalance group 0859e064-eec0-4c2f-b0f2-dd75614d6814--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-46) (reason: removing member onap-policy-pap-5d9d484bff-z2229-f107886e-78bf-4c80-a984-781b223c9d07 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:28:07,312] INFO [GroupCoordinator 1]: Group 0859e064-eec0-4c2f-b0f2-dd75614d6814--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-46) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:30:40,144] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:30:40,144] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:32:03,439] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:32:03,439] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:32:06,568] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:32:06,568] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:32:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 02:37:36,265] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:37:36,265] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:42:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 02:43:05,855] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:43:05,855] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:48:44,539] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:48:44,539] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:48:44,545] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:48:44,545] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:48:44,548] INFO [GroupCoordinator 1]: Preparing to rebalance group 82c26290-f97b-4c10-9fd8-1f78779e8b05--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-13) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-02b4fa2a-0a33-4a32-8d35-9219dd89b7f9 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:48:47,550] INFO [GroupCoordinator 1]: Stabilized group 82c26290-f97b-4c10-9fd8-1f78779e8b05--POLICY-PDP-PAP generation 1 (__consumer_offsets-13) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:48:47,552] INFO [GroupCoordinator 1]: Assignment received from leader for group 82c26290-f97b-4c10-9fd8-1f78779e8b05--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:48:47,668] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:48:47,668] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:52:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2021-06-18 02:54:24,915] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:54:24,915] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:54:24,918] INFO [GroupCoordinator 1]: Preparing to rebalance group 1126ff8a-62d5-4c38-82dd-e27c061a8606--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-10) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-090a2b6c-376e-4588-96c9-9fbb6e6dcf7e with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:54:27,919] INFO [GroupCoordinator 1]: Stabilized group 1126ff8a-62d5-4c38-82dd-e27c061a8606--POLICY-PDP-PAP generation 1 (__consumer_offsets-10) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:54:27,921] INFO [GroupCoordinator 1]: Assignment received from leader for group 1126ff8a-62d5-4c38-82dd-e27c061a8606--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:54:28,037] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:54:28,037] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:56:22,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-02b4fa2a-0a33-4a32-8d35-9219dd89b7f9 in group 82c26290-f97b-4c10-9fd8-1f78779e8b05--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:56:22,313] INFO [GroupCoordinator 1]: Preparing to rebalance group 82c26290-f97b-4c10-9fd8-1f78779e8b05--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-13) (reason: removing member onap-policy-pap-5d9d484bff-z2229-02b4fa2a-0a33-4a32-8d35-9219dd89b7f9 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:56:22,313] INFO [GroupCoordinator 1]: Group 82c26290-f97b-4c10-9fd8-1f78779e8b05--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-13) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 02:59:54,430] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:59:54,431] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:59:57,562] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 02:59:57,562] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 03:01:37,313] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-090a2b6c-376e-4588-96c9-9fbb6e6dcf7e in group 1126ff8a-62d5-4c38-82dd-e27c061a8606--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 03:01:37,313] INFO [GroupCoordinator 1]: Preparing to rebalance group 1126ff8a-62d5-4c38-82dd-e27c061a8606--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-10) (reason: removing member onap-policy-pap-5d9d484bff-z2229-090a2b6c-376e-4588-96c9-9fbb6e6dcf7e on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 03:01:37,313] INFO [GroupCoordinator 1]: Group
1126ff8a-62d5-4c38-82dd-e27c061a8606--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-10) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 03:02:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 03:05:23,722] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:05:23,722] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:05:26,856] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:05:26,856] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:10:51,521] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:10:51,521] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:10:54,647] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:10:54,647] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:12:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 03:16:26,766] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:16:26,766] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:21:53,023] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:21:53,024] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:21:56,152] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:21:56,152] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:22:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 03:27:26,052] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:27:26,052] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:30:41,152] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:30:41,152] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:32:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 03:32:57,752] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:32:57,752] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:38:21,515] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:38:21,515] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:38:21,521] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:38:21,521] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:38:21,524] INFO [GroupCoordinator 1]: Preparing to rebalance group 24ee979a-cc10-48ea-8cce-ef1fa106d756--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-7) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-1a6a4d4a-41b8-4dd3-b92a-dd4c9a85b130 with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 03:38:24,525] INFO [GroupCoordinator 1]: Stabilized group 24ee979a-cc10-48ea-8cce-ef1fa106d756--POLICY-PDP-PAP generation 1 (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 03:38:24,527] INFO [GroupCoordinator 1]: Assignment received from leader for group 24ee979a-cc10-48ea-8cce-ef1fa106d756--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 03:38:24,639] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:38:24,639] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:42:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 03:44:08,940] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:44:08,940] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:45:41,474] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:45:41,474] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:45:52,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-1a6a4d4a-41b8-4dd3-b92a-dd4c9a85b130 in group 24ee979a-cc10-48ea-8cce-ef1fa106d756--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 03:45:52,313] INFO [GroupCoordinator 1]: Preparing to rebalance group 24ee979a-cc10-48ea-8cce-ef1fa106d756--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-7) (reason: removing member onap-policy-pap-5d9d484bff-z2229-1a6a4d4a-41b8-4dd3-b92a-dd4c9a85b130 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 03:45:52,313] INFO [GroupCoordinator 1]: Group 24ee979a-cc10-48ea-8cce-ef1fa106d756--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 03:49:34,755] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:49:34,755] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:52:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 03:55:04,766] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:55:04,767] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:55:41,657] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 03:55:41,657] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:00:35,062] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:00:35,062] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:02:07,928] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 0 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 04:05:41,865] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:05:41,865] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:06:02,646] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:06:02,646] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:06:05,779] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:06:05,779] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:11:30,915] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:11:30,915] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:11:30,922] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:11:30,922] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:11:30,925] INFO [GroupCoordinator 1]: Preparing to rebalance group 76a7221f-986c-481f-b4d2-e4348f00170a--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-13) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-98c11618-52eb-4288-8328-0b4ac8fae451 with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:11:33,926] INFO [GroupCoordinator 1]: Stabilized group 76a7221f-986c-481f-b4d2-e4348f00170a--POLICY-PDP-PAP generation 1 (__consumer_offsets-13) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:11:33,928] INFO [GroupCoordinator 1]: Assignment received from leader for group 76a7221f-986c-481f-b4d2-e4348f00170a--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:11:34,040] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:11:34,040] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:12:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 04:17:04,916] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:17:04,916] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:17:04,919] INFO [GroupCoordinator 1]: Preparing to rebalance group 1ca6cef7-30ba-4ca4-ab61-56cda4a0400a--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-4) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-5319a26f-d2e7-45a0-ad0d-3635bcde3cbe with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:17:07,920] INFO [GroupCoordinator 1]: Stabilized group 1ca6cef7-30ba-4ca4-ab61-56cda4a0400a--POLICY-PDP-PAP generation 1 (__consumer_offsets-4) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:17:07,921] INFO [GroupCoordinator 1]: Assignment received from leader for group 1ca6cef7-30ba-4ca4-ab61-56cda4a0400a--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:17:08,036] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:17:08,036] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:18:37,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-98c11618-52eb-4288-8328-0b4ac8fae451 in group 76a7221f-986c-481f-b4d2-e4348f00170a--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:18:37,312] INFO [GroupCoordinator 1]: Preparing to rebalance group 76a7221f-986c-481f-b4d2-e4348f00170a--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-13) (reason: removing member onap-policy-pap-5d9d484bff-z2229-98c11618-52eb-4288-8328-0b4ac8fae451 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:18:37,312] INFO [GroupCoordinator 1]: Group 76a7221f-986c-481f-b4d2-e4348f00170a--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-13) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:20:42,137] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:20:42,137] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:22:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 04:22:35,171] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:22:35,171] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:24:22,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-5319a26f-d2e7-45a0-ad0d-3635bcde3cbe in group 1ca6cef7-30ba-4ca4-ab61-56cda4a0400a--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:24:22,312] INFO [GroupCoordinator 1]: Preparing to rebalance group 1ca6cef7-30ba-4ca4-ab61-56cda4a0400a--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-4) (reason: removing member onap-policy-pap-5d9d484bff-z2229-5319a26f-d2e7-45a0-ad0d-3635bcde3cbe on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:24:22,312] INFO [GroupCoordinator 1]: Group 1ca6cef7-30ba-4ca4-ab61-56cda4a0400a--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-4) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:28:08,070] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:28:08,070] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:30:42,259] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:30:42,259] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:32:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 0 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 04:38:42,125] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:38:42,125] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:38:42,128] INFO [GroupCoordinator 1]: Preparing to rebalance group 81f28179-b6ca-4181-a614-77576f79b9fa--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-37) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-67b19396-2d69-4f3a-b748-e5981fc5e402 with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:38:45,130] INFO [GroupCoordinator 1]: Stabilized group 81f28179-b6ca-4181-a614-77576f79b9fa--POLICY-PDP-PAP generation 1 (__consumer_offsets-37) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:38:45,132] INFO [GroupCoordinator 1]: Assignment received from leader for group 81f28179-b6ca-4181-a614-77576f79b9fa--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:38:45,248] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:38:45,249] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:40:42,465] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:40:42,465] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:42:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 04:44:24,025] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:44:24,025] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:44:27,149] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:44:27,149] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:46:07,314] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-67b19396-2d69-4f3a-b748-e5981fc5e402 in group 81f28179-b6ca-4181-a614-77576f79b9fa--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:46:07,314] INFO [GroupCoordinator 1]: Preparing to rebalance group 81f28179-b6ca-4181-a614-77576f79b9fa--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-37) (reason: removing member onap-policy-pap-5d9d484bff-z2229-67b19396-2d69-4f3a-b748-e5981fc5e402 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:46:07,314] INFO [GroupCoordinator 1]: Group 81f28179-b6ca-4181-a614-77576f79b9fa--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-37) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:49:53,045] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:49:53,045] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:49:53,049] INFO [GroupCoordinator 1]: Preparing to rebalance group e945c7f4-dd42-4605-98ff-2a09d6c0f391--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-34) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-bc655bdd-5ac7-4602-bca5-87365d4dd2da with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:49:56,053] INFO [GroupCoordinator 1]: Stabilized group e945c7f4-dd42-4605-98ff-2a09d6c0f391--POLICY-PDP-PAP generation 1 (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:49:56,055] INFO [GroupCoordinator 1]: Assignment received from leader for group e945c7f4-dd42-4605-98ff-2a09d6c0f391--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:49:56,170] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:49:56,170] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:50:42,598] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:50:42,598] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:52:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 04:55:27,976] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:55:27,976] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 04:57:07,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-bc655bdd-5ac7-4602-bca5-87365d4dd2da in group e945c7f4-dd42-4605-98ff-2a09d6c0f391--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:57:07,312] INFO [GroupCoordinator 1]: Preparing to rebalance group e945c7f4-dd42-4605-98ff-2a09d6c0f391--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-34) (reason: removing member onap-policy-pap-5d9d484bff-z2229-bc655bdd-5ac7-4602-bca5-87365d4dd2da on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 04:57:07,312] INFO [GroupCoordinator 1]: Group e945c7f4-dd42-4605-98ff-2a09d6c0f391--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-34) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:00:42,802] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:00:42,802] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:00:53,148] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:00:53,148] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:00:53,152] INFO [GroupCoordinator 1]: Preparing to rebalance group 37ef5749-3f48-4298-87ec-04a904bf32cc--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-43) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-bed3eba5-2d87-4897-bd11-36c9e139d2e5 with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:00:56,176] INFO [GroupCoordinator 1]: Stabilized group 37ef5749-3f48-4298-87ec-04a904bf32cc--POLICY-PDP-PAP generation 1 (__consumer_offsets-43) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:00:56,178] INFO [GroupCoordinator 1]: Assignment received from leader for group 37ef5749-3f48-4298-87ec-04a904bf32cc--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:00:56,295] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:00:56,295] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:02:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 05:06:23,721] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:06:23,721] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:06:26,852] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:06:26,852] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:08:07,313] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-bed3eba5-2d87-4897-bd11-36c9e139d2e5 in group 37ef5749-3f48-4298-87ec-04a904bf32cc--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:08:07,313] INFO [GroupCoordinator 1]: Preparing to rebalance group 37ef5749-3f48-4298-87ec-04a904bf32cc--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-43) (reason: removing member onap-policy-pap-5d9d484bff-z2229-bed3eba5-2d87-4897-bd11-36c9e139d2e5 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:08:07,313] INFO [GroupCoordinator 1]: Group 37ef5749-3f48-4298-87ec-04a904bf32cc--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-43) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:11:54,431] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:11:54,431] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:11:54,436] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:11:54,436] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:11:54,439] INFO [GroupCoordinator 1]: Preparing to rebalance group e7267384-b585-4e07-b889-c9ad58cb166b--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-28) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-f894dfc0-218c-41d0-9fcd-a5c64ccedd42 with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:11:57,440] INFO [GroupCoordinator 1]: Stabilized group e7267384-b585-4e07-b889-c9ad58cb166b--POLICY-PDP-PAP generation 1 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:11:57,442] INFO [GroupCoordinator 1]: Assignment received from leader for group e7267384-b585-4e07-b889-c9ad58cb166b--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:11:57,552] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:11:57,552] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:12:07,928] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 0 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 05:17:27,443] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:17:27,443] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:19:07,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-f894dfc0-218c-41d0-9fcd-a5c64ccedd42 in group e7267384-b585-4e07-b889-c9ad58cb166b--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:19:07,313] INFO [GroupCoordinator 1]: Preparing to rebalance group e7267384-b585-4e07-b889-c9ad58cb166b--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-28) (reason: removing member onap-policy-pap-5d9d484bff-z2229-f894dfc0-218c-41d0-9fcd-a5c64ccedd42 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:19:07,313] INFO [GroupCoordinator 1]: Group e7267384-b585-4e07-b889-c9ad58cb166b--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:22:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 05:22:53,615] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:22:53,615] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:22:53,618] INFO [GroupCoordinator 1]: Preparing to rebalance group a70f1fe3-f859-4b3d-a25e-bde29ea272f6--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-7) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-f27a1680-f9ad-4113-a685-982d723e6692 with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:22:56,619] INFO [GroupCoordinator 1]: Stabilized group a70f1fe3-f859-4b3d-a25e-bde29ea272f6--POLICY-PDP-PAP generation 1 (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:22:56,621] INFO [GroupCoordinator 1]: Assignment received from leader for group a70f1fe3-f859-4b3d-a25e-bde29ea272f6--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:22:56,734] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:22:56,734] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:25:43,293] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:25:43,293] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:28:26,121] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:28:26,121] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:28:26,125] INFO [GroupCoordinator 1]: Preparing to rebalance group f008ea6b-0d8c-4f32-9286-055553912a03--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-13) (reason: Adding new member 
onap-policy-pap-5d9d484bff-z2229-da0114e0-ef53-4c10-8bda-4a42dc5aa09c with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:28:29,125] INFO [GroupCoordinator 1]: Stabilized group f008ea6b-0d8c-4f32-9286-055553912a03--POLICY-PDP-PAP generation 1 (__consumer_offsets-13) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:28:29,127] INFO [GroupCoordinator 1]: Assignment received from leader for group f008ea6b-0d8c-4f32-9286-055553912a03--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:28:29,239] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:28:29,239] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:30:07,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-f27a1680-f9ad-4113-a685-982d723e6692 in group a70f1fe3-f859-4b3d-a25e-bde29ea272f6--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:30:07,312] INFO [GroupCoordinator 1]: Preparing to rebalance group a70f1fe3-f859-4b3d-a25e-bde29ea272f6--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-7) (reason: removing member onap-policy-pap-5d9d484bff-z2229-f27a1680-f9ad-4113-a685-982d723e6692 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:30:07,312] INFO [GroupCoordinator 1]: Group a70f1fe3-f859-4b3d-a25e-bde29ea272f6--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-7) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:32:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 05:34:08,052] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:34:08,052] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:35:43,448] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:35:43,448] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:35:52,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-da0114e0-ef53-4c10-8bda-4a42dc5aa09c in group f008ea6b-0d8c-4f32-9286-055553912a03--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:35:52,312] INFO [GroupCoordinator 1]: Preparing to rebalance group f008ea6b-0d8c-4f32-9286-055553912a03--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-13) (reason: removing member onap-policy-pap-5d9d484bff-z2229-da0114e0-ef53-4c10-8bda-4a42dc5aa09c on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:35:52,312] INFO [GroupCoordinator 1]: Group f008ea6b-0d8c-4f32-9286-055553912a03--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-13) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:39:37,221] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:39:37,221] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:39:37,225] INFO [GroupCoordinator 1]: Preparing to rebalance group 1df9a251-daa9-4091-aab7-7028845fca90--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-25) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-cfad1ab2-8a12-4788-86cb-92e8619e5e4b with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:39:40,225] INFO [GroupCoordinator 1]: Stabilized group 1df9a251-daa9-4091-aab7-7028845fca90--POLICY-PDP-PAP generation 1 (__consumer_offsets-25) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:39:40,227] INFO [GroupCoordinator 1]: Assignment received from leader for group 1df9a251-daa9-4091-aab7-7028845fca90--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:39:40,340] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:39:40,340] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:42:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 05:45:14,625] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:45:14,625] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:45:14,628] INFO [GroupCoordinator 1]: Preparing to rebalance group 3f72d21a-dc99-4b59-856a-2544dfa95515--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-43) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-a27cde8f-cea0-4bf4-9c74-39a8b3da4d72 with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:45:17,629] INFO [GroupCoordinator 1]: Stabilized group 3f72d21a-dc99-4b59-856a-2544dfa95515--POLICY-PDP-PAP generation 1 (__consumer_offsets-43) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:45:17,631] INFO [GroupCoordinator 1]: Assignment received from leader for group 3f72d21a-dc99-4b59-856a-2544dfa95515--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:45:17,741] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:45:17,742] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:46:52,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-cfad1ab2-8a12-4788-86cb-92e8619e5e4b in group 1df9a251-daa9-4091-aab7-7028845fca90--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:46:52,313] INFO [GroupCoordinator 1]: Preparing to rebalance group 1df9a251-daa9-4091-aab7-7028845fca90--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-25) (reason: removing member onap-policy-pap-5d9d484bff-z2229-cfad1ab2-8a12-4788-86cb-92e8619e5e4b on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:46:52,313] INFO [GroupCoordinator 1]: Group 1df9a251-daa9-4091-aab7-7028845fca90--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-25) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:50:43,675] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:50:43,675] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:50:43,750] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:50:43,750] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:50:43,753] INFO [GroupCoordinator 1]: Preparing to rebalance group 2263f43a-43b1-4235-a19f-965da28ad8d6--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-13) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-4ad68248-d88a-4639-b883-cc7369905fcb with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:50:46,754] INFO [GroupCoordinator 1]: Stabilized group 2263f43a-43b1-4235-a19f-965da28ad8d6--POLICY-PDP-PAP generation 1 (__consumer_offsets-13) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:50:46,756] INFO [GroupCoordinator 1]: Assignment received from leader for group 2263f43a-43b1-4235-a19f-965da28ad8d6--POLICY-PDP-PAP 
for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:50:46,869] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:50:46,869] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:52:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 05:52:37,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-a27cde8f-cea0-4bf4-9c74-39a8b3da4d72 in group 3f72d21a-dc99-4b59-856a-2544dfa95515--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:52:37,312] INFO [GroupCoordinator 1]: Preparing to rebalance group 3f72d21a-dc99-4b59-856a-2544dfa95515--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-43) (reason: removing member onap-policy-pap-5d9d484bff-z2229-a27cde8f-cea0-4bf4-9c74-39a8b3da4d72 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:52:37,313] INFO [GroupCoordinator 1]: Group 3f72d21a-dc99-4b59-856a-2544dfa95515--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-43) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:56:18,951] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:56:18,951] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 05:57:52,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-4ad68248-d88a-4639-b883-cc7369905fcb in group 2263f43a-43b1-4235-a19f-965da28ad8d6--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:57:52,312] INFO [GroupCoordinator 1]: Preparing to rebalance group 2263f43a-43b1-4235-a19f-965da28ad8d6--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-13) (reason: removing member onap-policy-pap-5d9d484bff-z2229-4ad68248-d88a-4639-b883-cc7369905fcb on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 05:57:52,312] INFO [GroupCoordinator 1]: Group 2263f43a-43b1-4235-a19f-965da28ad8d6--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-13) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:00:43,891] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:00:43,891] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:01:46,551] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:01:46,551] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:01:46,554] INFO [GroupCoordinator 1]: Preparing to rebalance group 2cff32ca-57fa-4e03-a40b-2def7d1c907a--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-10) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-fb5ea1b7-d21f-4781-aa5f-47ccf1c739e9 with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:01:49,556] INFO [GroupCoordinator 1]: Stabilized group 2cff32ca-57fa-4e03-a40b-2def7d1c907a--POLICY-PDP-PAP 
generation 1 (__consumer_offsets-10) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:01:49,558] INFO [GroupCoordinator 1]: Assignment received from leader for group 2cff32ca-57fa-4e03-a40b-2def7d1c907a--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:01:49,673] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:01:49,673] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:02:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 06:07:14,649] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:07:14,649] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:07:17,772] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:07:17,772] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:08:52,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-fb5ea1b7-d21f-4781-aa5f-47ccf1c739e9 in group 2cff32ca-57fa-4e03-a40b-2def7d1c907a--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:08:52,312] INFO [GroupCoordinator 1]: Preparing to rebalance group 2cff32ca-57fa-4e03-a40b-2def7d1c907a--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-10) (reason: removing member onap-policy-pap-5d9d484bff-z2229-fb5ea1b7-d21f-4781-aa5f-47ccf1c739e9 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:08:52,312] INFO [GroupCoordinator 1]: Group 2cff32ca-57fa-4e03-a40b-2def7d1c907a--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-10) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:10:44,055] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:10:44,055] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:12:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
(kafka.coordinator.group.GroupMetadataManager) [2021-06-18 06:12:43,925] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:12:43,925] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:12:43,931] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:12:43,931] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:12:43,934] INFO [GroupCoordinator 1]: Preparing to rebalance group 79192f3d-048c-4865-8b56-18455f1cd2d1--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-31) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-39e7795b-8668-468e-aaa3-c3ae3d797a2a with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:12:46,934] INFO [GroupCoordinator 1]: Stabilized group 79192f3d-048c-4865-8b56-18455f1cd2d1--POLICY-PDP-PAP generation 1 (__consumer_offsets-31) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:12:46,936] INFO [GroupCoordinator 1]: Assignment received from leader for group 79192f3d-048c-4865-8b56-18455f1cd2d1--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:12:47,047] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:12:47,047] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:18:12,743] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:18:12,743] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:18:12,746] INFO [GroupCoordinator 1]: Preparing to rebalance group a9ee43e4-9bea-4b44-a72f-b194202b2996--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-28) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-a0b280f3-7e87-4302-88f6-005cf747ecb6 with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:18:15,746] INFO [GroupCoordinator 1]: Stabilized group a9ee43e4-9bea-4b44-a72f-b194202b2996--POLICY-PDP-PAP generation 1 (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:18:15,748] INFO [GroupCoordinator 1]: Assignment received from leader for group a9ee43e4-9bea-4b44-a72f-b194202b2996--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:18:15,857] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:18:15,857] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:19:52,311] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-39e7795b-8668-468e-aaa3-c3ae3d797a2a in group 79192f3d-048c-4865-8b56-18455f1cd2d1--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:19:52,311] INFO [GroupCoordinator 1]: Preparing to rebalance group 79192f3d-048c-4865-8b56-18455f1cd2d1--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-31) (reason: 
removing member onap-policy-pap-5d9d484bff-z2229-39e7795b-8668-468e-aaa3-c3ae3d797a2a on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:19:52,311] INFO [GroupCoordinator 1]: Group 79192f3d-048c-4865-8b56-18455f1cd2d1--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-31) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:22:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2021-06-18 06:23:59,547] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:23:59,547] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:25:37,312] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-a0b280f3-7e87-4302-88f6-005cf747ecb6 in group a9ee43e4-9bea-4b44-a72f-b194202b2996--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:25:37,312] INFO [GroupCoordinator 1]: Preparing to rebalance group a9ee43e4-9bea-4b44-a72f-b194202b2996--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-28) (reason: removing member onap-policy-pap-5d9d484bff-z2229-a0b280f3-7e87-4302-88f6-005cf747ecb6 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:25:37,313] INFO [GroupCoordinator 1]: Group a9ee43e4-9bea-4b44-a72f-b194202b2996--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-28) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:29:32,636] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:29:32,636] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:29:32,640] INFO [GroupCoordinator 1]: Preparing to rebalance group e808a695-77bb-4fe3-97e0-b478c04df22b--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-22) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-421a9710-f3d2-447f-8fc6-507005a565ad with group instanceid None) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:29:35,640] INFO [GroupCoordinator 1]: Stabilized group e808a695-77bb-4fe3-97e0-b478c04df22b--POLICY-PDP-PAP generation 1 (__consumer_offsets-22) (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:29:35,642] INFO [GroupCoordinator 1]: Assignment received from leader for group e808a695-77bb-4fe3-97e0-b478c04df22b--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator) [2021-06-18 06:29:35,755] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:29:35,755] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:30:44,441] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:30:44,441] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider) [2021-06-18 06:32:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. 
[2021-06-18 06:35:05,873] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:35:05,873] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:36:37,311] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-421a9710-f3d2-447f-8fc6-507005a565ad in group e808a695-77bb-4fe3-97e0-b478c04df22b--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:36:37,312] INFO [GroupCoordinator 1]: Preparing to rebalance group e808a695-77bb-4fe3-97e0-b478c04df22b--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-22) (reason: removing member onap-policy-pap-5d9d484bff-z2229-421a9710-f3d2-447f-8fc6-507005a565ad on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:36:37,312] INFO [GroupCoordinator 1]: Group e808a695-77bb-4fe3-97e0-b478c04df22b--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-22) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:40:37,522] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:40:37,522] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:40:37,526] INFO [GroupCoordinator 1]: Preparing to rebalance group 72c67338-8ea5-4b8f-b007-e2788cd3789e--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-1) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-c5dc516b-7934-439a-b30a-745f58f3bb28 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:40:40,527] INFO [GroupCoordinator 1]: Stabilized group 72c67338-8ea5-4b8f-b007-e2788cd3789e--POLICY-PDP-PAP generation 1 (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:40:40,529] INFO [GroupCoordinator 1]: Assignment received from leader for group 72c67338-8ea5-4b8f-b007-e2788cd3789e--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:40:40,640] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:40:40,640] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:40:44,633] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:40:44,634] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:42:07,929] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
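Each LeaveGroup above bumps the group to generation 2 and leaves it empty; since the UUID-named groups are never rejoined, they accumulate until offset retention expires them. A sketch like the following could enumerate the leftovers (AdminClient.deleteConsumerGroups could then remove them); the address and the suffix filter mirror the group names in this log but are otherwise placeholders:

    import java.util.Properties;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;

    public class ListLeftoverGroups {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "message-router-kafka:9092");
            try (AdminClient admin = AdminClient.create(props)) {
                // Print every ephemeral "<uuid>--POLICY-PDP-PAP" group still
                // known to the coordinator, including the now-empty ones.
                admin.listConsumerGroups().all().get().stream()
                        .filter(g -> g.groupId().endsWith("--POLICY-PDP-PAP"))
                        .forEach(g -> System.out.println(g.groupId()));
            }
        }
    }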
[2021-06-18 06:46:12,957] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:46:12,957] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:46:12,963] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:46:12,964] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:46:12,967] INFO [GroupCoordinator 1]: Preparing to rebalance group 730f2d3e-5b7f-4c4c-b927-fa063e6bae52--POLICY-PDP-PAP in state PreparingRebalance with old generation 0 (__consumer_offsets-31) (reason: Adding new member onap-policy-pap-5d9d484bff-z2229-6746fda3-8a9c-481f-ad28-8d8d6490d374 with group instanceid None) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:46:15,967] INFO [GroupCoordinator 1]: Stabilized group 730f2d3e-5b7f-4c4c-b927-fa063e6bae52--POLICY-PDP-PAP generation 1 (__consumer_offsets-31) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:46:15,971] INFO [GroupCoordinator 1]: Assignment received from leader for group 730f2d3e-5b7f-4c4c-b927-fa063e6bae52--POLICY-PDP-PAP for generation 1 (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:46:16,084] INFO ^Event received with username admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:46:16,084] INFO by passes the authentication for the admin admin (org.onap.dmaap.commonauth.kafka.base.authorization.Cadi3AAFProvider)
[2021-06-18 06:47:52,311] INFO [GroupCoordinator 1]: Member onap-policy-pap-5d9d484bff-z2229-c5dc516b-7934-439a-b30a-745f58f3bb28 in group 72c67338-8ea5-4b8f-b007-e2788cd3789e--POLICY-PDP-PAP has left, removing it from the group (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:47:52,312] INFO [GroupCoordinator 1]: Preparing to rebalance group 72c67338-8ea5-4b8f-b007-e2788cd3789e--POLICY-PDP-PAP in state PreparingRebalance with old generation 1 (__consumer_offsets-1) (reason: removing member onap-policy-pap-5d9d484bff-z2229-c5dc516b-7934-439a-b30a-745f58f3bb28 on LeaveGroup) (kafka.coordinator.group.GroupCoordinator)
[2021-06-18 06:47:52,312] INFO [GroupCoordinator 1]: Group 72c67338-8ea5-4b8f-b007-e2788cd3789e--POLICY-PDP-PAP with generation 2 is now empty (__consumer_offsets-1) (kafka.coordinator.group.GroupCoordinator)
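Finally, the recurring pair "^Event received with username admin" / "by passes the authentication for the admin admin" (the stray caret and the "by passes" spelling are verbatim from the ONAP Cadi3AAFProvider) is the broker-side CADI hook short-circuiting the AAF lookup for the admin user on every client connection. For reference, a hedged sketch of client-side SASL/PLAIN settings that would land on this broker's SASL_PLAINTEXT listeners; the stock PlainLoginModule and the placeholder password stand in for the deployment's CADI-managed credentials:

    import java.util.Properties;
    import org.apache.kafka.clients.CommonClientConfigs;
    import org.apache.kafka.common.config.SaslConfigs;

    public final class CadiSaslProps {
        private CadiSaslProps() {}

        // SASL settings a client needs against this broker's SASL_PLAINTEXT
        // listeners; the password is a placeholder, not the deployment's.
        public static Properties saslPlain(String user, String password) {
            Properties props = new Properties();
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
            props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
            props.put(SaslConfigs.SASL_JAAS_CONFIG,
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                    + "username=\"" + user + "\" password=\"" + password + "\";");
            return props;
        }
    }

Merging these into the consumer sketch above, e.g. props.putAll(CadiSaslProps.saslPlain("admin", "<placeholder>")), would produce one "^Event received" / "by passes" pair per connection, matching the cadence seen throughout this log.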