目录
定位启动项
需要解决的问题:已存在kafka镜像, 需要配置主题的过期时间。
获取容器ID
[root@localhost files]# docker container ls
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1a5a10f50ee9 nginx "nginx -g 'daemon of…" 28 minutes ago Up 28 minutes 0.0.0.0:80->80/tcp nginx
e253113fa975 data_engine_v2 "./entry" 28 minutes ago Up 28 minutes 0.0.0.0:8999->8999/tcp data_engine
396085fec743 kafka "start-kafka.sh" 28 minutes ago Up 28 minutes 0.0.0.0:9092->9092/tcp kafka
18aad55b9f7c php:encrypt-safe "docker-php-entrypoi…" 28 minutes ago Up 28 minutes 0.0.0.0:9000->9000/tcp php-fpm
6a7baf75c448 mysql "docker-entrypoint.s…" 28 minutes ago Up 28 minutes 0.0.0.0:3306->3306/tcp, 33060/tcp mysql-db
20d864a50688 es:6.3.2 "/usr/local/bin/dock…" 28 minutes ago Up 28 minutes 0.0.0.0:9200->9200/tcp, 9300/tcp elasticsearch
89117b4ab8e2 zookeeper "/bin/sh -c '/usr/sb…" 28 minutes ago Up 28 minutes 22/tcp, 2888/tcp, 3888/tcp, 0.0.0.0:2181->2181/tcp zookeeper
c438951bf60d redis "docker-entrypoint.s…" 28 minutes ago Up 28 minutes 0.0.0.0:6379->6379/tcp redis-db
f2cb059a1383 mongo "docker-entrypoint.s…" 28 minutes ago Up 28 minutes 0.0.0.0:27017->27017/tcp mongodb
获取容器信息(重点关注输出中的启动脚本路径 "Path": "start-kafka.sh")
[root@localhost files]# docker inspect 396085fec743
[
{
"Id": "396085fec743c315560f73575f4cae477f122c4e9ebc69fd097e3385b9b0e27b",
"Created": "2020-04-10T07:38:01.568696529Z",
"Path": "start-kafka.sh",
"Args": [],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 7583,
"ExitCode": 0,
"Error": "",
"StartedAt": "2020-04-10T07:38:03.615514944Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:988f4a6ca13cd52a7407932aa36b41deb27a5352fab5ce4e3a0fc1647d5ea588",
"ResolvConfPath": "/var/lib/docker/containers/396085fec743c315560f73575f4cae477f122c4e9ebc69fd097e3385b9b0e27b/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/396085fec743c315560f73575f4cae477f122c4e9ebc69fd097e3385b9b0e27b/hostname",
"HostsPath": "/var/lib/docker/containers/396085fec743c315560f73575f4cae477f122c4e9ebc69fd097e3385b9b0e27b/hosts",
"LogPath": "/var/lib/docker/containers/396085fec743c315560f73575f4cae477f122c4e9ebc69fd097e3385b9b0e27b/396085fec743c315560f73575f4cae477f122c4e9ebc69fd097e3385b9b0e27b-json.log",
"Name": "/kafka",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/etc/localtime:/etc/localtime:rw",
"/home/geagle/deploy/box/files/kafka:/kafka:rw"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "files_my-net",
"PortBindings": {
"9092/tcp": [
{
"HostIp": "",
"HostPort": "9092"
}
]
},
"RestartPolicy": {
"Name": "always",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": [],
"CapAdd": null,
"CapDrop": null,
"Dns": null,
"DnsOptions": null,
"DnsSearch": null,
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "shareable",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"label=disable"
],
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"ConsoleSize": [
0,
0
],
"Isolation": "",
"CpuShares": 0,
"Memory": 0,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": null,
"BlkioDeviceReadBps": null,
"BlkioDeviceWriteBps": null,
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": null,
"DeviceCgroupRules": null,
"DiskQuota": 0,
"KernelMemory": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": 0,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/75d9e51a679fa23e176456d0fa49b8069f90631b8ef9bb869ca5cb81dd235bd3-init/diff:/var/lib/docker/overlay2/bebc0605cbef69d6cf36a64a5834e4b686ebfc7ca25c0236533899ee8ee51033/diff:/var/lib/docker/overlay2/4d9d84e80770cc69f3cc0d7b39088c11a88c760ce1cf8f2f03ce525cde8d6a2e/diff:/var/lib/docker/overlay2/905eb7221ce54bf4fad87cc9b97e14e231a7bc91faf88ae79a841281cacc12d8/diff:/var/lib/docker/overlay2/37e5ded59b83a802f0aee52b1fdd728773c456c3780bc8e767d87b7a2960dd1f/diff:/var/lib/docker/overlay2/0eea6ffeecde2607379a4a3d4ab7e271d5d9ba15e52117007ee917468a273be0/diff:/var/lib/docker/overlay2/a422bf7216c1fb7a0d270aa50d62e35d8a8c293f95d5f88eee298be831a7ba96/diff",
"MergedDir": "/var/lib/docker/overlay2/75d9e51a679fa23e176456d0fa49b8069f90631b8ef9bb869ca5cb81dd235bd3/merged",
"UpperDir": "/var/lib/docker/overlay2/75d9e51a679fa23e176456d0fa49b8069f90631b8ef9bb869ca5cb81dd235bd3/diff",
"WorkDir": "/var/lib/docker/overlay2/75d9e51a679fa23e176456d0fa49b8069f90631b8ef9bb869ca5cb81dd235bd3/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/etc/localtime",
"Destination": "/etc/localtime",
"Mode": "rw",
"RW": true,
"Propagation": "rprivate"
},
{
"Type": "bind",
"Source": "/home/geagle/deploy/box/files/kafka",
"Destination": "/kafka",
"Mode": "rw",
"RW": true,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "396085fec743",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"9092/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"KAFKA_ADVERTISED_HOST_NAME=kafka",
"KAFKA_CREATE_TOPICS=td_skyeye_property:5:1",
"KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181",
"KAFKA_BROKER_ID=1",
"KAFKA_MESSAGE_MAX_BYTES=20000000",
"KAFKA_AUTO_CREATE_TOPICS_ENABLE=true",
"KAFKA_LOG_RETENTION_MS=300000",
"KAFKA_LOG_DIRS=/kafka/kafka-logs-df49ee89e690",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/jvm/java-1.8-openjdk/jre/bin:/usr/lib/jvm/java-1.8-openjdk/bin:/opt/kafka/bin",
"LANG=C.UTF-8",
"JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk/jre",
"JAVA_VERSION=8u212",
"JAVA_ALPINE_VERSION=8.212.04-r0",
"KAFKA_VERSION=2.3.0",
"SCALA_VERSION=2.12",
"KAFKA_HOME=/opt/kafka",
"GLIBC_VERSION=2.29-r0"
],
"Cmd": [
"start-kafka.sh"
],
"ArgsEscaped": true,
"Image": "kafka",
"Volumes": {
"/etc/localtime": {},
"/kafka": {}
},
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {
"com.docker.compose.config-hash": "9d5650f9b09f9c4cce9f9dc3a981a2376b3ed60d1f97a17a08d0aadc9675e3cf",
"com.docker.compose.container-number": "1",
"com.docker.compose.oneoff": "False",
"com.docker.compose.project": "files",
"com.docker.compose.service": "kafka",
"com.docker.compose.version": "1.24.1",
"maintainer": "wurstmeister",
"org.label-schema.build-date": "2019-07-11T14:41:43Z",
"org.label-schema.description": "Apache Kafka",
"org.label-schema.name": "kafka",
"org.label-schema.schema-version": "1.0",
"org.label-schema.vcs-ref": "37f85717412018870328fc9f02ba6a34afa5ff50",
"org.label-schema.vcs-url": "https://github.com/wurstmeister/kafka-docker",
"org.label-schema.version": "2.12_2.3.0"
}
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "9f002d536ef1fd67529a5c238357be51225bdadf7fb6e75c8fa5c95147088be4",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"9092/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "9092"
}
]
},
"SandboxKey": "/var/run/docker/netns/9f002d536ef1",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"files_my-net": {
"IPAMConfig": null,
"Links": null,
"Aliases": [
"396085fec743",
"kafka"
],
"NetworkID": "5e00295c96ba4654f28ebca02cbeab8ad67ed8c19fd44c26047e5d7951e7df68",
"EndpointID": "17965be9f51faccd238ccdb5f2db5e5ec7d6cb2049ddce1f8ce8c78ace7d373b",
"Gateway": "172.18.0.1",
"IPAddress": "172.18.0.7",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:12:00:07",
"DriverOpts": null
}
}
}
}
]
查看启动项
查看启动的脚本:
[root@localhost files]# docker exec -it 396085fec743 /bin/bash
bash-4.4# find / -name start-kafka.sh
/usr/bin/start-kafka.sh
bash-4.4# cat /usr/bin/start-kafka.sh
#!/bin/bash -e
# Allow specific kafka versions to perform any unique bootstrap operations
OVERRIDE_FILE="/opt/overrides/${KAFKA_VERSION}.sh"
if [[ -x "$OVERRIDE_FILE" ]]; then
echo "Executing override file $OVERRIDE_FILE"
eval "$OVERRIDE_FILE"
fi
# Store original IFS config, so we can restore it at various stages
ORIG_IFS=$IFS
if [[ -z "$KAFKA_ZOOKEEPER_CONNECT" ]]; then
echo "ERROR: missing mandatory config: KAFKA_ZOOKEEPER_CONNECT"
exit 1
fi
if [[ -z "$KAFKA_PORT" ]]; then
export KAFKA_PORT=9092
fi
create-topics.sh &
unset KAFKA_CREATE_TOPICS
if [[ -z "$KAFKA_ADVERTISED_PORT" && \
-z "$KAFKA_LISTENERS" && \
-z "$KAFKA_ADVERTISED_LISTENERS" && \
-S /var/run/docker.sock ]]; then
KAFKA_ADVERTISED_PORT=$(docker port "$(hostname)" $KAFKA_PORT | sed -r 's/.*:(.*)/\1/g')
export KAFKA_ADVERTISED_PORT
fi
if [[ -z "$KAFKA_BROKER_ID" ]]; then
if [[ -n "$BROKER_ID_COMMAND" ]]; then
KAFKA_BROKER_ID=$(eval "$BROKER_ID_COMMAND")
export KAFKA_BROKER_ID
else
# By default auto allocate broker ID
export KAFKA_BROKER_ID=-1
fi
fi
if [[ -z "$KAFKA_LOG_DIRS" ]]; then
export KAFKA_LOG_DIRS="/kafka/kafka-logs-$HOSTNAME"
fi
if [[ -n "$KAFKA_HEAP_OPTS" ]]; then
sed -r -i 's/(export KAFKA_HEAP_OPTS)="(.*)"/\1="'"$KAFKA_HEAP_OPTS"'"/g' "$KAFKA_HOME/bin/kafka-server-start.sh"
unset KAFKA_HEAP_OPTS
fi
if [[ -n "$HOSTNAME_COMMAND" ]]; then
HOSTNAME_VALUE=$(eval "$HOSTNAME_COMMAND")
# Replace any occurences of _{HOSTNAME_COMMAND} with the value
IFS=$'\n'
for VAR in $(env); do
if [[ $VAR =~ ^KAFKA_ && "$VAR" =~ "_{HOSTNAME_COMMAND}" ]]; then
eval "export ${VAR//_\{HOSTNAME_COMMAND\}/$HOSTNAME_VALUE}"
fi
done
IFS=$ORIG_IFS
fi
if [[ -n "$PORT_COMMAND" ]]; then
PORT_VALUE=$(eval "$PORT_COMMAND")
# Replace any occurences of _{PORT_COMMAND} with the value
IFS=$'\n'
for VAR in $(env); do
if [[ $VAR =~ ^KAFKA_ && "$VAR" =~ "_{PORT_COMMAND}" ]]; then
eval "export ${VAR//_\{PORT_COMMAND\}/$PORT_VALUE}"
fi
done
IFS=$ORIG_IFS
fi
if [[ -n "$RACK_COMMAND" && -z "$KAFKA_BROKER_RACK" ]]; then
KAFKA_BROKER_RACK=$(eval "$RACK_COMMAND")
export KAFKA_BROKER_RACK
fi
# Try and configure minimal settings or exit with error if there isn't enough information
if [[ -z "$KAFKA_ADVERTISED_HOST_NAME$KAFKA_LISTENERS" ]]; then
if [[ -n "$KAFKA_ADVERTISED_LISTENERS" ]]; then
echo "ERROR: Missing environment variable KAFKA_LISTENERS. Must be specified when using KAFKA_ADVERTISED_LISTENERS"
exit 1
elif [[ -z "$HOSTNAME_VALUE" ]]; then
echo "ERROR: No listener or advertised hostname configuration provided in environment."
echo " Please define KAFKA_LISTENERS / (deprecated) KAFKA_ADVERTISED_HOST_NAME"
exit 1
fi
# Maintain existing behaviour
# If HOSTNAME_COMMAND is provided, set that to the advertised.host.name value if listeners are not defined.
export KAFKA_ADVERTISED_HOST_NAME="$HOSTNAME_VALUE"
fi
#Issue newline to config file in case there is not one already
echo "" >> "$KAFKA_HOME/config/server.properties"
(
function updateConfig() {
key=$1
value=$2
file=$3
# Omit $value here, in case there is sensitive information
echo "[Configuring] '$key' in '$file'"
# If config exists in file, replace it. Otherwise, append to file.
if grep -E -q "^#?$key=" "$file"; then
sed -r -i "s@^#?$key=.*@$key=$value@g" "$file" #note that no config values may contain an '@' char
else
echo "$key=$value" >> "$file"
fi
}
# Fixes #312
# KAFKA_VERSION + KAFKA_HOME + grep -rohe KAFKA[A-Z0-0_]* /opt/kafka/bin | sort | uniq | tr '\n' '|'
EXCLUSIONS="|KAFKA_VERSION|KAFKA_HOME|KAFKA_DEBUG|KAFKA_GC_LOG_OPTS|KAFKA_HEAP_OPTS|KAFKA_JMX_OPTS|KAFKA_JVM_PERFORMANCE_OPTS|KAFKA_LOG|KAFKA_OPTS|"
# Read in env as a new-line separated array. This handles the case of env variables have spaces and/or carriage returns. See #313
IFS=$'\n'
for VAR in $(env)
do
env_var=$(echo "$VAR" | cut -d= -f1)
if [[ "$EXCLUSIONS" = *"|$env_var|"* ]]; then
echo "Excluding $env_var from broker config"
continue
fi
if [[ $env_var =~ ^KAFKA_ ]]; then
kafka_name=$(echo "$env_var" | cut -d_ -f2- | tr '[:upper:]' '[:lower:]' | tr _ .)
updateConfig "$kafka_name" "${!env_var}" "$KAFKA_HOME/config/server.properties"
fi
if [[ $env_var =~ ^LOG4J_ ]]; then
log4j_name=$(echo "$env_var" | tr '[:upper:]' '[:lower:]' | tr _ .)
updateConfig "$log4j_name" "${!env_var}" "$KAFKA_HOME/config/log4j.properties"
fi
done
)
if [[ -n "$CUSTOM_INIT_SCRIPT" ]] ; then
eval "$CUSTOM_INIT_SCRIPT"
fi
exec "$KAFKA_HOME/bin/kafka-server-start.sh" "$KAFKA_HOME/config/server.properties"
bash-4.4#
可以发现,启动脚本会自动提取以 KAFKA_ 开头的环境变量,将变量名格式化为配置项(小写、下划线转点号)后写入到配置文件:
if [[ $env_var =~ ^KAFKA_ ]]; then
kafka_name=$(echo "$env_var" | cut -d_ -f2- | tr '[:upper:]' '[:lower:]' | tr _ .)
updateConfig "$kafka_name" "${!env_var}" "$KAFKA_HOME/config/server.properties"
fi
修改docker-compose.yml
  kafka:
    image: harbor.ops.360es.cn/ytc_cloud_scan/box/kafka
    privileged: true
    ports:
      - "9092:9092"
    restart: always
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_CREATE_TOPICS: "td_skyeye_property:5:1"
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_BROKER_ID: 1
      KAFKA_MESSAGE_MAX_BYTES: 20000000
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
      KAFKA_LOG_RETENTION_MS: 300000
      KAFKA_LOG_DIRS: /kafka/kafka-logs-df49ee89e690
    container_name: kafka
    volumes:
      - ./kafka:/kafka
      - /etc/localtime:/etc/localtime
    networks:
      - my-net
    depends_on:
      - zookeeper
修改完成后执行 docker-compose down,再执行 docker-compose up -d 使配置修改生效。然后再次进入容器内,查看配置是否生效。
其中 KAFKA_LOG_DIRS: /kafka/kafka-logs-df49ee89e690 用于固定日志存储目录,解决每次 docker-compose down 之后存储目录(默认带容器主机名后缀)自动改变的问题;KAFKA_LOG_RETENTION_MS 用于配置全局主题过期时间。
配置生效如下:(问题解决 :))
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/kafka/kafka-logs-df49ee89e690
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
......
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
advertised.host.name=kafka
message.max.bytes=20000000
port=9092
auto.create.topics.enable=true
log.retention.ms=300000