[docker] Some frequently used ways to start docker containers
Posted by 阿新 on 2018-12-31
busybox command reference
For example, a busybox container that keeps writing log lines:
docker run -d --name=b1 busybox /bin/sh -c 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done'
docker logs -f b1
docker exec -ti busybox -- nslookup kubernetes.default
docker exec busybox cat /etc/resolv.conf
A gotcha when COPYing a directory in a Dockerfile
## Copying a single file: no target filename is needed
FROM centos
COPY 2.txt /usr/local/
## Copying a directory: the target directory must be spelled out like this, otherwise the directory itself is not created and its contents just land in the destination
FROM centos
COPY mysql /usr/local/mysql
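A quick way to see the difference is to build the image and list the target path; a minimal sketch, assuming the Dockerfile above and an example tag:
docker build -t copy-test .                       # copy-test is just an example tag
docker run --rm copy-test ls -l /usr/local/mysql  # the directory contents should appear here, not directly under /usr/local/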
Startup methods for images I use often (or occasionally)
Another way to set the container's TZ
## override default time zone (Etc/UTC) if TZ variable is set
if [ ! -z "$TZ" ]; then
ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
fi
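A minimal usage sketch, assuming the snippet above runs in the image's entrypoint before exec-ing the command, and that tzdata is present in the image (the image name is just an example):
docker run --rm -e TZ=Asia/Shanghai my-image date   # should print local time instead of UTC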
centos with ssh
docker run -d -p 0.0.0.0:2222:22 tutum/centos6
docker run -d -p 0.0.0.0:2222:22 tutum/centos
docker run -d -p 0.0.0.0:2222:22 -v /etc/localtime:/etc/localtime:ro -v /etc/timezone:/etc/timezone:ro tutum/centos6
docker run -d -p 0.0.0.0:2222:22 -v /etc/localtime:/etc/localtime:ro -v /etc/timezone:/etc/timezone:ro tutum/centos
Two authentication methods are supported:
docker run -d -p 0.0.0.0:2222:22 -v /etc/localtime:/etc/localtime:ro -v /etc/timezone:/etc/timezone:ro -e ROOT_PASS="mypass" tutum/centos
docker run -d -p 2222:22 -e AUTHORIZED_KEYS="`cat ~/.ssh/id_rsa.pub`" tutum/centos
docker logs <CONTAINER_ID>
ssh -p <port> root@<host>
busybox with ping/curl/nslookup
docker run -itd --name=test1 --net=test-network radial/busyboxplus /bin/sh
nginx
mkdir -p /data/nginx-html
echo "maotai" > /data/nginx-html/index.html
docker run -d \
--net=host \
--restart=always \
-v /etc/nginx/nginx.conf:/etc/nginx/nginx.conf:ro \
-v /etc/localtime:/etc/localtime:ro \
-v /data/nginx-html:/usr/share/nginx/html \
--name nginx \
nginx
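A quick check that the mounted page is being served (the container uses --net=host, so it answers on the host's port 80):
curl -s http://127.0.0.1/     # should return "maotai"
docker logs --tail 5 nginx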
Deploying portainer, a web UI for managing multiple standalone docker nodes
cp /etc/docker/daemon.json /etc/docker/daemon.json.bak.$(date +%F)
cat >/etc/docker/daemon.json<<EOF
{
"registry-mirrors": ["https://registry.docker-cn.com"],
"hosts": [
"tcp://0.0.0.0:2375",
"unix:///var/run/docker.sock"
]
}
EOF
systemctl daemon-reload
systemctl restart docker && systemctl enable docker
docker run -d \
-p 9000:9000 \
--restart=always \
-v /etc/localtime:/etc/localtime:ro \
-v /var/run/docker.sock:/var/run/docker.sock \
portainer/portainer
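Before adding this host as an endpoint in the Portainer UI (http://<host>:9000), it is worth confirming that the remote API opened by daemon.json is actually reachable:
curl -s http://127.0.0.1:2375/version
docker -H tcp://127.0.0.1:2375 info >/dev/null && echo "remote API OK"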
nginx configuration
mv /etc/nginx /etc/nginx_$(date +%F)
mkdir -p /etc/nginx/conf.d/
mkdir -p /data/nginx-html
echo "maotai" > /data/nginx-html/index.html
cat >> /etc/nginx/nginx.conf <<'EOF'
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
server_name_in_redirect off;
client_max_body_size 20m;
client_header_buffer_size 16k;
large_client_header_buffers 4 16k;
sendfile on;
tcp_nopush on;
keepalive_timeout 65;
server_tokens off;
gzip on;
gzip_min_length 1k;
gzip_buffers 4 16k;
gzip_proxied any;
gzip_http_version 1.1;
gzip_comp_level 3;
gzip_types text/plain application/x-javascript text/css application/xml;
gzip_vary on;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
log_format json '{"@timestamp": "$time_iso8601",'
'"@version": "1",'
'"client": "$remote_addr",'
'"url": "$uri", '
'"status": $status, '
'"domain": "$host", '
'"host": "$server_addr",'
'"size":"$body_bytes_sent", '
'"response_time": $request_time, '
'"referer": "$http_referer", '
'"http_x_forwarded_for": "$http_x_forwarded_for", '
'"ua": "$http_user_agent" } ';
access_log /var/log/nginx/access.log json;
include /etc/nginx/conf.d/*.conf;
}
EOF
tree /etc/nginx/
cat >> /etc/nginx/conf.d/default.conf <<'EOF'
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log /var/log/nginx/host.access.log json;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
EOF
tree /etc/nginx/
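Assuming the running container named nginx can see the generated files (e.g. /etc/nginx is bind-mounted as in the run command above), the new config can be validated and reloaded without a restart:
docker exec nginx nginx -t
docker exec nginx nginx -s reload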
nginx-lb
docker run --name nginx-lb \
-d \
-v /etc/nginx/nginx.conf:/etc/nginx/nginx.conf:ro \
--net=host \
--restart=always \
-v /etc/localtime:/etc/localtime \
nginx:1.13.3-alpine
lnmp (each component as a separate container)
docker-compose up
Start a mysql instance
cat /root/dockerfile/mysql/start.sh
docker run -p 3306:3306 -v /data/mysql:/var/lib/mysql -v /etc/localtime:/etc/localtime --name mysql5 --restart=always -d mysql:5.6.23 --character-set-server=utf8 --collation-server=utf8_general_ci
docker run \
-p 3306:3306 \
-v /data/mysql:/var/lib/mysql \
-v /etc/localtime:/etc/localtime \
--name mysql5 \
--restart=always \
-e MYSQL_ROOT_PASSWORD=123456 \
-d mysql:5.6.23 --character-set-server=utf8 --collation-server=utf8_general_ci
show VARIABLES like '%max_allowed_packet%';
show variables like '%storage_engine%';
show variables like 'collation_%';
show variables like 'character_set_%';
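These checks can be run through the container's own mysql client; a sketch using the root password set via MYSQL_ROOT_PASSWORD above:
docker exec mysql5 mysql -uroot -p123456 -e "show variables like 'character_set_%';"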
mysql master/slave
#+++++++++++++++++++++++++++
# mysql master/slave
#+++++++++++++++++++++++++++
docker run -d -e REPLICATION_MASTER=true -e REPLICATION_PASS=mypass -p 3306:3306 --name mysql tutum/mysql
docker run -d -e REPLICATION_SLAVE=true -p 3307:3306 --link mysql:mysql tutum/mysql
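A rough way to confirm the slave connected; the slave got a random name since --name was not set, and the exact login depends on the tutum/mysql defaults, so adjust credentials to your setup:
docker ps --filter ancestor=tutum/mysql                    # find the slave container name
docker exec <slave_container> mysql -uroot -e "show slave status\G" | grep -E "Slave_(IO|SQL)_Running"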
Installing gogs (though gitlab is recommended instead)
docker run -itd \
-p 53000:3000 -p 50022:22 \
-v /data/gogs:/data \
-v /etc/localtime:/etc/localtime \
--restart=always \
gogs/gogs
owncloud
docker run -v /data/owncloud-data:/var/www/html -v /etc/localtime:/etc/localtime -v :/var/www/html/config --restart=always -itd -p 8000:80 owncloud
nextcloud (same idea as owncloud; reportedly supports taking notes in markdown online, and overall feels more capable)
docker run -d \
-p 8080:80 \
-v nextcloud:/var/www/html \
nextcloud
Installing confluence
docker run \
-v /data/confluence/conflu_data:/var/atlassian/application-data/confluence \
-v /etc/localtime:/etc/localtime \
-v /data/confluence/server.xml:/opt/atlassian/confluence/conf/server.xml \
--restart=always \
--link mysql5:db \
--name="confluence" -d \
-p 8090:8090 \
-p 8091:8091 \
cptactionhank/atlassian-confluence
- Configure confluence
- Create the database
create database confluence default character set utf8 collate utf8_bin;
grant all on confluence.* to 'confluence'@"172.17.0.%" identified by "confluenceman";
grant all on confluence.* to 'confluence'@"192.168.6.%";
grant all on confluence.* to 'confluence'@"192.168.8.%";
- Install and crack the license
1. Copy the jar out and crack it on the cracking machine
docker cp confluence:/opt/atlassian/confluence/confluence/WEB-INF/lib/atlassian-extras-decoder-v2-3.2.jar ./
mv atlassian-extras-decoder-v2-3.2.jar atlassian-extras-2.4.jar
2. Copy the cracked file back into the container
mv atlassian-extras-2.4.jar atlassian-extras-decoder-v2-3.2.jar
docker cp ./atlassian-extras-decoder-v2-3.2.jar confluence:/opt/atlassian/confluence/confluence/WEB-INF/lib/
3. Restart confluence
docker stop confluence
docker start confluence
- 1. Paste the licence key produced by the cracking machine
- 2. Choose jdbc to connect to mysql; for the url use:
jdbc:mysql://db:3306/confluence?sessionVariables=storage_engine%3DInnoDB&amp;useUnicode=true&amp;characterEncoding=utf8
- 3. Import existing data
Reference: https://www.ilanni.com/?p=11989
e.g. xmlexport-20170902-100808-153.zip
This archive already contains the database data.
- 4. Installation complete
Log in with the admin account and password at http://192.168.x.x:8090
admin
xxxxx
- 5. Configure the mailbox
I did not configure it via server.xml here (tested that and had problems); instead I configured SMTP directly with a Sina mailbox:
smtp.sina.com
<your_account>@sina.com
123456
phabricator review/audit system (for customer support to file bugs to developers)
docker run -d \
-p 9080:80 -p 9443:443 -p 9022:22 \
--env PHABRICATOR_HOST=sj.pp100.net \
--env MYSQL_HOST=192.168.x.x \
--env MYSQL_USER=root \
--env MYSQL_PASS=elc123 \
--env PHABRICATOR_REPOSITORY_PATH=/repos \
--env PHABRICATOR_HOST_KEYS_PATH=/hostkeys/persisted \
-v /data/phabricator/hostkeys:/hostkeys \
-v /data/phabricator/repo:/repos \
redpointgames/phabricator
Installing hackmarkdown (an internal markdown server; supports pasting images, permissions, dedicated clients, etc.)
docker-compose up -d
Common options when starting containers
- 1. Time zone
- 2. Auto restart
- 3. Logs
docker run \
  -v /etc/localtime:/etc/localtime:ro \
  -v /etc/timezone:/etc/timezone:ro \
  --restart=always \
  <image>
or the same time-zone mounts on a single line:
-v /etc/localtime:/etc/localtime:ro -v /etc/timezone:/etc/timezone:ro
The output is recorded in two places: one copy is the container's foreground output, the other is the json log file under /var/lib/docker/containers/:
docker run -it --rm -p 80:80 nginx
ll /var/lib/docker/containers/*/*.log
Log rotation for container logs (otherwise they keep growing)
Container log files: /var/lib/docker/containers/*/*.log
docker run -d -v /var/lib/docker/containers:/var/lib/docker/containers:rw \
-v /etc/localtime:/etc/localtime:ro \
--restart=always \
tutum/logrotate
- How it works (logrotate's copytruncate option is nice: it rotates the log without interrupting the process that writes it)
## You can exec into the container to look at its rotation policy.
#https://hub.docker.com/r/tutum/logrotate/
/ # cat /etc/logrotate.conf
/var/lib/docker/containers/*/*.log {
rotate 0
copytruncate
sharedscripts
maxsize 10M
postrotate
rm -f /var/lib/docker/containers/*/*.log.*
endscript
}
# logrotate copytruncate explained:
# http://www.lightxue.com/how-logrotate-works
# This reminds me of rotating nginx logs on the host:
cat > /etc/logrotate.d/nginx <<'EOF'
/usr/local/nginx/logs/*.log {
daily
missingok
rotate 7
dateext
compress
delaycompress
notifempty
sharedscripts
postrotate
if [ -f /usr/local/nginx/logs/nginx.pid ]; then
kill -USR1 `cat /usr/local/nginx/logs/nginx.pid`
fi
endscript
}
EOF
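To verify the rules without waiting for cron, do a dry run and then force one rotation (logrotate's -d and -f flags):
logrotate -d /etc/logrotate.d/nginx   # dry run, prints what would happen
logrotate -f /etc/logrotate.d/nginx   # force a rotation; postrotate sends USR1 so nginx reopens its logs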
Cleaning up images and volumes that have not been used for a long time
docker run -d \
--privileged \
-v /var/run:/var/run:rw \
-v /var/lib/docker:/var/lib/docker:rw \
-e IMAGE_CLEAN_INTERVAL=1 \
-e IMAGE_CLEAN_DELAYED=1800 \
-e VOLUME_CLEAN_INTERVAL=1800 \
-e IMAGE_LOCKED="ubuntu:trusty, tutum/curl:trusty" \
tutum/cleanup
# https://hub.docker.com/r/tutum/cleanup/
# IMAGE_CLEAN_INTERVAL (optional) How long to wait between cleanup runs (in seconds), 1 by default.
# IMAGE_CLEAN_DELAYED (optional) How long to wait to consider an image unused (in seconds), 1800 by default.
# VOLUME_CLEAN_INTERVAL (optional) How long to wait to consider a volume unused (in seconds), 1800 by default.
# IMAGE_LOCKED (optional) A list of images that will not be cleaned by this container, separated by ,
- How it works: the entrypoint script calls the /cleanup binary
/ # cat run.sh
#!/bin/sh
if [ ! -e "/var/run/docker.sock" ]; then
echo "=> Cannot find docker socket(/var/run/docker.sock), please check the command!"
exit 1
fi
if [ "${IMAGE_LOCKED}" == "**None**" ]; then
exec /cleanup \
-imageCleanInterval ${IMAGE_CLEAN_INTERVAL} \
-imageCleanDelayed ${IMAGE_CLEAN_DELAYED}
else
exec /cleanup \
-imageCleanInterval ${IMAGE_CLEAN_INTERVAL} \
-imageCleanDelayed ${IMAGE_CLEAN_DELAYED} \
-imageLocked "${IMAGE_LOCKED}"
fi
zookeeper cluster
version: '2'
services:
  zoo1:
    image: zookeeper
    restart: always
    container_name: zoo1
    volumes:
      - /etc/localtime:/etc/localtime
    ports:
      - "2181:2181"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
  zoo2:
    image: zookeeper
    restart: always
    container_name: zoo2
    volumes:
      - /etc/localtime:/etc/localtime
    ports:
      - "2182:2181"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
  zoo3:
    image: zookeeper
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
    container_name: zoo3
    ports:
      - "2183:2181"
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
Check:
echo stat | nc 127.0.0.1 2181
Or exec into a container and check:
#docker exec zoo1 /zookeeper-3.4.10/bin/zkCli.sh -server 127.0.0.1:2181
#/zookeeper-3.4.10/bin/zkCli.sh -server 127.0.0.1:2181
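A quick health check of all three nodes through the mapped host ports; one should report Mode: leader and the other two Mode: follower:
for p in 2181 2182 2183; do echo srvr | nc 127.0.0.1 $p | grep Mode; done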
zabbix (monitoringartist has packed all the components into a single image)
docker run \
-d \
--name dockbix-db \
-v /backups:/backups \
-v /etc/localtime:/etc/localtime:ro \
--volumes-from dockbix-db-storage \
--env="MARIADB_USER=zabbix" \
--env="MARIADB_PASS=my_password" \
monitoringartist/zabbix-db-mariadb
# Start Dockbix linked to the started DB
docker run \
-d \
--name dockbix \
-p 80:80 \
-p 10051:10051 \
-v /etc/localtime:/etc/localtime:ro \
--link dockbix-db:dockbix.db \
--env="ZS_DBHost=dockbix.db" \
--env="ZS_DBUser=zabbix" \
--env="ZS_DBPassword=my_password" \
--env="XXL_zapix=true" \
--env="XXL_grapher=true" \
monitoringartist/dockbix-xxl:latest
zabbix with separate components (I have not tested this)
docker run --name zabbix-server-mysql -t \
-v /etc/localtime:/etc/localtime:ro \
-v /data/zabbix-alertscripts:/usr/lib/zabbix/alertscripts \
-v /etc/zabbix/zabbix_server.conf:/etc/zabbix/zabbix_server.conf \
-e DB_SERVER_HOST="192.168.14.132" \
-e MYSQL_DATABASE="zabbix" \
-e MYSQL_USER="zabbix" \
-e MYSQL_PASSWORD="Tx66sup" \
-e MYSQL_ROOT_PASSWORD="Tinsu" \
-e ZBX_JAVAGATEWAY="127.0.0.1" \
--network=host \
-d registry.docker-cn.com/zabbix/zabbix-server-mysql:ubuntu-3.4.0
docker run --name mysql-server -t \
-v /etc/localtime:/etc/localtime:ro \
-v /etc/my.cnf:/etc/my.cnf \
-v /data/mysql-data:/var/lib/mysql \
-e MYSQL_DATABASE="zabbix" \
-e MYSQL_USER="zabbix" \
-e MYSQL_PASSWORD="bix66sup" \
-e MYSQL_ROOT_PASSWORD="adminsu" \
-p 3306:3306 \
-d registry.docker-cn.com/mysql/mysql-server:5.7
docker run --name zabbix-java-gateway -t \
-v /etc/localtime:/etc/localtime:ro \
--network=host \
-d registry.docker-cn.com/zabbix/zabbix-java-gateway:latest
docker run --name zabbix-web-nginx-mysql -t \
-v /etc/localtime:/etc/localtime:ro \
-e DB_SERVER_HOST="192.168.14.132" \
-e MYSQL_DATABASE="zabbix" \
-e MYSQL_USER="zabbix" \
-e MYSQL_PASSWORD="TCzp" \
-e MYSQL_ROOT_PASSWORD="TC6u" \
-e PHP_TZ="Asia/Shanghai" \
--network=host \
-d registry.docker-cn.com/zabbix/zabbix-web-nginx-mysql:ubuntu-3.4.0
docker monitoring with cAdvisor
docker run \
--volume=/:/rootfs:ro \
--volume=/var/run:/var/run:rw \
--volume=/sys:/sys:ro \
--volume=/var/lib/docker/:/var/lib/docker:ro \
--publish=8080:8080 \
--detach=true \
--name=cadvisor \
google/cadvisor:latest
http://192.168.14.133:8080/
Running cAdvisor + InfluxDB + Grafana on centos7
- References
http://www.pangxie.space/docker/456
https://www.brianchristner.io/how-to-setup-docker-monitoring/
https://github.com/vegasbrianc/docker-monitoring/blob/master/docker-monitoring-0.9.json
- Start influxdb (the latest tag did not work well for me)
docker run -d -p 8083:8083 -p 8086:8086 --expose 8090 --expose 8099 --name influxsrv tutum/influxdb:0.10
- Create the db
docker exec -it influxsrv bash
# inside the container, open the influx CLI (or use the web admin UI on port 8083) and run:
influx
CREATE DATABASE cadvisor
use cadvisor
CREATE USER "root" WITH PASSWORD 'root' WITH ALL PRIVILEGES
show users
- Start cadvisor
docker run --volume=/:/rootfs:ro --volume=/var/run:/var/run:rw --volume=/sys:/sys:ro --volume=/var/lib/docker/:/var/lib/docker:ro --publish=8080:8080 --detach=true --link influxsrv:influxsrv --name=cadvisor google/cadvisor:latest -storage_driver=influxdb -storage_driver_db=cadvisor -storage_driver_host=influxsrv:8086
- Start grafana, add the InfluxDB data source, and import the dashboard
docker run -d -p 3000:3000 -e INFLUXDB_HOST=192.168.14.133 -e INFLUXDB_PORT=8086 -e INFLUXDB_NAME=cadvisor -e INFLUXDB_USER=root -e INFLUXDB_PASS=root --link influxsrv:influxsrv --name grafana grafana/grafana
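Before wiring up the Grafana data source it is worth confirming that cAdvisor is actually writing into InfluxDB; a sketch against the 0.10 query endpoint using the root/root credentials created above:
curl -sG 'http://127.0.0.1:8086/query' -u root:root \
     --data-urlencode "db=cadvisor" --data-urlencode "q=SHOW MEASUREMENTS"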
Prometheus + Grafana (the dashboards look better than cAdvisor + InfluxDB + Grafana)
A Prometheus & Grafana docker-compose stack
docker-compose up -d
elk
sysctl -w vm.max_map_count=262144
docker run -d -v /etc/localtime:/etc/localtime --restart=always -p 5601:5601 -p 9200:9200 -p 5044:5044 -it --name elk sebp/elk
docker run -d -v /etc/localtime:/etc/localtime --restart=always -p 9100:9100 mobz/elasticsearch-head:5
or
docker-compose up -d
Fully manual install of elasticsearch + kibana (elk)
useradd elk
cd /usr/local/src/
tar xf elasticsearch-5.6.4.tar.gz -C /usr/local/
tar xf kibana-5.6.4-linux-x86_64.tar.gz -C /usr/local/
ln -s /usr/local/elasticsearch-5.6.4 /usr/local/elasticsearch
ln -s /usr/local/kibana-5.6.4-linux-x86_64 /usr/local/kibana
chown -R elk. /usr/local/elasticsearch
chown -R elk. /usr/local/elasticsearch/
chown -R elk. /usr/local/kibana
chown -R elk. /usr/local/kibana/
mkdir /data/es/{data,logs} -p
chown -R elk. /data
Edit the es configuration (elasticsearch.yml):
network.host: 0.0.0.0
http.cors.enabled: true
http.cors.allow-origin: "*"
Adjust kernel and ulimit settings:
vim /etc/security/limits.conf
* soft nproc 65536
* hard nproc 65536
* soft nofile 65536
* hard nofile 65536
sysctl -w vm.max_map_count=262144
sysctl -p
nohup /bin/su - elk -c "/usr/local/elasticsearch/bin/elasticsearch" > /data/es/es-start.log 2>&1 &
nohup /bin/su - elk -c "/usr/local/kibana/bin/kibana" > /data/es/kibana-start.log 2>&1 &
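Quick checks that both services came up:
curl -s http://127.0.0.1:9200/               # elasticsearch node/cluster info
curl -sI http://127.0.0.1:5601/ | head -1    # kibana should answer on 5601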
Installing the elasticsearch head plugin
First edit the es config file elasticsearch.yml and append:
http.cors.enabled: true
http.cors.allow-origin: "*"
docker run -d -v /etc/localtime:/etc/localtime --restart=always -p 9100:9100 mobz/elasticsearch-head:5
Tuning to do before installing elk on a physical machine
sudo sysctl -w vm.max_map_count=262144
make it persistent:
$ vim /etc/sysctl.conf
vm.max_map_count=262144
## Common es operations, reference: http://www.cnblogs.com/lishouguang/p/4560930.html
## Backup and scaling scripts; a bit dated, but the approach is worth a look: https://github.com/gregbkr/docker-elk-cadvisor-dashboards
http://192.168.14.133:9200/_cat/health?v # check cluster health
http://192.168.14.133:9200/_cat/nodes?v # check node status
http://192.168.14.133:9200/_cat/indices?v # list indices
# create an index
curl -XPUT http://vm1:9200/customer?pretty
# add a document
[es@vm1 ~]$ curl -XPUT vm1:9200/customer/external/1?pretty -d '{"name":"lisg"}'
# retrieve a document
[es@vm1 ~]$ curl -XGET vm1:9200/customer/external/1?pretty
# delete a document
[es@vm1 ~]$ curl -XDELETE vm1:9200/customer/external/1?pretty
# delete a type
[es@vm1 ~]$ curl -XDELETE vm1:9200/customer/external?pretty
# delete an index
[es@vm1 ~]$ curl -XDELETE vm1:9200/customer?pretty
# POST can also add a document, without specifying an ID
[es@vm1 ~]$ curl -XPOST vm1:9200/customer/external?pretty -d '{"name":"zhangsan"}'
# update a document (this PUT replaces the whole document)
[es@vm1 ~]$ curl -XPUT vm1:9200/customer/external/1?pretty -d '{"name":"lisg4", "age":28}'
# update a document with a script (dynamic scripts are disabled in version 1.4.3)
[es@vm1 ~]$ curl -XPOST vm1:9200/customer/external/1/_update?pretty -d '{"script":"ctx._source.age += 5"}'
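And a simple query-string search against the same index:
[es@vm1 ~]$ curl -XGET 'vm1:9200/customer/_search?q=name:lisg&pretty'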
Start jenkins
docker run -d -u root \
-p 8080:8080 \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $(which docker):/bin/docker \
-v /var/jenkins_home:/var/jenkins_home \
jenkins
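For first-time setup the initial admin password is needed; since /var/jenkins_home is bind-mounted it can be read from either side (replace <container> with the ID shown by docker ps):
docker exec <container> cat /var/jenkins_home/secrets/initialAdminPassword
cat /var/jenkins_home/secrets/initialAdminPassword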
tomcat with ssh
Previously I always used single-app containers such as tomcat, where catalina.sh run is all that is needed to start the container in the foreground. One way to do that: CMD ["run.sh"], where run.sh contains the command I want to run.
I can also use ENTRYPOINT ["docker-entrypoint.sh"], which is more flexible: CMD can then pass arguments to that script.
A tomcat container that also needs ssh access for management means sshd must run in the foreground at the same time, so the only option is to have supervisor manage both.
Reference: http://blog.csdn.net/iiiiher/article/details/70918045, which covers this,
but I still feel it is not quite complete.
- 1. Get familiar with Dockerfile syntax
- 2. Build a centos7 base image by hand
- 3. Or use the official centos7 image
- 4. System layer: on top of the official centos7, add supervisor + ssh, so sshd starts as soon as the container starts
- 5. Runtime layer: install the jdk
- 6. App layer: install tomcat and expose 8080; supervisor takes over both processes (config below)
[supervisord]
nodaemon = true
[program:sshd]
command=/usr/sbin/sshd -D
process_name=%(program_name)s
auto_start = true
autorestart = true
[program:tomcat]
command=/data/tomcat/bin/catalina.sh run
process_name=%(program_name)s
auto_start = true
autorestart = true
stdout_logfile = /dev/stdout
stdout_logfile_maxbytes = 0
stderr_logfile = /dev/stderr
stderr_logfile_maxbytes = 0
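With nodaemon = true, supervisord itself is the container's foreground process; from the host both managed programs can be checked with supervisorctl (the container name here is illustrative):
docker exec <tomcat_ssh_container> supervisorctl status   # should list sshd and tomcat as RUNNING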
This is the Dockerfile for the tomcat+ssh image. As preparation, download and unpack these into the directory containing the Dockerfile: the jdk, tomcat, and tomcat's server.xml (later I override this with a ConfigMap in the k8s cluster).