# Ansible: delegate, Jinja2, Roles
# 1. Ansible delegate
# 1.1 What is task delegation
Simply put: an operation that would normally run on the current managed host is instead handed off to (delegated to) another host.
# 1.2 Task delegation in practice: scenario 1
Scenario:
- 1. Add a hosts record `1.1.1.1 oldxu.com` on the 192.168.1.7 server.
- 2. Write the same hosts record to the 192.168.1.8 node as well.
- 3. Apart from this one task, none of 192.168.1.7's other tasks are delegated to 192.168.1.8.
1. Use the delegate_to keyword to delegate a task
[root@manager delegate]# cat delegate_1.yml
- hosts: 192.168.1.7
  tasks:
    - name: Add WebServers DNS
      shell: "echo 1.1.1.1 oldxu.com >> /etc/hosts"

    - name: delegate_to Host 192.168.1.8
      shell: "echo 1.1.1.1 oldxu.com >> /etc/hosts"
      delegate_to: 192.168.1.8

    - name: Add WebServers DNS
      shell: "echo 2.2.2.2 oldxu2.com >> /etc/hosts"
2. Run the playbook and verify
[root@manager delegate]# ansible-playbook delegate_1.yml

# 192.168.1.7
[root@web01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
1.1.1.1 oldxu.com
2.2.2.2 oldxu2.com

# 192.168.1.8
[root@web02 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
1.1.1.1 oldxu.com
3. What if the task needs to run on the Ansible control node itself? Delegate it to 127.0.0.1, or use local_action (see the sketch after the playbook below).
[root@manager delegate]# cat delegate_1.yml
- hosts: 192.168.1.7
  tasks:
    - name: Add WebServers DNS
      shell: "echo 1.1.1.1 oldxu.com >> /etc/hosts"

    - name: delegate_to Host 192.168.1.8
      shell: "echo 1.1.1.1 oldxu.com >> /etc/hosts"
      delegate_to: 192.168.1.8

    - name: delegate_to Host 127.0.0.1
      shell: "echo 1.1.1.1 oldxu.com >> /etc/hosts"
      delegate_to: 127.0.0.1
      delegate_facts: True    # gather facts from the delegated host instead of the play host
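For reference, the control-node task above can also be written with local_action instead of delegate_to; a minimal sketch, equivalent in effect (not taken from the original playbook):
- name: local_action Host 127.0.0.1
  local_action: shell echo 1.1.1.1 oldxu.com >> /etc/hosts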
# 1.3 Task delegation in practice: scenario 2
1. Create an unprivileged user for managing Ansible
[root@manager delegate]# cat user_manager_ansible.yml
- hosts: all
  vars:
    - user_manager: xuyong
  tasks:
    # Control node
    - name: Create Manager User {{ user_manager }}
      user:
        name: "{{ user_manager }}"
        # passwd:
        generate_ssh_key: yes
        ssh_key_bits: 2048
        ssh_key_file: .ssh/id_rsa
      register: user_messages
      delegate_to: localhost
      run_once: true            # run the delegated task only once

    - name: debug
      debug:
        msg:
          - "{{ user_messages.ssh_public_key }}"

    # Managed nodes
    - name: Create User
      user:
        name: "{{ user_manager }}"

    - name: Create User .ssh Directory
      file:
        path: "/home/{{ user_manager }}/.ssh"
        state: directory
        owner: "{{ user_manager }}"
        group: "{{ user_manager }}"
        mode: "0700"

    - name: Save Key
      copy:
        content: "{{ user_messages.ssh_public_key }}"
        dest: "/home/{{ user_manager }}/.ssh/authorized_keys"
        owner: "{{ user_manager }}"
        group: "{{ user_manager }}"
        mode: "0600"

    - name: Add {{ user_manager }} Sudoers
      lineinfile:
        path: /etc/sudoers
        line: "{{ user_manager }} ALL=(ALL) NOPASSWD:ALL"
2. On the control node, edit the main configuration file /etc/ansible/ansible.cfg to enable privilege escalation for the unprivileged user
[root@manager ~]# vim /etc/ansible/ansible.cfg
[privilege_escalation]
become=True
become_method=sudo
become_user=root
become_ask_pass=False
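The same escalation settings can also be declared per play instead of globally in ansible.cfg; a minimal sketch (the play and the tree package are only illustrative):
- hosts: all
  become: yes
  become_method: sudo
  become_user: root
  tasks:
    - name: Install A Package As Root
      yum:
        name: tree
        state: present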
3. Test and verify
[xuyong@manager opt]$ ansible all -m shell -a "mkdir /opt/test"
# If /etc/ansible/ansible.cfg has not been modified on the control node, use the -b flag to escalate privileges
[xuyong@manager opt]$ ansible all -m shell -a "mkdir /opt/test" -b
# 1.4 Task delegation in practice: scenario 3
- 1. First build a Haproxy + web_cluster environment.
- 2. When a web node's code needs to be updated, the node must first be taken out of the pool; this drain task is delegated to the Haproxy node.
- 3. Operate on the web_cluster node and deploy the new code.
- 4. Once the web node's code is updated, the node must be brought back online; this task is again delegated to the Haproxy node.
- 5. Repeat node by node until every node's code has been updated and replaced.
# 1.4.1 Building the haproxy cluster with Ansible
1. Configure Haproxy load balancing
[root@manager haproxy]# cat haproxy_install.yml
- hosts: lbservers
  tasks:
    - name: Configure Haproxy Server
      copy:
        src: ./haproxy.cfg.j2
        dest: /etc/haproxy/haproxy.cfg
      notify: Restart Haproxy Server

    - name: Started Haproxy Server
      systemd:
        name: haproxy
        state: started

  handlers:
    - name: Restart Haproxy Server
      systemd:
        name: haproxy
        state: restarted
2. Prepare the Haproxy configuration file
[root@manager haproxy]# cat haproxy.cfg.j2
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    log 127.0.0.1 local2
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats level admin
    #nbproc 4
    nbthread 8
    cpu-map 1 0
    cpu-map 2 1
    cpu-map 3 2
    cpu-map 4 3

defaults
    mode http
    log global
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor except 127.0.0.0/8
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
#----------------------------------------------------------------
# Listen settings
#----------------------------------------------------------------
##
listen haproxy-stats_2
    bind *:9999
    stats enable
    stats refresh 1s
    stats hide-version
    stats uri /haproxy?stats
    stats realm "HAProxy statistics"
    stats auth admin:123456
    stats admin if TRUE

frontend web
    bind *:8789
    mode http
    acl ansible_domain hdr_reg(host) -i ha.oldxu.net
    use_backend ansible_cluster if ansible_domain

backend ansible_cluster
    balance roundrobin
    server 172.16.1.6 172.16.1.6:80 check port 80 inter 3s rise 2 fall 3
    server 172.16.1.7 172.16.1.7:80 check port 80 inter 3s rise 2 fall 3
    server 172.16.1.8 172.16.1.8:80 check port 80 inter 3s rise 2 fall 3
3. Configure Nginx
[root@manager haproxy]# cat nginx_install.yml
- hosts: webservers
  tasks:
    - name: Installed Nginx Server
      yum:
        name: nginx
        state: present

    - name: Configure Nginx WebSite
      copy:
        src: ha.oldxu.net.conf.j2
        dest: /etc/nginx/conf.d/haproxy.oldxu.net.conf
      notify: Restart Nginx Server

    - name: Web Content
      copy:
        content: "App Version {{ ansible_eth1.ipv4.address.split('.')[-1] }}"
        dest: /opt/index.html

    - name: Started Nginx Server
      systemd:
        name: nginx
        state: started

  handlers:
    - name: Restart Nginx Server
      systemd:
        name: nginx
        state: restarted
4. Prepare the Nginx configuration file
[root@manager haproxy]# cat ha.oldxu.net.conf.j2
server {
    listen 80;
    server_name ha.oldxu.net;
    root /opt;

    location / {
        index index.html;
    }
}
# 1.4.2 Ansible inventory configuration
[root@manager haproxy]# cat host_group
[webservers]
172.16.1.6
172.16.1.7
172.16.1.8

[dbservers]
172.16.1.41

[lbservers]
172.16.1.5
# 1.4.3 Ansible delegation configuration
[root@manager haproxy]# cat haproxy_deploy.yml
- hosts: webservers
  # Control how many hosts are operated on at a time
  serial: 1
  tasks:
    - name: Down Node {{ inventory_hostname }}
      haproxy:
        state: disabled
        host: '{{ inventory_hostname }}'    # name of the node currently being operated on
        socket: /var/lib/haproxy/stats
        backend: ansible_cluster
      delegate_to: 172.16.1.5               # delegate the drain task to the load-balancer node

    - name: Sleep
      shell:
        cmd: sleep 5

    - name: Update Nginx Code
      copy:
        content: "App Version New {{ ansible_eth1.ipv4.address.split('.')[-1] }}"
        dest: /opt/index.html

    - name: Up Node {{ inventory_hostname }}
      haproxy:
        state: enabled
        host: '{{ inventory_hostname }}'
        socket: /var/lib/haproxy/stats
        backend: ansible_cluster
        wait: yes
      delegate_to: 172.16.1.5
# 1.4.4 Verifying the delegation
[root@manager haproxy]# cat get_ha_page.sh
#!/bin/bash
while true
do
    curl -HHost:ha.oldxu.net http://10.0.0.5:8789
    echo ""
    sleep 1
done
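With the curl loop running in one terminal, start the rolling update from the control node; a minimal invocation, assuming the host_group inventory from 1.4.2 is used:
[root@manager haproxy]# ansible-playbook -i host_group haproxy_deploy.yml
Because of serial: 1, only one web node is drained, updated, and re-enabled at a time, so the curl loop should keep returning pages throughout the update.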
# 2. Ansible Jinja2
# 2.1 What is Jinja2
Jinja2 is a full-featured template engine for Python. Ansible uses Jinja2 templates to generate and modify configuration files on managed hosts.
- Scenario 1: Install Nginx on 10 hosts, but each host must listen on a different port. How do you solve this?
- Scenario 2: Render both the keepalived Master and Slave configuration files from a single template.
# 2.2 How Ansible uses Jinja2
Ansible applies Jinja2 templates through the template module. So what does the template module actually do?
The template module works just like the copy module: both copy a file to the remote host. The difference is that template first renders the variables in the file, whereas copy transfers the file to the managed host unmodified.
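As a quick illustration (the file and arguments are just an example, borrowing the motd.j2 template from 2.5), the two modules can be compared ad hoc; template renders the Jinja2 variables first, while copy pushes the file verbatim:
[root@manager jinja]# ansible webservers -m template -a "src=./motd.j2 dest=/etc/motd"
[root@manager jinja]# ansible webservers -m copy -a "src=./motd.j2 dest=/etc/motd.raw"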
# 2.3 Basic Jinja2 template syntax
- 1) To use Jinja2 in a configuration file, the task in the playbook must use the template module.
- 2) Reference variables inside the configuration file with {{ variable }}.
# 2.4 Jinja2 template logic
1. Loop expression
{% for i in EXPR %} ... {% endfor %}
2. Conditional expression
{% if EXPR %} ... {% elif EXPR %} ... {% endif %}
3. Comment
{# COMMENT #}
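A tiny illustrative template combining all three constructs (the port list is hypothetical):
{# render one listen line per port, skipping 9000 #}
{% for port in [8080, 8081, 9000] %}
{% if port != 9000 %}
listen {{ port }};
{% endif %}
{% endfor %}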
# 2.5 Jinja2 template example
1. Use a playbook to push the file
[root@manager jinja]# cat jinja2.yml
- hosts: webservers
  tasks:
    - name: Copy Template File /etc/motd
      template:
        src: ./motd.j2
        dest: /etc/motd
2. Prepare the motd.j2 file
[root@manager jinja]# cat motd.j2
Welcome to {{ ansible_hostname }}
This system total Memory is: {{ ansible_memtotal_mb }} MB
This system free Memory is: {{ ansible_memfree_mb }} MB
3. Run the playbook
[root@manager jinja]# ansible-playbook jinja2.yml
4. Check the result
[root@web01 ~]# cat /etc/motd
Welcome to web01
This system total Memory is: 7821 MB
This system free Memory is: 6571 MB

[root@web02 ~]# cat /etc/motd
Welcome to web02
This system total Memory is: 7821 MB
This system free Memory is: 6740 MB
The example above shows how to use facts variables: when the playbook runs, ansible_hostname and ansible_memtotal_mb are replaced with the facts gathered from each managed host.
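To see which facts are available on a host before writing a template, query them with the setup module, for example:
[root@manager jinja]# ansible webservers -m setup -a "filter=ansible_memtotal_mb"
[root@manager jinja]# ansible webservers -m setup -a "filter=ansible_hostname"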
# 2.6 Case 1: Managing Nginx with Jinja2
Use a Jinja2 for loop to render an Nginx load-balancing configuration file.
1. Use a playbook to push the file
[root@manager jinja]# cat nginx_for_jinja.yml
- hosts: webservers
  vars:
    - ha_domain: ha.oldxu.net
    - ha_port: 80
    - ha_node_port: 80
  tasks:
    - name: Copy
      template:
        src: ./nginx.conf.j2
        dest: /tmp/nginx.conf
2. Prepare the nginx.conf.j2 configuration file
[root@manager jinja]# cat nginx.conf.j2
upstream {{ ha_domain }} {
{% for host in groups['webservers'] %}
    server {{ host }}:{{ ha_node_port }};
{% endfor %}
}

server {
    listen {{ ha_port }};
    server_name {{ ha_domain }};

    location / {
        proxy_pass http://{{ ha_domain }};
    }
}
3. Run the playbook
[root@manager jinja]# ansible-playbook nginx_for_jinja.yml
4. Check the configuration file rendered by the Jinja2 template
[root@web_7 ~]# cat /tmp/nginx.conf
upstream ha.oldxu.net {
    server 172.16.1.7:80;
    server 172.16.1.8:80;
}

server {
    listen 80;
    server_name ha.oldxu.net;

    location / {
        proxy_pass http://ha.oldxu.net;
    }
}
# 2.7 Case 2: Managing Keepalived with Jinja2
Use a Jinja2 if test to render the keepalived Master and Slave configuration files and push them to the lb group. Possible approaches:
- 1. Define host_vars in the inventory and set different variables per host (a minimal sketch follows this list).
- 2. Use when in the playbook to match on hostname and distribute different configuration files.
- 3. Render different configurations from a single template with Jinja2 (the approach used below).
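A minimal sketch of approach 1 (the file names and values here are illustrative, matching the lbservers hosts used elsewhere in this document):
[root@manager jinja]# cat host_vars/172.16.1.5
keepalived_role: MASTER
keepalived_priority: 200
[root@manager jinja]# cat host_vars/172.16.1.6
keepalived_role: BACKUP
keepalived_priority: 100
The template would then simply reference state {{ keepalived_role }} and priority {{ keepalived_priority }} with no if logic. The example below uses approach 3 instead.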
1. Use a playbook to push the keepalived configuration file
[root@manager jinja]# cat keepalived_if_jinja.yml
- hosts: lbservers
  tasks:
    - name: Copy
      template:
        src: ./keepalived.conf.j2
        dest: /tmp/keepalived.conf
2. Prepare the keepalived.conf.j2 configuration file
[root@manager jinja]# cat keepalived.conf.j2
global_defs {
    router_id {{ ansible_hostname }}      # identifier of this physical node
}
vrrp_instance VI_1 {
{% if ansible_hostname == "proxy01" %}
    state MASTER                          # role state
    priority 200                          # priority of this node within the virtual router
{% else %}
    state BACKUP                          # role state
    priority 100                          # priority of this node within the virtual router
{% endif %}
    interface eth0 eth1                   # physical interface bound to this virtual router
    virtual_router_id 50                  # virtual router identifier (VRID)
    advert_int 3                          # VRRP advertisement interval, default 1s
    authentication {
        auth_type PASS                    # authentication type: simple password
        auth_pass 1111                    # password, at most 8 characters
    }
    virtual_ipaddress {
        10.0.0.100 dev eth0 label eth0:0  # VIP address
    }
}
3. Run the playbook
[root@manager jinja]# ansible-playbook keepalived_if_jinja.yml
4. Check the keepalived configuration file on lb01 (Master)
[root@proxy01 ~]# cat /tmp/keepalived.conf
global_defs {
    router_id proxy01                     # identifier of this physical node
}
vrrp_instance VI_1 {
    state MASTER                          # role state
    priority 200                          # priority of this node within the virtual router
    interface eth0 eth1                   # physical interface bound to this virtual router
    virtual_router_id 50                  # virtual router identifier (VRID)
    advert_int 3                          # VRRP advertisement interval, default 1s
    authentication {
        auth_type PASS                    # authentication type: simple password
        auth_pass 1111                    # password, at most 8 characters
    }
    virtual_ipaddress {
        10.0.0.100 dev eth0 label eth0:0  # VIP address
    }
}
5. Check the keepalived configuration file on lb02 (Backup)
[root@proxy02 ~]# cat /tmp/keepalived.conf
global_defs {
    router_id proxy02                     # identifier of this physical node
}
vrrp_instance VI_1 {
    state BACKUP                          # role state
    priority 100                          # priority of this node within the virtual router
    interface eth0 eth1                   # physical interface bound to this virtual router
    virtual_router_id 50                  # virtual router identifier (VRID)
    advert_int 3                          # VRRP advertisement interval, default 1s
    authentication {
        auth_type PASS                    # authentication type: simple password
        auth_pass 1111                    # password, at most 8 characters
    }
    virtual_ipaddress {
        10.0.0.100 dev eth0 label eth0:0  # VIP address
    }
}
# 2.8 Case 3: Generating the haproxy configuration with a Jinja2 loop
1. Use a playbook to push the file
[root@manager jinja]# cat haproxy_for_jinja.yml
- hosts: lbservers
  vars:
    - ha_domain: ha.oldxu.net
    - ha_port: 80
    - ha_node_port: 80
  tasks:
    - name: Copy
      template:
        src: ./haproxy.cfg.j2
        dest: /tmp/haproxy.cfg
2. Prepare the haproxy.cfg.j2 configuration file
[root@manager jinja]# cat haproxy.cfg.j2
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    log 127.0.0.1 local2
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats level admin
    #nbproc 4
    nbthread 8
    cpu-map 1 0
    cpu-map 2 1
    cpu-map 3 2
    cpu-map 4 3

defaults
    mode http
    log global
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor except 127.0.0.0/8
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
#----------------------------------------------------------------
# Listen settings
#----------------------------------------------------------------
##
listen haproxy-stats_2
    bind *:9999
    stats enable
    stats refresh 1s
    stats hide-version
    stats uri /haproxy?stats
    stats realm "HAProxy statistics"
    stats auth admin:123456
    stats admin if TRUE

frontend web
    bind *:{{ ha_port }}
    mode http
    acl ansible_domain hdr_reg(host) -i ha.oldxu.net
    use_backend ansible_cluster if ansible_domain

backend ansible_cluster
    balance roundrobin
{% for host in groups['webservers'] %}
    server {{ host }} {{ host }}:{{ ha_node_port }} check port {{ ha_node_port }} inter 3s rise 2 fall 3
{% endfor %}
3. Run the playbook
[root@manager jinja]# ansible-playbook haproxy_for_jinja.yml
4. Check the configuration file rendered by the Jinja2 template
[root@proxy01 ~]# cat /tmp/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    log 127.0.0.1 local2
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats level admin
    #nbproc 4
    nbthread 8
    cpu-map 1 0
    cpu-map 2 1
    cpu-map 3 2
    cpu-map 4 3

defaults
    mode http
    log global
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor except 127.0.0.0/8
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
#----------------------------------------------------------------
# Listen settings
#----------------------------------------------------------------
##
listen haproxy-stats_2
    bind *:9999
    stats enable
    stats refresh 1s
    stats hide-version
    stats uri /haproxy?stats
    stats realm "HAProxy statistics"
    stats auth admin:123456
    stats admin if TRUE

frontend web
    bind *:80
    mode http
    acl ansible_domain hdr_reg(host) -i ha.oldxu.net
    use_backend ansible_cluster if ansible_domain

backend ansible_cluster
    balance roundrobin
    server 172.16.1.7 172.16.1.7:80 check port 80 inter 3s rise 2 fall 3
    server 172.16.1.8 172.16.1.8:80 check port 80 inter 3s rise 2 fall 3
    server 172.16.1.9 172.16.1.9:80 check port 80 inter 3s rise 2 fall 3
# 3. Ansible Roles
# 3.1 Roles overview
Roles are the best way to organize playbooks. A role follows a known directory structure from which vars, tasks, and handlers are loaded automatically so that playbooks can use them easily. Compared with a plain playbook, roles give a clearer, layered structure, at the cost of being slightly more involved to set up.
For example, if every piece of software requires the time-synchronization service to be installed first, then every playbook would have to repeat the time-sync tasks, which bloats the configuration and makes it hard to maintain.
With a role, the time-sync tasks are written once and simply called whenever they are needed, avoiding the bloat caused by re-writing the same tasks.
# 3.2 Roles directory structure
The roles directory layout must follow the official convention below, and each component directory (tasks, handlers, vars, meta, etc.) must contain a main.yml entry file; this is mandatory.
[root@manager ~]# cd /etc/ansible/roles/
[root@manager roles]# mkdir {nfs,rsync,web}/{vars,tasks,templates,handlers,files,meta} -p
[root@manager roles]# tree nfs/
nfs/            # role name
├── files       # static files
├── handlers    # handler tasks
├── meta        # role dependencies
├── tasks       # the actual tasks
├── templates   # template files
└── vars        # variable definitions
# 3.3 Roles dependencies
A role can automatically pull in other roles when it is used; role dependencies are declared in the role's meta/main.yml file (see the sketch after this list).
For example, when deploying a wordpress project:
- 1. First make sure the nginx and php-fpm roles work on their own.
- 2. Then declare the dependencies in the wordpress role's meta/main.yml.
- 3. The dependent roles are nginx and php-fpm.
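A minimal sketch of such a wordpress role's meta/main.yml (the role names follow the example above):
[root@manager roles]# cat wordpress/meta/main.yml
dependencies:
  - role: nginx
  - role: php-fpm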
# 3.4 Case 1: Deploying NFS with roles
1. The directory structure is as follows
[root@manager ~]# mkdir roles/nfs-server/{tasks,templates,handlers} -p
[root@manager ~]# tree roles/
roles/
├── ansible.cfg
├── group_vars
│   └── all
├── host_group
├── nfs-server
│   ├── handlers
│   │   └── main.yml
│   ├── tasks
│   │   └── main.yml
│   └── templates
│       └── exports.j2
└── top.yml
2. Define the roles host inventory
[root@manager roles]# cat host_group
[webservers]
172.16.1.7
172.16.1.8

[webservers:vars]
port=8848
ansible_vars=inventory_Group_vars

[dbservers]
172.16.1.41

[lbservers]
172.16.1.5
172.16.1.6
3. The nfs role's tasks
[root@manager roles]# cat nfs-server/tasks/main.yml
- name: Installed NFS Server
  yum:
    name: nfs-utils
    state: present

- name: Configure NFS Server
  template:
    src: exports.j2
    dest: /etc/exports
  notify: Restart NFS Server

- name: Create Group
  group:
    name: "{{ nfs_group }}"
    gid: "{{ nfs_gid }}"

- name: Create User
  user:
    name: "{{ nfs_user }}"
    uid: "{{ nfs_uid }}"
    group: "{{ nfs_group }}"

- name: Create NFS Share Directory
  file:
    path: "{{ nfs_share_dir }}"
    state: directory
    owner: "{{ nfs_user }}"
    group: "{{ nfs_group }}"
    recurse: yes

- name: Start NFS Server
  systemd:
    name: nfs
    state: started
    enabled: yes
4. The nfs role's handlers
[root@manager roles]# cat nfs-server/handlers/main.yml
- name: Restart NFS Server
  systemd:
    name: nfs
    state: restarted
5. The nfs role's templates directory
[root@manager roles]# cat nfs-server/templates/exports.j2
###
{{ nfs_share_dir }} {{ nfs_share_ip_range }}(rw,all_squash,anonuid={{ nfs_uid }},anongid={{ nfs_gid }})
6. Variable definitions for the nfs role
[root@manager roles]# cat group_vars/all
### nfs
nfs_share_dir: /data
nfs_share_ip_range: 172.16.1.0/24
nfs_uid: 666
nfs_gid: 666
nfs_user: www
nfs_group: www
7. Use the role in a playbook: specify the target host group and run the nfs-server role
[root@manager roles]# cat top.yml
- hosts: webservers
  roles:
    - role: nfs-server

[root@manager roles]# ansible-playbook top.yml
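After the play completes, the export can be checked from the control node; a quick check, assuming nfs-utils is installed on the manager and 172.16.1.7 is one of the webservers hosts:
[root@manager roles]# showmount -e 172.16.1.7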
# 3.5 Case 2: Deploying Rsync with roles
1. The directory structure is as follows
[root@manager ~]# mkdir roles/rsync-server/{tasks,templates,handlers} -p
[root@manager ~]# tree roles/
roles/
├── ansible.cfg
├── group_vars
│   └── all
├── host_group
├── rsync-server
│   ├── handlers
│   │   └── main.yml
│   ├── tasks
│   │   └── main.yml
│   └── templates
│       └── rsyncd.conf.j2
└── top.yml
2. Define the roles host inventory
[root@manager roles]# cat host_group
[webservers]
172.16.1.7
172.16.1.8

[webservers:vars]
port=8848
ansible_vars=inventory_Group_vars

[dbservers]
172.16.1.41

[lbservers]
172.16.1.5
172.16.1.6
3. The rsync role's tasks
[root@manager rsync-server]# cat tasks/main.yml
- name: Installed Rsync Server
  yum:
    name: rsync
    state: present

- name: Configure Rsync Server
  template:
    src: rsyncd.conf.j2
    dest: /etc/rsyncd.conf
  notify: Restart Rsync Server

- name: Create Group
  group:
    name: "{{ rsync_group }}"

- name: Create User
  user:
    name: "{{ rsync_user }}"

- name: Rsync Server Virtual User Passwd
  copy:
    content: "{{ rsync_virtual_user }}:redhat"
    dest: /etc/rsync.passwd
    owner: root
    group: root
    mode: "0600"

- name: Rsync Module Path
  file:
    path: "/{{ rsync_module_name }}"
    state: directory
    owner: "{{ rsync_user }}"
    group: "{{ rsync_group }}"
    mode: "0755"
    recurse: yes

- name: Started Rsync Server
  systemd:
    name: rsyncd
    state: started
    enabled: yes
4. The rsync role's handlers
[root@manager rsync-server]# cat handlers/main.yml
- name: Restart Rsync Server
  systemd:
    name: rsyncd
    state: restarted
5. The rsync role's templates directory
[root@manager rsync-server]# cat templates/rsyncd.conf.j2
uid = {{ rsync_user }}
gid = {{ rsync_group }}
port = {{ rsync_port }}
fake super = yes
use chroot = no
max connections = {{ rsync_connection_max }}
timeout = 600
#ignore errors
read only = false
list = false
auth users = {{ rsync_virtual_user }}
secrets file = {{ rsync_virtual_path }}
log file = /var/log/rsyncd.log
#####################################
[{{ rsync_module_name }}]
path = /{{ rsync_module_name }}
6. Variable definitions for the rsync role
[root@manager roles]# cat group_vars/all
# rsync
rsync_user: ansible_rsync
rsync_group: ansible_rsync
rsync_port: 873
rsync_connection_max: 3000
rsync_virtual_user: ansible_rsync_backup_2
rsync_virtual_path: /etc/rsync.passwd
rsync_module_name: ansible_backup_2
7. Use the role in a playbook: specify the target host group and run the rsync-server role
[root@manager roles]# cat top.yml
- hosts: webservers
  roles:
    - role: nfs-server

- hosts: webservers
  roles:
    - role: rsync-server
      tags: rsync

[root@manager roles]# ansible-playbook top.yml -t rsync
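As a quick client-side check (172.16.1.7 is one of the webservers hosts, and the password matches the redhat value written into /etc/rsync.passwd by the tasks above):
[root@manager roles]# export RSYNC_PASSWORD=redhat
[root@manager roles]# rsync -avz /etc/hosts ansible_rsync_backup_2@172.16.1.7::ansible_backup_2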