当前位置: 首页 > news >正文

Ansible - 详解

Ansible - 详解

基于ssh远程命令的部署工具

文章目录

    • 1. 执行任务
    • 2. hadoop+flink安装示例
      • 2.1. 整体目录
      • 2.2. inventories/hosts
      • 2.3. 入口 playbook:site.yml
      • 2.4. Role 1:hadoop
      • 2.5. Role 2:flink
      • 2.6. 一键执行
      • 2.7. 验证

1. 执行任务

ansible-playbook -i host.ini task.yml -t mytag

2. hadoop+flink安装示例

2.1. 整体目录

bigdata-ansible/
├── inventories
│   └── hosts
├── site.yml
└── roles
    ├── hadoop
    │   ├── defaults/main.yml
    │   ├── tasks/main.yml
    │   ├── handlers/main.yml
    │   └── templates/hadoop-env.sh.j2
    └── flink
        ├── defaults/main.yml
        ├── tasks/main.yml
        ├── handlers/main.yml
        └── templates/flink-conf.yaml.j2

2.2. inventories/hosts

# Inventory group targeted by site.yml; connects as `ubuntu` and escalates via sudo.
[bigdata]
192.168.56.10 ansible_user=ubuntu ansible_become=yes

2.3. 入口 playbook:site.yml

---
# Entry-point playbook: two plays against the [bigdata] group,
# selectable individually via `-t deploy_hadoop` / `-t deploy_flink`.
- name: 部署 Hadoop
  hosts: bigdata
  become: true  # escalate to root; Ansible runs remote commands through sudo
  roles:
    - hadoop
  tags: deploy_hadoop

- name: 部署 Flink
  hosts: bigdata
  become: true  # escalate to root; Ansible runs remote commands through sudo
  roles:
    - flink
  tags: deploy_flink

2.4. Role 1:hadoop

  1. roles/hadoop/defaults/main.yml

    用来存放默认变量,优先级最低

    # Role defaults (lowest variable precedence) — override per inventory/group as needed.
    hadoop_version: "3.3.6"  # quoted: keep version strings as strings
    hadoop_home: "/opt/hadoop-{{ hadoop_version }}"  # quote templated values
    java_home: /usr/lib/jvm/java-8-openjdk-amd64
    hadoop_user: hadoop  # unprivileged account that owns/runs HDFS daemons
  2. roles/hadoop/tasks/main.yml

    任务配置

    ---
    # Install Java, create the hadoop user, fetch/unpack Hadoop, render config,
    # then format the NameNode (idempotently) and start the HDFS daemons.
    - name: 安装 Java8
      apt:
        name: openjdk-8-jdk
        state: present
        update_cache: true

    - name: 创建 hadoop 用户
      user:
        name: "{{ hadoop_user }}"
        system: true
        shell: /bin/bash
        home: "/home/{{ hadoop_user }}"

    - name: 下载 Hadoop 二进制包
      get_url:
        url: "https://downloads.apache.org/hadoop/common/hadoop-{{ hadoop_version }}/hadoop-{{ hadoop_version }}.tar.gz"
        dest: "/tmp/hadoop-{{ hadoop_version }}.tar.gz"
        # Verify the download against the upstream .sha512 file.
        checksum: "sha512:https://downloads.apache.org/hadoop/common/hadoop-{{ hadoop_version }}/hadoop-{{ hadoop_version }}.tar.gz.sha512"

    - name: 解压到 /opt
      unarchive:
        src: "/tmp/hadoop-{{ hadoop_version }}.tar.gz"
        dest: /opt
        remote_src: true  # archive is already on the managed host
        owner: root
        group: root

    - name: 创建软链接 /opt/hadoop
      file:
        src: "{{ hadoop_home }}"
        dest: /opt/hadoop
        state: link

    - name: 渲染 hadoop-env.sh
      template:
        src: hadoop-env.sh.j2
        dest: "{{ hadoop_home }}/etc/hadoop/hadoop-env.sh"
      notify: restart hadoop  # handler runs once, after all tasks in this play

    - name: 初始化 core-site.xml(伪分布式)
      copy:
        content: |
          <configuration><property><name>fs.defaultFS</name><value>hdfs://localhost:9000</value></property>
          </configuration>
        dest: "{{ hadoop_home }}/etc/hadoop/core-site.xml"
      notify: restart hadoop

    - name: 创建 namenode/datanode 本地目录
      file:
        path: "/data/hdfs/{{ item }}"
        state: directory
        owner: "{{ hadoop_user }}"
        mode: "0755"  # quoted so YAML does not read it as an octal int
      loop:
        - namenode
        - datanode

    - name: 格式化 NameNode
      become_user: "{{ hadoop_user }}"
      shell: "{{ hadoop_home }}/bin/hdfs namenode -format -nonInteractive"
      args:
        # Skip formatting if the NameNode metadata already exists (idempotency).
        creates: /data/hdfs/namenode/current/VERSION

    - name: 启动 NameNode & DataNode
      become_user: "{{ hadoop_user }}"
      shell: "{{ hadoop_home }}/sbin/hadoop-daemon.sh --config {{ hadoop_home }}/etc/hadoop --script hdfs start {{ item }}"
      loop:
        - namenode
        - datanode

    - name: 等待 NameNode 9870 端口
      wait_for:
        port: 9870  # NameNode web UI
        timeout: 60
  3. roles/hadoop/templates/hadoop-env.sh.j2

    模板文件

    # Jinja2 template rendered to {{ hadoop_home }}/etc/hadoop/hadoop-env.sh.
    export JAVA_HOME={{ java_home }}
    export HADOOP_HOME={{ hadoop_home }}
    export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
    export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
  4. roles/hadoop/handlers/main.yml

    定义可以被任务notify的动作,通常是重启,notify 不会立即执行,而是等 当前 play 所有 task 跑完后 再按顺序去调用 handler。

    # Handler invoked via `notify: restart hadoop`; deferred until the end of the play.
    - name: restart hadoop
      become_user: "{{ hadoop_user }}"
      shell: |
        {{ hadoop_home }}/sbin/hadoop-daemon.sh stop namenode || true
        {{ hadoop_home }}/sbin/hadoop-daemon.sh stop datanode || true
        {{ hadoop_home }}/sbin/hadoop-daemon.sh start namenode
        {{ hadoop_home }}/sbin/hadoop-daemon.sh start datanode

2.5. Role 2:flink

  1. roles/flink/defaults/main.yml

    # Role defaults (lowest variable precedence).
    flink_version: "1.18.0"
    # Quoted: a bare 2.12 is parsed as a YAML float (and e.g. 2.10 would
    # collapse to 2.1 when rendered into flink_name).
    scala_version: "2.12"
    flink_name: "flink-{{ flink_version }}-bin-scala_{{ scala_version }}"
    flink_tar: "{{ flink_name }}.tgz"
    flink_home: "/opt/{{ flink_name }}"
    java_home: /usr/lib/jvm/java-8-openjdk-amd64
  2. roles/flink/tasks/main.yml

    ---
    # Fetch/unpack Flink, render its config, install a systemd unit,
    # then start the standalone cluster and wait for the JobManager UI.
    - name: 下载 Flink 安装包
      get_url:
        url: "https://downloads.apache.org/flink/flink-{{ flink_version }}/{{ flink_tar }}"
        dest: "/tmp/{{ flink_tar }}"
        # Verify the download against the upstream .sha512 file.
        checksum: "sha512:https://downloads.apache.org/flink/flink-{{ flink_version }}/{{ flink_tar }}.sha512"

    - name: 解压到 /opt
      unarchive:
        src: "/tmp/{{ flink_tar }}"
        dest: /opt
        remote_src: true  # archive is already on the managed host

    - name: 创建软链接 /opt/flink
      file:
        src: "{{ flink_home }}"
        dest: /opt/flink
        state: link

    - name: 渲染 flink-conf.yaml
      template:
        src: flink-conf.yaml.j2
        dest: "{{ flink_home }}/conf/flink-conf.yaml"
      notify: restart flink  # handler runs once, after all tasks in this play

    - name: 创建 systemd 单元文件
      copy:
        content: |
          [Unit]
          Description=Apache Flink
          After=network.target
          [Service]
          Type=forking
          User=root
          ExecStart={{ flink_home }}/bin/start-cluster.sh
          ExecStop={{ flink_home }}/bin/stop-cluster.sh
          Restart=on-failure
          [Install]
          WantedBy=multi-user.target
        dest: /etc/systemd/system/flink.service
      notify: restart flink

    - name: 启动 Flink 集群
      systemd:
        name: flink
        daemon_reload: true  # pick up the freshly written unit file
        state: started
        enabled: true

    - name: 等待 JobManager 8081 端口
      wait_for:
        port: 8081  # Flink web UI / REST endpoint
        timeout: 60
  3. roles/flink/templates/flink-conf.yaml.j2

    # Jinja2 template rendered to {{ flink_home }}/conf/flink-conf.yaml
    # (single-node standalone setup).
    jobmanager.rpc.address: localhost
    jobmanager.rpc.port: 6123
    taskmanager.numberOfTaskSlots: 4
    parallelism.default: 4
  4. roles/flink/handlers/main.yml

    # Handler invoked via `notify: restart flink`; deferred until the end of the play.
    - name: restart flink
      systemd:
        name: flink
        state: restarted

2.6. 一键执行

```bash
# 1. Install Ansible (Ubuntu example)
sudo apt update && sudo apt install -y ansible
# 2. Lay out the directory tree, then enter the project root
cd bigdata-ansible
# 3. Run the whole deployment
ansible-playbook -i inventories/hosts site.yml
```

2.7. 验证

  • Hadoop UI: http://192.168.56.10:9870
  • Flink UI: http://192.168.56.10:8081
http://www.jsqmd.com/news/313109/

相关文章:

  • 【Android毕设全套源码+文档】基于android的交友平台小程序设计与实现(丰富项目+远程调试+讲解+定制)
  • 2026年常州营销策划公司推荐:针对B2B与本地生活场景评价,破解获客难与转化低痛点
  • 2026年净菜生产线厂家权威推荐:净菜流水线厂商引领行业升级!
  • 本地营销服务怎么挑?2026年常州营销策划公司评测与推荐,应对预算与效果痛点
  • 麦德龙购物卡回收流程与规则,警惕“虚高”折扣陷阱
  • 2026年常州营销策划公司专项测评及排名报告:权威选型指引
  • 小程序毕设项目:基于php+微信小程序的学习交流平台(源码+文档,讲解、调试运行,定制等)
  • 【AI+编程】从“敲代码“到“聊代码“,大模型如何重塑编程生态?2025中国AI技术路线全解析
  • 程序员必看!AI大模型无限使用神器,告别付费API,10+生图+15+生视频模型全解锁,代码效率起飞!
  • 【毕业设计】基于php+微信小程序的学习交流平台(源码+文档+远程调试,全bao定制等)
  • [网络] Netplan下vlan配置手册
  • 深度探秘 Apache DolphinScheduler 数据库模式
  • 江苏冷冻离心机生产商哪家性价比高,凯特实验仪器是答案
  • 2026年广东医疗器械铝质氧化优质厂家口碑排名,谁是榜首
  • 宝宝湿疹选对不踩雷:纽强实现从舒缓到维稳一步到位
  • 【读书笔记】《森林帝国》
  • 液体搅拌机实力厂家推荐,哪家更具性价比?
  • 这部电影,让千万青少年看到了自己
  • 耐用型DO溶氧仪哪个品牌好,上海美续测控技术推荐吗
  • 落雪音乐服务器搭建 - AA
  • 【Android毕设源码分享】基于springboot+Android的交友平台小程序的设计与实现(程序+文档+代码讲解+一条龙定制)
  • 盘点全国好用的环保型翻堆机品牌
  • 【Android毕设源码分享】基于springboot+Android的小区废品收购管理系统小程序的设计与实现(程序+文档+代码讲解+一条龙定制)
  • 2026年最新江浙沪皖系统门窗实力厂家选哪家
  • 【Android毕设源码分享】基于springboot+Android的4S店试驾平台小程序的设计与实现(程序+文档+代码讲解+一条龙定制)
  • 【渗透测试】HTB靶场之Forgotten 全过程wp
  • AI 一键生成思维导图:Visual Paradigm Markmap Studio 完全上手指南(附实战案例)
  • 2026最新防火涂料五大实力品牌推荐:钢结构/隧道/电缆防火涂料精准适配多场景,筑牢建筑安全屏障
  • 2026国内卧式拉力试验机领域知名企业盘点,技术实力与服务体系全方位解读
  • 竞品对比分析:大家的系统 vs Reddit Answer