ansible-container
ansible-container copied to clipboard
Run dependent services during build
ISSUE TYPE
- Feature Idea
container.yml
version: "2"
#defaults:
settings:
conductor_base: ubuntu:xenial
services:
db:
from: mysql:5.7
volumes:
- "{{ DOCKER_DATA_ROOT }}/mysql/data:/var/lib/mysql"
environment:
MYSQL_ROOT_PASSWORD: "{{ COMMON_MYSQL_MIGRATE_PASS }}"
MYSQL_DATABASE: "{{ EDXAPP_MYSQL_DB_NAME }}"
MYSQL_USER: "{{ EDXAPP_MYSQL_USER }}"
MYSQL_PASSWORD: "{{ EDXAPP_MYSQL_PASSWORD }}"
mongo:
from: mongo:3.4
command: /entrypoint.sh mongod --auth
volumes:
- "{{ DOCKER_DATA_ROOT }}/mongo/data:/data/db"
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- mongo_docker
# Need to build our own for ES 0.9
es:
from: ubuntu:xenial # opensaas/edx-elasticsearch
command: ["/usr/share/elasticsearch/bin/elasticsearch","-f"]
volumes:
- "{{ DOCKER_DATA_ROOT }}/elasticsearch/data:/data"
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- elasticsearch
- { role: oraclejdk, tags: ['install:app-requirements'] }
memcache:
from: memcached:1.4.24
volumes:
- "{{ DOCKER_DATA_ROOT }}/memcache/data:/data"
nginx:
from: nginx
links:
- lms:lms
- cms:cms
# - xqueue:xqueue
- insights:insights
- analytics:analytics
volumes:
- "{{ DOCKER_DATA_ROOT }}:/edx/var"
command: |
/bin/bash -c "nginx -g 'daemon off;'"
ports:
- 6080:80
- 6443:443
- 18000:18000
- 18010:18010
- 18020:18020
- 18040:18040
- 18100:18100
- 18110:18110
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- role: nginx
nginx_sites:
- cms
- lms
- analytics_api
- insights
- lms-preview
- xqueue
nginx_default_sites:
- lms
- cms
rabbitmq:
from: rabbitmq:3.5.3
volumes:
- "{{ DOCKER_DATA_ROOT }}/rabbitmq/data:/var/lib/rabbitmq"
environment:
RABBITMQ_DEFAULT_USER: "{{ EDXAPP_CELERY_USER }}"
RABBITMQ_DEFAULT_PASS: "{{ EDXAPP_CELERY_PASSWORD }}"
forums:
from: ubuntu:xenial #opensaas/edx-forums
links:
- db:db
# - xqueue:xqueue
- memcache:memcache
- mongo:mongo
- es:es
- rabbitmq:rabbitmq
command: ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
depends_on:
- db
- memcache
- mongo
- es
- rabbitmq
volumes:
- "{{ DOCKER_EDX_ROOT }}/cs_comments_service:/edx/app/forum/cs_comments_service"
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- forum
# xqueue:
# from: ubuntu:xenial #opensaas/edx-xqueue
# links:
# - db:db
# - memcache:memcache
# - mongo:mongo
# - es:es
# - rabbitmq:rabbitmq
# command: ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
# volumes:
# - "{{DOCKER_EDX_ROOT}}/xqueue:/edx/app/edxapp/xqueue"
# - /dev/log:/dev/log
lms:
from: ubuntu:xenial #opensaas/edxapp:trusty-v3
links:
- db:db
- forums:forums
# - xqueue:xqueue
- memcache:memcache
- mongo:mongo
- es:es
- rabbitmq:rabbitmq
command: ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
depends_on:
- db
- memcache
- mongo
- es
- rabbitmq
volumes:
- "{{ DOCKER_EDX_ROOT }}/edx-platform:/edx/app/edxapp/edx-platform"
- /dev/log:/dev/log
- "{{ DOCKER_DATA_ROOT }}/edxapp:/edx/var/edxapp"
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- {role: edxapp, service_variants_enabled: ['lms'], migrate_db: 'no'}
cms:
from: ubuntu:xenial #opensaas/edxapp:trusty-v3
links:
- db:db
- forums:forums
# - xqueue:xqueue
- memcache:memcache
- mongo:mongo
- es:es
- rabbitmq:rabbitmq
command: ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
depends_on:
- db
- memcache
- mongo
- es
- rabbitmq
volumes:
- "{{DOCKER_EDX_ROOT}}/edx-platform:/edx/app/edxapp/edx-platform"
- /dev/log:/dev/log
- "{{ DOCKER_DATA_ROOT }}/edxapp:/edx/var/edxapp"
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- { role: 'edxapp', skip_static_remove: True, skip_git: true, service_variants_enabled: ['cms'], migrate_db: 'no' }
- {role: demo, when: edx_install_demo is defined and edx_install_demo, tags: ['install:configuration', 'migrate']}
edxworker:
from: ubuntu:xenial #opensaas/edxapp:trusty-v3
links:
- db:db
- forums:forums
# - xqueue:xqueue
- memcache:memcache
- mongo:mongo
- es:es
- rabbitmq:rabbitmq
command: ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
depends_on:
- db
- memcache
- mongo
- es
- rabbitmq
volumes:
- "{{DOCKER_EDX_ROOT}}/edx-platform:/edx/app/edxapp/edx-platform"
- /dev/log:/dev/log
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- { role: 'edxapp', celery_worker: True, skip_git: true }
analytics:
from: ubuntu:xenial #edxops/trusty-common:v3
links:
- db:db
- forums:forums
# - xqueue:xqueue
- memcache:memcache
- mongo:mongo
- es:es
- rabbitmq:rabbitmq
- insights:insights
command: ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
depends_on:
- db
- memcache
- mongo
- es
- rabbitmq
volumes:
- "{{ DOCKER_EDX_ROOT }}/analytics_api:/edx/app/analytics_api"
- "{{ DOCKER_DATA_ROOT }}/analytics_api:/edx/var/analytics_api"
- /dev/log:/dev/log
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- analytics_api
edxconfig:
from: ubuntu:xenial #edxops/trusty-common:v3
links:
- db:db
- forums:forums
# - xqueue:xqueue
- memcache:memcache
- mongo:mongo
- es:es
command: /bin/false
depends_on:
- db
- memcache
- mongo
- es
- rabbitmq
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- docker_db_setup
insights:
from: ubuntu:xenial #opensaas/edx-insights
links:
- db:db
- forums:forums
# - xqueue:xqueue
- memcache:memcache
- mongo:mongo
- es:es
- rabbitmq:rabbitmq
command: ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
depends_on:
- db
- memcache
- mongo
- es
- rabbitmq
volumes:
- "{{DOCKER_EDX_ROOT}}/insights:/edx/app/insights"
- /dev/log:/dev/log
- "{{ DOCKER_DATA_ROOT }}/insights:/edx/var/insights"
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- insights
analytics_pipeline:
from: ubuntu:xenial #edxops/trusty-common:v3
links:
- db:db
- forums:forums
# - xqueue:xqueue
- memcache:memcache
- mongo:mongo
- es:es
- rabbitmq:rabbitmq
- insights:insights
command: |
/bin/bash su -m hadoop -c /edx/app/hadoop/hadoop/docker-startup.sh
depends_on:
- db
- memcache
- mongo
- es
- rabbitmq
volumes:
- "{{ DOCKER_EDX_ROOT }}/hadoop:/edx/app/hadoop"
- "{{ DOCKER_DATA_ROOT }}/analytics_pipline:/edx/etc/edx-analytics-pipeline"
- "{{ DOCKER_EDX_ROOT }}/analytics_pipline:/edx/app/edx-analytics-pipeline"
- "{{ DOCKER_DATA_ROOT }}/hadoop:/edx/var/hadoop"
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- { role: 'hadoop_master', tags: ['install:app-requirements'] }
- { role: 'hive', tags: ['install:app-requirements'] }
- { role: 'sqoop', tags: ['install:app-requirements'] }
- analytics_pipeline
registries: {}
OS / ENVIRONMENT
SUMMARY
In the 0.2 release all the services ran during build, which meant I could load required information into a database from a separate service. This does not seem to be possible with the latest release, which only runs each service as it is being built. I have tried depends_on, but that does not seem to make a difference.
STEPS TO REPRODUCE
Maybe the depends_on tag could make those dependent services run as the primary service is being built. For example, my lms service below installs all the required software and then runs a migrate_db script, which needs to be run from the lms service, and which sets up the database correctly. Currently this does not work because the database service is not running while the lms service is being built — but could the depends_on tag actually start the required services?
lms:
from: ubuntu:xenial #opensaas/edxapp:trusty-v3
links:
- db:db
- forums:forums
# - xqueue:xqueue
- memcache:memcache
- mongo:mongo
- es:es
- rabbitmq:rabbitmq
command: ["/edx/app/supervisor/venvs/supervisor/bin/supervisord", "-n", "--configuration", "/edx/app/supervisor/supervisord.conf"]
depends_on:
- db
- memcache
- mongo
- es
- rabbitmq
volumes:
- "{{ DOCKER_EDX_ROOT }}/edx-platform:/edx/app/edxapp/edx-platform"
- /dev/log:/dev/log
- "{{ DOCKER_DATA_ROOT }}/edxapp:/edx/var/edxapp"
roles:
- { role: docker-setup, gather_facts: no }
- common_vars
- {role: edxapp, service_variants_enabled: ['lms'], migrate_db: 'yes'}
EXPECTED RESULTS
ACTUAL RESULTS
Howdy! Thanks for the feedback, and it's awesome you're approaching a-c with such a complete use-case. It's so helpful for us to be able to interact with the diversity of real-world scenarios out there.
I totally get what you're saying, and one of those elements of diversity is how folks go about developing and building their containers. So something maybe you can help me understand: the container instance that you end up applying the migrations to during the build process, the resultant data will either be in a container instance or a volume instance. If you then deploy the images for your app anywhere, the container instances or volume instances on those container platforms won't have the database migrations applied.
Some of the feedback we got in the <0.9 days was that orchestrated builds were uninteresting, because of this problem. Building one image at a time as we're doing is way simpler, and it supports layering because we can start/stop application container instances during build without worrying about resharing volumes or reconnecting network paths.
Can you help me understand your development workflow better so I can try to understand how we might better be able to support it? Thanks!
Thanks! I totally agree that layering approach is a much better way to do it than the pre 0.9 way (I don't have to go through the build process of 9 containers to debug an issue in the 10th!!)
The way I have been using a-c is building and running the full stack on the same instance with the same volumes. A large proportion of my build is setting up data in data volumes. Maybe there is a bit of a redesign that needs to happen on my end to try to make things a bit more portable; I haven't really gone down that road as it is not really a requirement for me.
Another way I thought of was some way of running some select tasks, maybe based on tag, on a first-run or after a shipit.
if you are interested or if it is of any help, all my build is located here: https://github.com/OpenSaasAU/edx-docker-ansible see ansible-container0.9 branch
@borisno2 I think I have a similar issue to yours. Before I open an issue for it, I'd like to discuss it a bit here. I am building a set of services that consist of
- a basic container with crond in it to run some python scripts from time to time, and
- a mysqldb container (
from: mysql:5.7)
Since nothing needs to get done to the mysql container, it is not started - and this causes the role for the other container to fail, since there are some tasks which interact with the db (create user, etc).
The way I thought about solving this was to write a role to customise the base image from the registry.
@brucellino happy to be involved in a discussion.. Not sure if your idea would work as each container is run separately (unless something has changed), so even if you run a customisation on your mysql container it will still not be running at the same time as your crond container..
@borisno2 thanks for the response. After bouncing around with this for a few hours, I found @kelseyhightower 's old post on doing 12 factor "right". This convinced me to take another look at the problem.
I re-worked the container.yml to have the db provision itself independently of the other containers, so now I no longer have this issue. Hope that helps anyone watching.
@brucellino Good thinking! Thanks for sharing..
Hi, I've got a similar problem. The simplified description is: the Ansible-container project contains 2 services — each installs one application. Call them app_1 and app_2. While installing app_2, the connection to app_1 is validated. But it fails because the app_1 container is down during `ansible-container build` while app_2 is being installed. Consider this build-time dependency a prerequisite that I have to take as it is. I'm aware of the wrong design in this case, but I can't do anything about it, because it's part of a legacy system. No redesign is possible in this case.
Summarizing the problems I want to solve: a/ I want to start a container committed in previous ansible-container build step b/ I want to make the containers communicate between each other during ansible-container build I'd like to ask you for an advice. What are my possibilities?
Here are some related questions specified regarding some of the ways I've tried to solve the problems: 1/ @j00bar wrote ..."we can start/stop application container instances during build"... How can I achieve that during ansible-container build ? 2/ Can I use docker_container ansible module inside ansible-container build ? (At least embedded into one of the roles) 3/ Can I customize a docker network of a containers during ansible-container build? Or at least assign an IP address to a container? ... (The problem I have here is that the containers created during a-c build are assigned to a default docker network. And communication over host=container name not possible in this case. )
Env: ansible-container version 0.9.2 docker-version 1.13.1 ansible 2.4.2.0 Host system: ubuntu 16.04