Implement a more dynamic celery queue system

* Meant as a starting point for more efficient work routing and for
  balancing work across all tower nodes
* Integrate flower as a dev tool that starts alongside other nodes.
  Helpful for observing and monitoring the queues/exchanges
* For the moment, force the task manager to only run on one node (not
  sure if this is needed)
* Define queues and routes for all task work (a settings sketch follows this list)
* Bump celery version to 3.1.23
* Expose flower through haproxy
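
As a rough illustration of the "define queues and routes" bullet, here is a minimal sketch of Celery 3.1-style queue/route settings. The queue names mirror the -Q projects,jobs,default flag added to the Makefile below; the exchange names and task paths are assumptions for illustration, not the actual Tower settings.

    # Hypothetical sketch -- queue names match the Makefile's -Q projects,jobs,default;
    # the task module paths below are made up for illustration only.
    from kombu import Exchange, Queue

    CELERY_DEFAULT_QUEUE = 'default'

    CELERY_QUEUES = (
        Queue('default', Exchange('default'), routing_key='default'),
        Queue('jobs', Exchange('jobs'), routing_key='jobs'),
        Queue('projects', Exchange('projects'), routing_key='projects'),
    )

    CELERY_ROUTES = {
        'main.tasks.run_job': {'queue': 'jobs', 'routing_key': 'jobs'},
        'main.tasks.run_project_update': {'queue': 'projects', 'routing_key': 'projects'},
    }

With routes like these, a worker started with -Q jobs only consumes job tasks, which is what lets the celeryd target below subscribe a node to an explicit set of queues.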
Matthew Jones
2016-09-09 15:18:18 -04:00
parent 13a0fd749f
commit 807cced571
9 changed files with 63 additions and 11 deletions


@@ -378,6 +378,12 @@ honcho:
 	fi; \
 	honcho start
 
+flower:
+	@if [ "$(VENV_BASE)" ]; then \
+		. $(VENV_BASE)/tower/bin/activate; \
+	fi; \
+	$(PYTHON) manage.py celery flower --address=0.0.0.0 --port=5555 --broker=amqp://guest:guest@$(RABBITMQ_HOST):5672//
+
 # Run the built-in development webserver (by default on http://localhost:8013).
 runserver:
 	@if [ "$(VENV_BASE)" ]; then \
@@ -390,7 +396,8 @@ celeryd:
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/tower/bin/activate; \
 	fi; \
-	$(PYTHON) manage.py celeryd -l DEBUG -B --autoscale=20,2 -Ofair --schedule=$(CELERY_SCHEDULE_FILE)
+	$(PYTHON) manage.py celeryd -l DEBUG -B --autoscale=20,3 --schedule=$(CELERY_SCHEDULE_FILE) -Q projects,jobs,default
+	#$(PYTHON) manage.py celery multi show projects jobs default -l DEBUG -Q:projects projects -Q:jobs jobs -Q:default default -c:projects 1 -c:jobs 3 -c:default 3 -Ofair -B --schedule=$(CELERY_SCHEDULE_FILE)
 
 # Run to start the zeromq callback receiver
 receiver:
@@ -403,7 +410,11 @@ taskmanager:
 	@if [ "$(VENV_BASE)" ]; then \
 		. $(VENV_BASE)/tower/bin/activate; \
 	fi; \
-	$(PYTHON) manage.py run_task_system
+	if [ "$(COMPOSE_HOST)" == "tower_1" ] || [ "$(COMPOSE_HOST)" == "tower" ]; then \
+		$(PYTHON) manage.py run_task_system; \
+	else \
+		while true; do sleep 2; done; \
+	fi
 
 socketservice:
 	@if [ "$(VENV_BASE)" ]; then \