feat: remove traefik from balancing
parent 7daf22b5b6
commit f896569f4c
@@ -17,8 +17,8 @@ RUN curl --retry 7 --fail -vo /tmp/consul.zip "https://releases.hashicorp.com/co
     && mkdir /config

 # Install ContainerPilot
-ENV CP_SHA1 1f159207c7dc2b622f693754f6dda77c82a88263
-ENV CONTAINERPILOT_VERSION 3.1.1
+ENV CP_SHA1 e27c1b9cd1023e622f77bb19914606dee3c9b22c
+ENV CONTAINERPILOT_VERSION 3.3.1
 RUN curl -Lo /tmp/containerpilot.tar.gz "https://github.com/joyent/containerpilot/releases/download/${CONTAINERPILOT_VERSION}/containerpilot-${CONTAINERPILOT_VERSION}.tar.gz" \
     && echo "${CP_SHA1} /tmp/containerpilot.tar.gz" | sha1sum -c \
     && tar zxf /tmp/containerpilot.tar.gz -C /bin \
docker/api/bootstrap-data.js (vendored, 2 lines changed)
@@ -5,7 +5,7 @@ const Url = require('url');
 const Path = require('path');
 const Fs = require('fs');

-const Data = require('./lib/data');
+const Data = require('portal-api/lib/data');

 const {
   DOCKER_HOST,
@@ -21,7 +21,7 @@
     "joi": "^10.6.0",
     "joyent-cp-gql-schema": "^1.2.0",
     "piloted": "^3.1.1",
-    "portal-api": "^1.2.0",
+    "portal-api": "^1.3.2",
     "toppsy": "^1.1.0",
     "triton": "^5.2.0"
   }
@@ -3,12 +3,8 @@
 const Brule = require('brule');
 const Good = require('good');
 const Hapi = require('hapi');
-const HapiSwagger = require('hapi-swagger');
-const Inert = require('inert');
 const Toppsy = require('toppsy');
-const Vision = require('vision');
-const Pack = require('./package');
-const Portal = require('./lib');
+const Portal = require('portal-api');
 const Path = require('path');
 const Fs = require('fs');
 const Url = require('url');
@@ -16,13 +12,6 @@ const Url = require('url');
 const server = new Hapi.Server();
 server.connection({ port: 3000 });

-const swaggerOptions = {
-    info: {
-        title: 'Portal API Documentation',
-        version: Pack.version
-    }
-};
-
 const {
   DOCKER_HOST,
   DOCKER_CERT_PATH,
@@ -74,7 +63,7 @@ const goodOptions = {
         {
             module: 'good-squeeze',
             name: 'Squeeze',
-            args: [{ log: '*', response: '*', error: '*' }]
+            args: [{ response: '*', error: '*' }]
         },
         {
             module: 'good-console'
@@ -87,8 +76,6 @@ const goodOptions = {
 server.register(
     [
         Brule,
-        Inert,
-        Vision,
         {
             register: Good,
             options: goodOptions
@@ -97,10 +84,6 @@ server.register(
             register: Portal,
             options: portalOptions
         },
-        {
-            register: HapiSwagger,
-            options: swaggerOptions
-        },
         {
             register: Toppsy,
             options: { namespace: 'portal', subsystem: 'api' }
@@ -11,10 +11,10 @@ RUN export CONSUL_VERSION=0.7.0 \
     && mkdir /config

 # Add Containerpilot and set its configuration
-ENV CONTAINERPILOT_VERSION 3.1.1
+ENV CONTAINERPILOT_VERSION 3.3.1
 ENV CONTAINERPILOT /etc/containerpilot.json

-RUN export CONTAINERPILOT_CHECKSUM=1f159207c7dc2b622f693754f6dda77c82a88263 \
+RUN export CONTAINERPILOT_CHECKSUM=e27c1b9cd1023e622f77bb19914606dee3c9b22c \
     && export archive=containerpilot-${CONTAINERPILOT_VERSION}.tar.gz \
     && curl -Lso /tmp/${archive} \
         "https://github.com/joyent/containerpilot/releases/download/${CONTAINERPILOT_VERSION}/${archive}" \
@@ -9,6 +9,11 @@ RUN set -x \
     && apk upgrade \
     && rm -rf /var/cache/apk/*

+# Use consul-template to re-write our Nginx virtualhost config
+RUN curl -Lo /tmp/consul_template_0.15.0_linux_amd64.zip https://releases.hashicorp.com/consul-template/0.15.0/consul-template_0.15.0_linux_amd64.zip && \
+    unzip /tmp/consul_template_0.15.0_linux_amd64.zip && \
+    mv consul-template /bin
+
 # Install Consul agent
 ENV CONSUL_VERSION 0.7.0
 ENV CONSUL_CHECKSUM b350591af10d7d23514ebaa0565638539900cdb3aaa048f077217c4c46653dd8
@@ -19,8 +24,8 @@ RUN curl --retry 7 --fail -vo /tmp/consul.zip "https://releases.hashicorp.com/co
     && mkdir /config

 # Install ContainerPilot
-ENV CP_SHA1 1f159207c7dc2b622f693754f6dda77c82a88263
-ENV CONTAINERPILOT_VERSION 3.1.1
+ENV CP_SHA1 e27c1b9cd1023e622f77bb19914606dee3c9b22c
+ENV CONTAINERPILOT_VERSION 3.3.1
 RUN curl -Lo /tmp/containerpilot.tar.gz "https://github.com/joyent/containerpilot/releases/download/${CONTAINERPILOT_VERSION}/containerpilot-${CONTAINERPILOT_VERSION}.tar.gz" \
     && echo "${CP_SHA1} /tmp/containerpilot.tar.gz" | sha1sum -c \
     && tar zxf /tmp/containerpilot.tar.gz -C /bin \
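Note: the consul-template binary installed in the hunk above is what the new reload-nginx.sh script (next file) shells out to; the Dockerfile drops it straight into /bin. A quick way to confirm it is present in a built image is to run it with its version flag; using the compose `frontend` service name here is an assumption taken from the docker-compose changes later in this commit:

    docker-compose run --rm frontend consul-template -v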
docker/frontend/bin/reload-nginx.sh (new executable file, 32 lines)
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+# Render Nginx configuration template using values from Consul,
+# but do not reload because Nginx hasn't started yet
+preStart() {
+    consul-template \
+        -once \
+        -consul localhost:8500 \
+        -template "/etc/nginx/nginx.conf.tmpl:/etc/nginx/nginx.conf"
+}
+
+# Render Nginx configuration template using values from Consul,
+# then gracefully reload Nginx
+onChange() {
+    consul-template \
+        -once \
+        -consul localhost:8500 \
+        -template "/etc/nginx/nginx.conf.tmpl:/etc/nginx/nginx.conf:nginx -s reload"
+}
+
+until
+    cmd=$1
+    if [ -z "$cmd" ]; then
+        onChange
+    fi
+    shift 1
+    $cmd "$@"
+    [ "$?" -ne 127 ]
+do
+    onChange
+    exit
+done
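The trailing until/do loop is a small dispatcher: it runs the first argument as one of the functions defined above, and if no argument is given, or the named command does not exist (exit code 127), it falls back to onChange. ContainerPilot calls the script exactly this way in the frontend config below:

    /bin/reload-nginx.sh preStart    # render nginx.conf once before nginx starts
    /bin/reload-nginx.sh onChange    # re-render and reload nginx when the api service changes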
@@ -1,31 +1,6 @@
 {
   consul: 'localhost:8500',
   jobs: [
-    {
-      name: 'config-nginx',
-      exec: 'containerpilot -config /etc/nginx/nginx.conf.tmpl -template -out /etc/nginx/nginx.conf'
-    },
-    {
-      name: 'cp-frontend',
-      port: {{.PORT}},
-      exec: 'nginx',
-      interfaces: ["eth0", "eth1"],
-      restarts: 'unlimited',
-      when: {
-        source: 'config-nginx',
-        once: 'exitSuccess'
-      },
-      health: {
-        exec: '/usr/bin/curl -o /dev/null --fail -s http://localhost:{{.PORT}}',
-        interval: 5,
-        ttl: 25
-      },
-      tags: [
-        'traefik.backend=cp-frontend',
-        'traefik.frontend.rule=PathPrefix:/',
-        'traefik.frontend.entryPoints={{ .ENTRYPOINTS | default "http,ws,wss" }}'
-      ]
-    },
     {
       name: 'consul-agent',
       exec: ['/usr/local/bin/consul', 'agent',
@@ -36,8 +11,45 @@
         '-retry-join', '{{ .CONSUL | default "consul" }}',
         '-retry-max', '10',
         '-retry-interval', '10s'],
+      health: {
+        exec: '/usr/bin/curl -o /dev/null --fail -s http://localhost:8500',
+        interval: 5,
+        ttl: 25
+      },
       restarts: 'unlimited'
     },
+    {
+      name: "preStart",
+      exec: "/bin/reload-nginx.sh preStart",
+      when: {
+        source: 'consul-agent',
+        once: 'healthy'
+      },
+    },
+    {
+      name: 'cp-frontend',
+      port: {{.PORT}},
+      exec: 'nginx',
+      interfaces: ["eth0", "eth1"],
+      restarts: 'unlimited',
+      when: {
+        source: 'preStart',
+        once: 'exitSuccess'
+      },
+      health: {
+        exec: '/usr/bin/curl -o /dev/null --fail -s http://localhost:{{.PORT}}',
+        interval: 5,
+        ttl: 25
+      }
+    },
+    {
+      name: "onchange-api",
+      exec: "/bin/reload-nginx.sh onChange",
+      when: {
+        source: "watch.api",
+        each: "changed"
+      }
+    },
     {
       name: 'sensor_memory_usage',
       exec: '/bin/sensors.sh memory',
@@ -75,6 +87,12 @@
       restarts: 'unlimited'
     }
   ],
+  watches: [
+    {
+      name: 'api',
+      interval: 3
+    }
+  ],
   telemetry: {
     port: 9090,
     tags: ['op'],
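With the traefik tags gone, routing updates now flow through ContainerPilot's watches block: the api watch polls Consul every 3 seconds and triggers the onchange-api job, which re-renders the nginx upstreams via reload-nginx.sh. To see the same service data the watch sees, query the local Consul agent from inside the container (it listens on localhost:8500, as configured above):

    curl -s http://localhost:8500/v1/health/service/api?passing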
@@ -17,85 +17,100 @@ include /etc/nginx/modules/*.conf;


 events {
-    # The maximum number of simultaneous connections that can be opened by
-    # a worker process.
-    worker_connections 1024;
+    # The maximum number of simultaneous connections that can be opened by
+    # a worker process.
+    worker_connections 1024;
 }

 http {
-    index index.html index.htm;
-    server {
-        server_name _;
-        listen {{ .PORT | default "80" }} default_server;
-        listen [::]:{{ .PORT | default "80" }} default_server;
-        root /opt/app/packages/cp-frontend/build;
-        location / {
+    index index.html index.htm;
+
+    {{ if service "api" }}
+    upstream api_hosts {
+      {{range service "api"}}
+      server {{.Address}}:{{.Port}};
+      {{end}}
+    }{{ end }}
+
+    server {
+        server_name _;
+        listen {{ env "PORT" }} default_server;
+        listen [::]:{{ env "PORT" }} default_server;
+        root /opt/app/packages/cp-frontend/build;
+        location / {
             try_files $uri /index.html;
         }
-    }
-    # Includes mapping of file name extensions to MIME types of responses
-    # and defines the default type.
-    include /etc/nginx/mime.types;
-    default_type application/octet-stream;
+
+        location /api {
+            rewrite /api/(.*) /$1 break;
+            proxy_pass http://api_hosts;
+            proxy_redirect off;
+            proxy_set_header Host $host;
+        }
+    }

-    # Name servers used to resolve names of upstream servers into addresses.
-    # It's also needed when using tcpsocket and udpsocket in Lua modules.
-    #resolver 208.67.222.222 208.67.220.220;
+    # Includes mapping of file name extensions to MIME types of responses
+    # and defines the default type.
+    include /etc/nginx/mime.types;
+    default_type application/octet-stream;

-    # Don't tell nginx version to clients.
-    server_tokens off;
+    # Name servers used to resolve names of upstream servers into addresses.
+    # It's also needed when using tcpsocket and udpsocket in Lua modules.
+    #resolver 208.67.222.222 208.67.220.220;

-    # Specifies the maximum accepted body size of a client request, as
-    # indicated by the request header Content-Length. If the stated content
-    # length is greater than this size, then the client receives the HTTP
-    # error code 413. Set to 0 to disable.
-    client_max_body_size 1m;
+    # Don't tell nginx version to clients.
+    server_tokens off;

-    # Timeout for keep-alive connections. Server will close connections after
-    # this time.
-    keepalive_timeout 65;
+    # Specifies the maximum accepted body size of a client request, as
+    # indicated by the request header Content-Length. If the stated content
+    # length is greater than this size, then the client receives the HTTP
+    # error code 413. Set to 0 to disable.
+    client_max_body_size 1m;

-    # Sendfile copies data between one FD and other from within the kernel,
-    # which is more efficient than read() + write().
-    sendfile on;
+    # Timeout for keep-alive connections. Server will close connections after
+    # this time.
+    keepalive_timeout 65;

-    # Don't buffer data-sends (disable Nagle algorithm).
-    # Good for sending frequent small bursts of data in real time.
-    tcp_nodelay on;
+    # Sendfile copies data between one FD and other from within the kernel,
+    # which is more efficient than read() + write().
+    sendfile on;

-    # Causes nginx to attempt to send its HTTP response head in one packet,
-    # instead of using partial frames.
-    #tcp_nopush on;
+    # Don't buffer data-sends (disable Nagle algorithm).
+    # Good for sending frequent small bursts of data in real time.
+    tcp_nodelay on;

-    # Path of the file with Diffie-Hellman parameters for EDH ciphers.
-    #ssl_dhparam /etc/ssl/nginx/dh2048.pem;
+    # Causes nginx to attempt to send its HTTP response head in one packet,
+    # instead of using partial frames.
+    #tcp_nopush on;

-    # Specifies that our cipher suits should be preferred over client ciphers.
-    ssl_prefer_server_ciphers on;
+    # Path of the file with Diffie-Hellman parameters for EDH ciphers.
+    #ssl_dhparam /etc/ssl/nginx/dh2048.pem;

-    # Enables a shared SSL cache with size that can hold around 8000 sessions.
-    ssl_session_cache shared:SSL:2m;
+    # Specifies that our cipher suits should be preferred over client ciphers.
+    ssl_prefer_server_ciphers on;

+    # Enables a shared SSL cache with size that can hold around 8000 sessions.
+    ssl_session_cache shared:SSL:2m;


-    # Enable gzipping of responses.
-    #gzip on;
+    # Enable gzipping of responses.
+    #gzip on;

-    # Set the Vary HTTP header as defined in the RFC 2616.
-    gzip_vary on;
+    # Set the Vary HTTP header as defined in the RFC 2616.
+    gzip_vary on;

-    # Enable checking the existence of precompressed files.
-    #gzip_static on;
+    # Enable checking the existence of precompressed files.
+    #gzip_static on;


-    # Specifies the main log format.
-    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
-                    '$status $body_bytes_sent "$http_referer" '
-                    '"$http_user_agent" "$http_x_forwarded_for"';
+    # Specifies the main log format.
+    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+                    '$status $body_bytes_sent "$http_referer" '
+                    '"$http_user_agent" "$http_x_forwarded_for"';

-    # Sets the path, format, and configuration for a buffered log write.
-    access_log /var/log/nginx/access.log main;
+    # Sets the path, format, and configuration for a buffered log write.
+    access_log /var/log/nginx/access.log main;


-    # Includes virtual hosts configs.
-    # include /etc/nginx/conf.d/*.conf;
+    # Includes virtual hosts configs.
+    # include /etc/nginx/conf.d/*.conf;
 }
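The template is now rendered by consul-template instead of ContainerPilot's built-in templating, which is why `{{ .PORT | default "80" }}` becomes `{{ env "PORT" }}` and the api upstreams come from `{{range service "api"}}`. A sketch of how to preview the rendered config without touching /etc/nginx, assuming consul-template's -dry flag (print to stdout) and a local Consul agent:

    consul-template -once -dry \
        -consul localhost:8500 \
        -template "/etc/nginx/nginx.conf.tmpl:/etc/nginx/nginx.conf"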
@@ -53,22 +53,13 @@ frontend:
     - consul:consul
   environment:
     - CONSUL=consul
-    - PORT=3069
+    - PORT=80
+    - REACT_APP_GQL_HOSTNAME=localhost
+    - REACT_APP_GQL_PORT=80
   expose:
     - 3069

-traefik:
-  image: d0cker/traefik
-  ports:
-    - "80:80"
-    - "8080:8080"
-  links:
-    - consul:consul
-  environment:
-    - CONSUL=consul
-  restart: always
-  dns:
-    - 127.0.0.1


 #############################################################################
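Removing the traefik service also removes the 80:80 host mapping it provided; nginx inside the cp-frontend container now answers on its own PORT (the PORT=80 above feeds `listen {{ env "PORT" }}` in the template). A sketch of an in-container check, mirroring the ContainerPilot health job and assuming the compose service is still named `frontend`:

    docker-compose exec frontend curl -o /dev/null --fail -s -w '%{http_code}\n' http://localhost:80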
@@ -108,7 +99,7 @@ rethinkdb:
   restart: always
   mem_limit: 1g
   ports:
-    - 8081:8080
+    - 8080:8080
  expose:
    - 28015
    - 29015
@@ -90,6 +90,7 @@ class Data extends EventEmitter {
     this._docker = new Dockerode(settings.docker);
     this._machines = null;
     this._triton = null;
+    this._server = settings.server;

     Triton.createClient({
       profile: settings.triton
@@ -252,7 +253,7 @@ class Data extends EventEmitter {

   createDeploymentGroup (clientDeploymentGroup, cb) {
     const dg = Transform.toDeploymentGroup(clientDeploymentGroup);
-    console.log(`-> creating DeploymentGroup: ${Util.inspect(dg)}`);
+    this._server.log(['debug'], `-> creating DeploymentGroup: ${Util.inspect(dg)}`);

     this._db.deployment_groups.query({
       slug: dg.slug
@@ -278,7 +279,7 @@ class Data extends EventEmitter {

   updateDeploymentGroup (clientDeploymentGroup, cb) {
     const dg = Transform.toDeploymentGroup(clientDeploymentGroup);
-    console.log(`-> updating DeploymentGroup: ${Util.inspect(dg)}`);
+    this._server.log(['debug'], `-> updating DeploymentGroup: ${Util.inspect(dg)}`);

     this._db.deployment_groups.update([dg], (err) => {
       if (err) {
@@ -527,7 +528,7 @@ class Data extends EventEmitter {
     Hoek.assert(clientVersion.manifest, 'manifest is required');
     Hoek.assert(clientVersion.deploymentGroupId, 'deploymentGroupId is required');

-    console.log(`-> creating new Version for DeploymentGroup ${clientVersion.deploymentGroupId}: ${Util.inspect(clientVersion)}`);
+    this._server.log(['debug'], `-> creating new Version for DeploymentGroup ${clientVersion.deploymentGroupId}: ${Util.inspect(clientVersion)}`);

     const version = Transform.toVersion(clientVersion);
     this._db.versions.insert(version, (err, key) => {
@@ -535,7 +536,7 @@ class Data extends EventEmitter {
         return cb(err);
       }

-      console.log(`-> new Version for DeploymentGroup ${clientVersion.deploymentGroupId} created: ${key}`);
+      this._server.log(['debug'], `-> new Version for DeploymentGroup ${clientVersion.deploymentGroupId} created: ${key}`);
      this._db.deployment_groups.query({
        id: clientVersion.deploymentGroupId
      }, (err, deploymentGroup) => {
@@ -551,7 +552,7 @@ class Data extends EventEmitter {
          []
      };

-      console.log(`-> updating DeploymentGroup ${clientVersion.deploymentGroupId} to add Version ${key}`);
+      this._server.log(['debug'], `-> updating DeploymentGroup ${clientVersion.deploymentGroupId} to add Version ${key}`);

      this._db.deployment_groups.update([changes], (err) => {
        if (err) {
@@ -715,7 +716,7 @@ class Data extends EventEmitter {
      isHandled: false
    };

-    console.log('-> scale request received');
+    this._server.log(['debug'], '-> scale request received');

    const handleFailedScale = (err1, cb) => {
      if (err1) {
@@ -747,7 +748,7 @@ class Data extends EventEmitter {

      ctx.isHandled = true;

-      console.log(`-> got response from docker-compose to scale ${ctx.service.name} to ${replicas} replicas`);
+      this._server.log(['debug'], `-> got response from docker-compose to scale ${ctx.service.name} to ${replicas} replicas`);
    };

    const triggerScale = (err, newVersion) => {
@@ -755,12 +756,12 @@ class Data extends EventEmitter {
        return handleFailedScale(err, cb);
      }

-      console.log('-> new Version created');
+      this._server.log(['debug'], '-> new Version created');

      cb(null, newVersion);

      setImmediate(() => {
-        console.log(`-> requesting docker-compose to scale ${ctx.service.name} to ${replicas} replicas`);
+        this._server.log(['debug'], `-> requesting docker-compose to scale ${ctx.service.name} to ${replicas} replicas`);

        this._dockerCompose.scale({
          projectName: ctx.deploymentGroup.name,
@@ -803,7 +804,7 @@ class Data extends EventEmitter {
        hasPlan: true
      };

-      console.log(`-> creating new Version for DOWN scale ${Util.inspect(payload)}`);
+      this._server.log(['debug'], `-> creating new Version for DOWN scale ${Util.inspect(payload)}`);

      // note: createVersion updates deploymentGroup
      this.createVersion(payload, triggerScale);
@@ -826,7 +827,7 @@ class Data extends EventEmitter {
        hasPlan: true
      };

-      console.log(`-> creating new Version for UP scale ${Util.inspect(payload)}`);
+      this._server.log(['debug'], `-> creating new Version for UP scale ${Util.inspect(payload)}`);

      // note: createVersion updates deploymentGroup
      this.createVersion(payload, triggerScale);
@@ -837,7 +838,7 @@ class Data extends EventEmitter {
        return handleFailedScale(err, cb);
      }

-      console.log(`-> got current scale ${Util.inspect(currentScale)}`);
+      this._server.log(['debug'], `-> got current scale ${Util.inspect(currentScale)}`);

      const serviceReplicas = Find(currentScale, ['serviceName', ctx.service.name]).replicas;
      const serviceScale = Number.isFinite(serviceReplicas) ? serviceReplicas : 1;
@@ -869,7 +870,7 @@ class Data extends EventEmitter {

      ctx.manifest = manifest;

-      console.log('-> fetching current scale');
+      this._server.log(['debug'], '-> fetching current scale');

      this._getCurrentScale({
        deploymentGroupName: ctx.deploymentGroup.name,
@@ -891,7 +892,7 @@ class Data extends EventEmitter {

      ctx.version = version;

-      console.log(`-> fetching Manifest ${version.manifest_id}`);
+      this._server.log(['debug'], `-> fetching Manifest ${version.manifest_id}`);

      this._db.manifests.single({
        id: version.manifest_id
@@ -909,7 +910,7 @@ class Data extends EventEmitter {

      ctx.deploymentGroup = deploymentGroup;

-      console.log(`-> fetching Version ${ctx.deploymentGroup.version_id}`);
+      this._server.log(['debug'], `-> fetching Version ${ctx.deploymentGroup.version_id}`);

      this._db.versions.single({
        id: deploymentGroup.version_id
@@ -921,11 +922,11 @@ class Data extends EventEmitter {
        return handleFailedScale(err, cb);
      }

-      console.log(`-> got ${instances.length} Instances from ${ctx.service.name}`);
+      this._server.log(['debug'], `-> got ${instances.length} Instances from ${ctx.service.name}`);

      ctx.instances = instances;

-      console.log(`-> fetching DeploymentGroup ${ctx.service.deployment_group_id}`);
+      this._server.log(['debug'], `-> fetching DeploymentGroup ${ctx.service.deployment_group_id}`);

      this._db.deployment_groups.single({
        id: ctx.service.deployment_group_id
@@ -937,7 +938,7 @@ class Data extends EventEmitter {
        return handleFailedScale(err, cb);
      }

-      console.log(`-> fetching Instances from ${ctx.service.name}`);
+      this._server.log(['debug'], `-> fetching Instances from ${ctx.service.name}`);

      this.getInstances({ ids: ctx.service.instance_ids }, handleInstances);
    };
@@ -957,7 +958,7 @@ class Data extends EventEmitter {

      ctx.service = service;

-      console.log(`-> fetching DeploymentGroup ${service.deployment_group_id}`);
+      this._server.log(['debug'], `-> fetching DeploymentGroup ${service.deployment_group_id}`);

      this.updateService({
        id: serviceId,
@@ -965,7 +966,7 @@ class Data extends EventEmitter {
      }, handleUpdatedService);
    };

-    console.log(`-> fetching Service ${serviceId}`);
+    this._server.log(['debug'], `-> fetching Service ${serviceId}`);

    this._db.services.single({ id: serviceId }, handleService);
  }
@@ -992,7 +993,7 @@ class Data extends EventEmitter {
      isHandled: false
    };

-    console.log('-> provision request received');
+    this._server.log(['debug'], '-> provision request received');

    const handleFailedProvision = (err) => {
      if (!err) {
@@ -1027,7 +1028,7 @@ class Data extends EventEmitter {

      const services = ForceArray(result.successes);

-      console.log(`-> got a map of Service's-Instance's from DeploymentGroup ${ctx.currentDeploymentGroup.id} ${Util.inspect(services)}`);
+      this._server.log(['debug'], `-> got a map of Service's-Instance's from DeploymentGroup ${ctx.currentDeploymentGroup.id} ${Util.inspect(services)}`);

      const plan = Flatten(services.map(({ name, instances }) => {
        const provision = ctx.provisionRes[name];
@@ -1092,7 +1093,7 @@ class Data extends EventEmitter {
      VAsync.parallel({
        funcs: [
          (cb) => {
-            console.log(`-> updating Version ${ctx.newVersion.id} from DeploymentGroup ${ctx.currentDeploymentGroup.id} with new Plan ${Util.inspect(plan)}`);
+            this._server.log(['debug'], `-> updating Version ${ctx.newVersion.id} from DeploymentGroup ${ctx.currentDeploymentGroup.id} with new Plan ${Util.inspect(plan)}`);
            return this.updateVersion({
              id: ctx.newVersion.id,
              hasPlan: true,
@@ -1100,7 +1101,7 @@ class Data extends EventEmitter {
            }, cb);
          },
          (cb) => {
-            console.log(`-> updating DeploymentGroup ${ctx.currentDeploymentGroup.id} with new Service's ${Util.inspect(ctx.newServices)} and ACTIVE status`);
+            this._server.log(['debug'], `-> updating DeploymentGroup ${ctx.currentDeploymentGroup.id} with new Service's ${Util.inspect(ctx.newServices)} and ACTIVE status`);

            const services = UniqBy(
              ForceArray(ctx.newServices)
@@ -1124,8 +1125,8 @@ class Data extends EventEmitter {
        return handleFailedProvision(err);
      }

-      console.log(`-> marked removed Service's with DELETING from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
-      console.log(`-> fetching a map of Service's-Instance's from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
+      this._server.log(['debug'], `-> marked removed Service's with DELETING from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
+      this._server.log(['debug'], `-> fetching a map of Service's-Instance's from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);

      VAsync.forEachParallel({
        inputs: ctx.previousServices,
@@ -1149,7 +1150,7 @@ class Data extends EventEmitter {
        return handleFailedProvision(err);
      }

-      console.log(`-> identified previous Service's from DeploymentGroup ${ctx.currentDeploymentGroup.id} ${Util.inspect(ctx.previousServices)}`);
+      this._server.log(['debug'], `-> identified previous Service's from DeploymentGroup ${ctx.currentDeploymentGroup.id} ${Util.inspect(ctx.previousServices)}`);

      ctx.previousServices = previousServices;

@@ -1158,12 +1159,12 @@ class Data extends EventEmitter {
        return !Find(ctx.newServices, ['name', name]);
      });

-      console.log(`-> identified removed Service's from DeploymentGroup ${ctx.currentDeploymentGroup.id} ${Util.inspect(ctx.removedServices)}`);
+      this._server.log(['debug'], `-> identified removed Service's from DeploymentGroup ${ctx.currentDeploymentGroup.id} ${Util.inspect(ctx.removedServices)}`);

      VAsync.forEachParallel({
        inputs: ctx.removedServices,
        func: ({ id, name }, next) => {
-          console.log(`-> marking Service ${name} as DELETING from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
+          this._server.log(['debug'], `-> marking Service ${name} as DELETING from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
          this.updateService({
            id,
            status: 'DELETING'
@@ -1180,18 +1181,18 @@ class Data extends EventEmitter {

      ctx.newServices = ForceArray(result.successes);

-      console.log(`-> got "${ctx.newServices.length}" Services provisioned from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
+      this._server.log(['debug'], `-> got "${ctx.newServices.length}" Services provisioned from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);

      ctx.currentDeploymentGroup.services({}, handlePreviousServices);
    };

    const createProvisionService = ({ payload }, cb) => {
-      console.log(`-> creating Service "${payload.name}" from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
+      this._server.log(['debug'], `-> creating Service "${payload.name}" from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
      this.createService(payload, cb);
    };

    const updateProvisionService = ({ payload, serviceId }, cb) => {
-      console.log(`-> updating Service "${payload.name}" from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
+      this._server.log(['debug'], `-> updating Service "${payload.name}" from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
      this.updateService(Object.assign({}, payload, {
        id: serviceId
      }), cb);
@@ -1199,7 +1200,7 @@ class Data extends EventEmitter {

    // 10. on each service, either create or update it with new status and hash
    const handleProvisionService = (serviceName, next) => {
-      console.log(`-> handling Service "${serviceName}" from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
+      this._server.log(['debug'], `-> handling Service "${serviceName}" from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);

      this.getServices({
        name: serviceName,
@@ -1209,7 +1210,7 @@ class Data extends EventEmitter {
        return next(err);
      }

-      console.log(`-> got ${services.length} services with name ${serviceName} from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
+      this._server.log(['debug'], `-> got ${services.length} services with name ${serviceName} from DeploymentGroup ${ctx.currentDeploymentGroup.id}`);

      const provision = ctx.provisionRes[serviceName];
      const action = Get(provision, 'plan.action', 'noop').toUpperCase();
@@ -1245,7 +1246,7 @@ class Data extends EventEmitter {
        return;
      }

-      console.log(`-> got response from provision ${Util.inspect(provisionRes)}`);
+      this._server.log(['debug'], `-> got response from provision ${Util.inspect(provisionRes)}`);

      ctx.isHandled = true;
      ctx.provisionRes = provisionRes;
@@ -1272,7 +1273,7 @@ class Data extends EventEmitter {
      cb(null, ctx.newVersion);

      setImmediate(() => {
-        console.log(`-> requesting docker-compose provision for DeploymentGroup ${ctx.currentDeploymentGroup.name}`);
+        this._server.log(['debug'], `-> requesting docker-compose provision for DeploymentGroup ${ctx.currentDeploymentGroup.name}`);

        this._dockerCompose.provision({
          projectName: ctx.currentDeploymentGroup.name,
@@ -1290,7 +1291,7 @@ class Data extends EventEmitter {
        return cb(err);
      }

-      console.log(`-> got current scale ${Util.inspect(currentScale)}`);
+      this._server.log(['debug'], `-> got current scale ${Util.inspect(currentScale)}`);

      ctx.currentScale = currentScale;

@@ -1311,9 +1312,9 @@ class Data extends EventEmitter {
      }

      if (!currentVersion) {
-        console.log(`-> detected first provision for DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
+        this._server.log(['debug'], `-> detected first provision for DeploymentGroup ${ctx.currentDeploymentGroup.id}`);
      } else {
-        console.log(`-> creating new Version based on old Version ${currentVersion.id}`);
+        this._server.log(['debug'], `-> creating new Version based on old Version ${currentVersion.id}`);
      }

      ctx.currentVersion = currentVersion;
@@ -1332,7 +1333,7 @@ class Data extends EventEmitter {
        return cb(err);
      }

-      console.log(`-> fetching current version for ${ctx.currentDeploymentGroup.id}`);
+      this._server.log(['debug'], `-> fetching current version for ${ctx.currentDeploymentGroup.id}`);

      ctx.newManifest = newManifest;
      ctx.currentDeploymentGroup.version(null, handleCurrentVersion);
@@ -1345,7 +1346,7 @@ class Data extends EventEmitter {
        return cb(err);
      }

-      console.log(`-> got docker-compose config ${Util.inspect(config)}`);
+      this._server.log(['debug'], `-> got docker-compose config ${Util.inspect(config)}`);

      ctx.config = config;

@@ -1372,13 +1373,13 @@ class Data extends EventEmitter {
        return currentDeploymentGroup.version({}, cb);
      }

-      console.log(`-> DeploymentGroup found with id ${currentDeploymentGroup.id}`);
+      this._server.log(['debug'], `-> DeploymentGroup found with id ${currentDeploymentGroup.id}`);

      const configPayload = Object.assign({}, clientManifest, {
        deploymentGroupName: currentDeploymentGroup.name
      });

-      console.log(`-> requesting docker-compose config for manifest ${Util.inspect(configPayload)}`);
+      this._server.log(['debug'], `-> requesting docker-compose config for manifest ${Util.inspect(configPayload)}`);

      ctx.currentDeploymentGroup = currentDeploymentGroup;

@@ -1401,7 +1402,7 @@ class Data extends EventEmitter {
    }

  createManifest (clientManifest, cb) {
-    console.log(`-> creating new Manifest ${Util.inspect(clientManifest)}`);
+    this._server.log(['debug'], `-> creating new Manifest ${Util.inspect(clientManifest)}`);

    const newManifest = Transform.toManifest(clientManifest);
    this._db.manifests.insert(newManifest, (err, manifestId) => {
@@ -1409,7 +1410,7 @@ class Data extends EventEmitter {
        return cb(err);
      }

-      console.log(`-> new Manifest created with id ${manifestId}`);
+      this._server.log(['debug'], `-> new Manifest created with id ${manifestId}`);

      clientManifest.id = manifestId;
      cb(null, Transform.fromManifest(clientManifest));
@@ -1455,7 +1456,7 @@ class Data extends EventEmitter {

  updateService (clientService, cb) {
    const payload = Transform.toService(clientService);
-    console.log(`-> got update Service request ${Util.inspect(payload)}`);
+    this._server.log(['debug'], `-> got update Service request ${Util.inspect(payload)}`);

    this._db.services.update([payload], (err) => {
      if (err) {
@@ -1468,20 +1469,20 @@ class Data extends EventEmitter {

  getService ({ id, hash }, cb) {
    const query = id ? { id } : { version_hash: hash };
-    console.log(`-> fetching Service ${Util.inspect(query)}`);
+    this._server.log(['debug'], `-> fetching Service ${Util.inspect(query)}`);
    this._db.services.query(query, (err, services) => {
      if (err) {
        return cb(err);
      }

      if (!services || !services.length) {
-        console.log(`-> Service ${Util.inspect(query)} not found`);
+        this._server.log(['debug'], `-> Service ${Util.inspect(query)} not found`);
        return cb(Boom.notFound());
      }

      const service = services.shift();

-      console.log(`-> Service ${Util.inspect(query)} found ${Util.inspect(service)}`);
+      this._server.log(['debug'], `-> Service ${Util.inspect(query)} found ${Util.inspect(service)}`);

      const branches = ForceArray(service.branches).map((branch) => {
        return Object.assign({}, branch, {
@@ -2277,17 +2278,17 @@ class Data extends EventEmitter {
  }

  importDeploymentGroup ({ deploymentGroupSlug }, cb) {
-    console.log(`-> import requested for ${deploymentGroupSlug}`);
+    this._server.log(['debug'], `-> import requested for ${deploymentGroupSlug}`);

    if (!this._machines) {
-      console.log('-> watcher not yet defined');
+      this._server.log(['debug'], '-> watcher not yet defined');
      return cb(null, null);
    }

    const machines = this._machines.getContainers();

    if (!Array.isArray(machines)) {
-      console.log('-> no machines found');
+      this._server.log(['debug'], '-> no machines found');
      return cb(null, null);
    }

@@ -2304,7 +2305,7 @@ class Data extends EventEmitter {
    );

    if (!containers.length) {
-      console.log(`-> no containers found for ${deploymentGroupSlug}`);
+      this._server.log(['debug'], `-> no containers found for ${deploymentGroupSlug}`);
      return cb(null, null);
    }

@@ -2341,7 +2342,7 @@ class Data extends EventEmitter {
    return (serviceId, next) => {
      const service = services[serviceId];

-      console.log(`-> creating Service ${Util.inspect(service)}`);
+      this._server.log(['debug'], `-> creating Service ${Util.inspect(service)}`);

      VAsync.forEachParallel({
        inputs: service.instances,
@@ -2355,7 +2356,7 @@ class Data extends EventEmitter {
          return cb(err);
        }

-        console.log(`-> created Instances ${Util.inspect(results.successes)}`);
+        this._server.log(['debug'], `-> created Instances ${Util.inspect(results.successes)}`);

        this.createService(Object.assign(service, {
          instances: results.successes,
@@ -2372,7 +2373,7 @@ class Data extends EventEmitter {
      imported: true
    };

-    console.log(`-> creating DeploymentGroup ${Util.inspect(deploymentGroup)}`);
+    this._server.log(['debug'], `-> creating DeploymentGroup ${Util.inspect(deploymentGroup)}`);

    this.createDeploymentGroup(deploymentGroup, (err, dg) => {
      if (err) {
@@ -30,6 +30,8 @@ module.exports = function (server, options, next) {
    console.error(ex);
  }

+  options.watch.server = server;
+  options.data.server = server;
  const data = new Data(options.data);
  const cpWatcher = new ContainerPilotWatcher(Object.assign(options.watch, { data }));
  const machinesWatcher = new MachinesWatcher(Object.assign(options.watch, {
@@ -63,6 +63,7 @@ module.exports = class MachineWatcher {

    // todo assert options
    this._data = options.data;
+    this._server = options.watch;
    this._frequency = 200;

    this._tritonWatch = new TritonWatch({
@@ -161,7 +162,7 @@ module.exports = class MachineWatcher {
  }

  createInstance ({ deploymentGroup, machine, instances, service }, cb) {
-    console.error(`-> detected that machine ${machine.name} was created`);
+    this._server.log(['debug', 'error'], `-> detected that machine ${machine.name} was created`);

    const status = (machine.state || '').toUpperCase();

@@ -176,7 +177,7 @@ module.exports = class MachineWatcher {
      machineId: machine.id
    };

-    console.log('-> creating instance', Util.inspect(instance));
+    this._server.log(['debug'], '-> creating instance', Util.inspect(instance));
    this._data.createInstance(instance, (err, instance) => {
      if (err) {
        return cb(err);
@@ -187,7 +188,7 @@ module.exports = class MachineWatcher {
        instances: instances.concat(instance)
      };

-      console.log('-> updating service', Util.inspect(payload));
+      this._server.log(['debug'], '-> updating service', Util.inspect(payload));
      this._data.updateService(payload, cb);
    });
  }
@@ -200,7 +201,7 @@ module.exports = class MachineWatcher {
      status: (machine.state || '').toUpperCase()
    };

-    console.log('-> updating instance', Util.inspect(updatedInstance));
+    this._server.log(['debug'], '-> updating instance', Util.inspect(updatedInstance));
    this._data.updateInstance(updatedInstance, (err) => {
      if (err) {
        return cb(err);
@@ -218,7 +219,7 @@ module.exports = class MachineWatcher {
      })
    };

-    console.log('-> updating service', Util.inspect(payload));
+    this._server.log(['debug'], '-> updating service', Util.inspect(payload));
    this._data.updateService(payload, cb);
    });
  }
@@ -544,7 +545,7 @@ module.exports = class MachineWatcher {
      return;
    }

-    console.log('-> `change` event received', Util.inspect(machine));
+    //console.log('-> `change` event received', Util.inspect(machine));

    const { id, tags = {} } = machine;

@@ -1,6 +1,6 @@
 {
   "name": "portal-api",
-  "version": "1.2.0",
+  "version": "1.3.2",
   "description": "",
   "main": "./lib/index.js",
   "scripts": {
@@ -37,6 +37,7 @@
     "cidr-matcher": "^1.0.5",
     "docker-compose-client": "^1.0.8",
     "dockerode": "^2.5.0",
+    "force-array": "^3.1.0",
     "graphi": "^2.2.1",
     "hoek": "^4.1.1",
     "joyent-cp-gql-schema": "^1.0.4",