2014-05-08 16:59:35 +00:00
|
|
|
import hashlib
|
|
|
|
import json
|
2014-10-04 09:29:18 +00:00
|
|
|
import logging
|
2014-05-08 16:59:35 +00:00
|
|
|
import os
|
|
|
|
import socket
|
|
|
|
import sys
|
|
|
|
import select
|
|
|
|
|
|
|
|
import paramiko
|
|
|
|
from celery.datastructures import ExceptionInfo
|
|
|
|
|
|
|
|
from . import settings
|
|
|
|
|
|
|
|
|
2014-10-04 09:29:18 +00:00
|
|
|
# Module-level logger named after this module so it participates in the
# project's hierarchical logging configuration.
logger = logging.getLogger(__name__)


# Shared transport registry — presumably caches open SSH transports keyed by
# server; it is not populated anywhere in this chunk (TODO confirm against the
# rest of the package before relying on its contents).
transports = {}


def BashSSH(backend, log, server, cmds):
    """Execute *cmds* as a single bash script on *server* over SSH.

    The commands are wrapped with ``set -e``/``set -o pipefail`` so the remote
    script aborts on the first failure, written to a local temp file, copied to
    the server via SFTP (avoiding "Argument list too long" for big scripts),
    integrity-checked with md5 on the remote side, and executed. Progress and
    results (script, stdout, stderr, exit code, state, traceback) are recorded
    on *log* (a BackendLog model instance) as they become available.

    Connection timeouts set ``log.state = TIMEOUT``; any other exception sets
    ``ERROR`` and stores the traceback. Returns None.
    """
    from .models import BackendLog
    script = '\n'.join(['set -e', 'set -o pipefail'] + cmds + ['exit 0'])
    script = script.replace('\r', '')
    log.script = script
    log.save(update_fields=['script'])
    logger.debug('%s is going to be executed on %s', backend, server)
    # Pre-bind so the finally block can't hit a NameError when an exception
    # occurs before the connection/channel is established (a real bug before).
    ssh = None
    channel = None
    try:
        # Avoid "Argument list too long" on large scripts by generating a file
        # and scping it to the remote server.
        # NOTE(review): md5 over the script str is Python-2 style; under
        # Python 3 this would need bytes — confirm target interpreter.
        digest = hashlib.md5(script).hexdigest()
        path = os.path.join(settings.ORCHESTRATION_TEMP_SCRIPT_PATH, digest)
        with open(path, 'w') as script_file:
            script_file.write(script)

        # SSH connection
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        addr = server.get_address()
        try:
            ssh.connect(addr, username='root',
                    key_filename=settings.ORCHESTRATION_SSH_KEY_PATH)
        except socket.error:
            logger.error('%s timed out on %s', backend, server)
            log.state = BackendLog.TIMEOUT
            log.save(update_fields=['state'])
            return
        transport = ssh.get_transport()

        # Copy the script to the remote server. Remove the local temp file even
        # if the transfer fails, so failed runs don't accumulate files.
        remote_path = "%s.remote" % path
        try:
            sftp = paramiko.SFTPClient.from_transport(transport)
            sftp.put(path, remote_path)
            sftp.close()
        finally:
            os.remove(path)

        # Execute it — but only after the remote copy's md5 matches what we sent.
        context = {
            'path': remote_path,
            'digest': digest
        }
        cmd = (
            "[[ $(md5sum %(path)s|awk {'print $1'}) == %(digest)s ]] && bash %(path)s\n"
            "RETURN_CODE=$?\n"
            # TODO "rm -fr %(path)s\n"
            "exit $RETURN_CODE" % context
        )
        channel = transport.open_session()
        channel.exec_command(cmd)

        # Log results
        logger.debug('%s running on %s', backend, server)
        if True:  # TODO if not async
            # Synchronous path: block until the whole streams are read.
            log.stdout += channel.makefile('rb', -1).read().decode('utf-8')
            log.stderr += channel.makefile_stderr('rb', -1).read().decode('utf-8')
        else:
            while True:
                # Non-blocking is the secret ingredient in the async sauce
                select.select([channel], [], [])
                if channel.recv_ready():
                    log.stdout += channel.recv(1024)
                if channel.recv_stderr_ready():
                    log.stderr += channel.recv_stderr(1024)
                log.save(update_fields=['stdout', 'stderr'])
                if channel.exit_status_ready():
                    break
        log.exit_code = exit_code = channel.recv_exit_status()
        log.state = BackendLog.SUCCESS if exit_code == 0 else BackendLog.FAILURE
        logger.debug('%s execution state on %s is %s', backend, server, log.state)
        log.save()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; all runtime failures are still recorded on the log.
        log.state = BackendLog.ERROR
        log.traceback = ExceptionInfo(sys.exc_info()).traceback
        logger.error('Exception while executing %s on %s', backend, server)
        logger.debug(log.traceback)
        log.save()
    finally:
        if channel is not None:
            channel.close()
        if ssh is not None:
            ssh.close()


def Python(backend, log, server, cmds):
    """Execute *cmds* in-process, calling each one with *server*.

    Each element of *cmds* looks like a ``functools.partial`` (it exposes
    ``.func`` and ``.args`` — TODO confirm against the callers). A readable
    rendering of the calls is appended to ``log.script``; the stringified
    results are accumulated into ``log.stdout`` on success. Any exception
    marks the log FAILURE (exit code 1) and stores the traceback; note that
    partial stdout gathered before the failure is discarded, preserving the
    original behavior. Returns None.
    """
    from .models import BackendLog
    # `__name__` is valid on both Python 2 and 3; the old `func_name`
    # attribute was Python-2 only.
    script = [ str(cmd.func.__name__) + str(cmd.args) for cmd in cmds ]
    script = json.dumps(script, indent=4).replace('"', '')
    log.script = '\n'.join([log.script, script])
    log.save(update_fields=['script'])
    stdout = ''
    try:
        for cmd in cmds:
            result = cmd(server)
            stdout += str(result)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate instead of being recorded as command failures.
        log.exit_code = 1
        log.state = BackendLog.FAILURE
        log.traceback = ExceptionInfo(sys.exc_info()).traceback
    else:
        log.exit_code = 0
        log.state = BackendLog.SUCCESS
        log.stdout += stdout
    log.save()