Initial commit

This commit is contained in:
Brett Williams
2022-10-04 23:09:09 -07:00
commit 1c304e77f5
13 changed files with 1717 additions and 0 deletions

0
config.yaml Normal file
View File

282
dashboard.py Executable file
View File

@@ -0,0 +1,282 @@
#!/usr/bin/env python
import datetime
import json
import os.path
import socket
import time
import threading
import traceback
import psutil
import requests
import click
from rich import box
from rich.console import Console
from rich.live import Live
from rich.table import Column
from rich.table import Table
from rich.text import Text
from rich.layout import Layout
from rich.panel import Panel
from rich.tree import Tree
import zordon_server
from zordon_server import RenderStatus
from zordon_server import string_to_status
"""
The RenderDashboard is designed to be run on a remote machine or on the local server
This provides a detailed status of all jobs running on the server
"""
# Rich color used to render each job status in the dashboard UI.
status_colors = {RenderStatus.ERROR: "red", RenderStatus.CANCELLED: 'orange1', RenderStatus.COMPLETED: 'green',
                 RenderStatus.NOT_STARTED: "yellow", RenderStatus.SCHEDULED: 'purple',
                 RenderStatus.RUNNING: 'cyan'}
# Display/grouping order for job statuses (active states first).
categories = [RenderStatus.RUNNING, RenderStatus.ERROR, RenderStatus.NOT_STARTED, RenderStatus.SCHEDULED,
              RenderStatus.COMPLETED, RenderStatus.CANCELLED]
# Hostname of the machine this dashboard process is running on.
local_hostname = socket.gethostname()
def status_string_to_color(status_string):
    """Return the rich markup color tag (e.g. '[red]') for a status string."""
    status = string_to_status(status_string)
    return '[{}]'.format(status_colors[status])
def sorted_jobs(all_jobs):
    """Return jobs sorted newest-first by their 'date_created' timestamp.

    Args:
        all_jobs: list of job dicts, each carrying an ISO-8601 'date_created'.

    Returns:
        A new list sorted descending by creation time; the input is not mutated.
    """
    # The old per-category grouping implementation was left here commented out
    # and the result variable shadowed the function name; both removed.
    return sorted(all_jobs,
                  key=lambda job: datetime.datetime.fromisoformat(job['date_created']),
                  reverse=True)
def create_node_tree(all_server_data) -> Tree:
    """Build a rich Tree summarising every server node and its active jobs.

    Args:
        all_server_data: /full_status payload shaped like
            {'servers': {hostname: {'status': {...}, 'jobs': [...]}}}.

    Returns:
        A Tree rooted at "Server Cluster" with one branch per server.
    """
    main_tree = Tree("[magenta]Server Cluster")
    for server_host, server_data in all_server_data['servers'].items():
        if server_host == local_hostname:
            node_tree_text = f"[cyan bold]{server_host}[/] [yellow](This Computer)[default] - [green]Running"
        else:
            node_tree_text = f"[cyan]{server_host} [magenta](Remote)[default] - [green]Running"
        node_tree = Tree(node_tree_text)
        status = server_data['status']
        stats_text = (f"CPU: {status['cpu_percent']}% | RAM: {status['memory_percent']}% | "
                      f"Cores: {status['cpu_count']} | {status['platform'].split('-')[0]}")
        node_tree.add(Tree(stats_text))
        running_jobs = [job for job in server_data['jobs'] if job['status'] == 'running']
        not_started = [job for job in server_data['jobs'] if job['status'] == 'not_started']
        scheduled = [job for job in server_data['jobs'] if job['status'] == 'scheduled']
        jobs_tree = Tree(f"Running: [green]{len(running_jobs)} [default]| Queued: [cyan]{len(not_started)}"
                         f"[default] | Scheduled: [cyan]{len(scheduled)}")
        if running_jobs or not_started or scheduled:
            for job in running_jobs:
                # Project filename (sans extension). Previously computed but
                # never used — the label showed a literal "(unknown)".
                filename = os.path.basename(job['render']['input']).split('.')[0]
                jobs_tree.add(f"[bold]{filename} ({job['id']}) - {status_string_to_color(job['status'])}"
                              f"{(float(job['render']['percent_complete']) * 100):.1f}%")
            for job in not_started + scheduled:
                filename = os.path.basename(job['render']['input']).split('.')[0]
                jobs_tree.add(f"{filename} ({job['id']}) - {status_string_to_color(job['status'])}{job['status'].title()}")
        else:
            jobs_tree.add("[italic]No running jobs")
        node_tree.add(jobs_tree)
        main_tree.add(node_tree)
    return main_tree
def create_jobs_table(all_server_data) -> Table:
    """Build the main rich Table listing every job across all servers.

    Args:
        all_server_data: /full_status payload ({'servers': {host: {'jobs': [...]}}}).

    Returns:
        A rich Table with one row per job, newest first.
    """
    table = Table("ID", "Project", "Output", "Renderer", Column(header="Priority", justify="center"),
                  Column(header="Status", justify="center"), Column(header="Time Elapsed", justify="right"),
                  Column(header="# Frames", justify="right"), "Node", show_lines=True,
                  box=box.HEAVY_HEAD)
    # Flatten the jobs of every server, tagging each with its node name.
    all_jobs = []
    for server_name, server_data in all_server_data['servers'].items():
        for job in server_data['jobs']:
            job['node'] = server_name
            all_jobs.append(job)
    all_jobs = sorted_jobs(all_jobs)
    # Loop-invariant lookup tables (previously rebuilt on every row).
    renderer_colors = {'ffmpeg': '[magenta]', 'Blender': '[orange1]'}
    priority_palette = ["red", "yellow", "cyan"]  # priority 1..3
    for job in all_jobs:
        job_status = string_to_status(job['status'])
        job_color = '[{}]'.format(status_colors[job_status])
        job_text = f"{job_color}" + job_status.value
        elapsed_time = job['render'].get('time_elapsed', 'unknown')
        # Project name colored by status, extension separator dimmed.
        project_name = job_color + os.path.basename(job['render']['input'])
        project_name = project_name.replace(".", "[default].")
        if job_status == RenderStatus.RUNNING:
            job_text = f"{job_color}[bold]Running - {float(job['render']['percent_complete']) * 100:.1f}%"
            delta = datetime.datetime.now() - datetime.datetime.fromisoformat(job['render']['start_time'])
            elapsed_time = "[bold]" + str(delta)
            project_name = "[bold]" + project_name
        elif job_status == RenderStatus.CANCELLED or job_status == RenderStatus.ERROR:
            project_name = "[strike]" + project_name
        priority_color = priority_palette[(job['priority'] - 1)]
        node_title = ("[yellow]" if job['node'] == local_hostname else "[magenta]") + job['node']
        table.add_row(
            job['id'],
            project_name,
            os.path.basename(job['render']['output']),
            renderer_colors.get(job['renderer'], '[cyan]') + job['renderer'] + '[default]-' + job['render']['renderer_version'],
            f"[{priority_color}]{job['priority']}",
            job_text,
            elapsed_time,
            str(max(int(job['render']['total_frames']), 1)),
            node_title
        )
    return table
def create_status_panel(all_server_data):
    """Return the local server's status dict as a string, or 'no status'."""
    local_entry = all_server_data['servers'].get(local_hostname)
    if local_entry is not None:
        return str(local_entry['status'])
    return "no status"
class RenderDashboard:
    """Thin HTTP client for polling a zordon render server."""

    def __init__(self, server_ip=None, server_port="8080"):
        self.server_ip = server_ip
        self.server_port = server_port
        self.local_hostname = local_hostname

    def connect(self):
        """Ping the server's /status endpoint; returns its JSON or None."""
        return self.request_data('status')

    def request_data(self, payload, timeout=2):
        """GET http://<ip>:<port>/<payload> and return the parsed JSON.

        Best-effort: any connection/parse failure yields None so the UI can
        keep refreshing while the server is down.
        """
        url = f'http://{self.server_ip}:{self.server_port}/{payload}'
        try:
            response = requests.get(url, timeout=timeout)
            if response.ok:
                return response.json()
        except Exception:
            pass
        return None

    def get_jobs(self):
        """Fetch all jobs, grouped in the display order of `categories`."""
        all_jobs = self.request_data('jobs')
        grouped = []
        if all_jobs:
            for status_category in categories:
                grouped.extend(job for job in all_jobs
                               if job['status'] == status_category.value)
        return grouped

    def get_data(self):
        """Fetch the combined /full_status payload for every server."""
        return self.request_data('full_status')
class KeyboardThread(threading.Thread):
    """Thread that forwards each line typed on stdin to a callback."""

    def __init__(self, input_cbk=None, name='keyboard-input-thread'):
        self.input_cbk = input_cbk
        super(KeyboardThread, self).__init__(name=name)
        # Begin listening for input as soon as the thread is constructed.
        self.start()

    def run(self):
        while True:
            # input() blocks until the user presses Return.
            self.input_cbk(input())
def my_callback(inp):
    """Echo a line of keyboard input back to the console."""
    print('You Entered:', inp)
if __name__ == '__main__':
    # Ask which server to monitor; an empty answer means this machine.
    server_ip = input("Enter server IP or None for local: ") or local_hostname
    client = RenderDashboard(server_ip, "8080")
    if not client.connect():
        if server_ip == local_hostname:
            start_server = input("Local server not running. Start server? (y/n) ")
            if start_server and start_server[0].lower() == "y":
                # Startup the local server
                zordon_server.start_server(background_thread=True)
                test = client.connect()
                print(f"connected? {test}")
        else:
            print(f"\nUnable to connect to server: {server_ip}")
            print("\nVerify IP address is correct and server is running")
            exit(1)
    # start the Keyboard thread
    kthread = KeyboardThread(my_callback)
    # Console Layout
    console = Console()
    layout = Layout()
    # Divide the "screen" in to three parts
    layout.split(
        Layout(name="header", size=3),
        Layout(ratio=1, name="main"),
        Layout(size=10, name="footer"),
    )
    # Divide the "main" layout in to "side" and "body"
    layout["main"].split_row(
        Layout(name="side"),
        Layout(name="body",
               ratio=3))
    # Divide the "side" layout in to two
    layout["side"].split(Layout(name="side_top"), Layout(name="side_bottom"))
    # Server connection header.
    # NOTE(review): header_text is built but never placed in the layout — the
    # header panel below shows the version banner instead; confirm intent.
    header_text = Text(f"Connected to server: ")
    header_text.append(f"{server_ip} ", style="green")
    if server_ip == local_hostname:
        header_text.append("(This Computer)", style="yellow")
    else:
        header_text.append("(Remote)", style="magenta")
    layout["header"].update(Panel(Text("Zordon Render Client - Version 0.0.1 alpha", justify="center")))
    # Refresh loop: re-fetch the full cluster status roughly once per second.
    with Live(console=console, screen=False, refresh_per_second=1, transient=True) as live:
        while True:
            server_data = client.get_data()
            try:
                if server_data:
                    layout["body"].update(create_jobs_table(server_data))
                    layout["side_top"].update(Panel(create_node_tree(server_data)))
                    layout["side_bottom"].update(Panel(create_status_panel(server_data)))
                    live.update(layout, refresh=False)
            except Exception as e:
                print(f"Exception updating table: {e}")
                traceback.print_exception(e)
            time.sleep(1)
    # # # todo: Add input prompt to manage running jobs (ie add, cancel, get info, etc)

7
requirements.txt Normal file
View File

@@ -0,0 +1,7 @@
click~=8.1.3
requests==2.28.1
psutil~=5.9.0
PyYAML~=6.0
Flask==2.2.2
rich==12.6.0
ffmpeg-python

143
utilities/aerender.py Normal file
View File

@@ -0,0 +1,143 @@
#! /usr/bin/python
from utilities.generic_renderer import *
import glob
import json
import re
def aerender_path():
    """Locate the After Effects `aerender` CLI binary under /Applications.

    Returns:
        Path to the first aerender binary found, or None when After Effects
        is not installed. Multiple installations log a warning but still
        return the first match (previously that branch fell through and
        returned None, making a duplicate install fatal).
    """
    paths = glob.glob('/Applications/*After Effects*/aerender')
    if not paths:
        logging.error('After Effects installation not found')
        return None
    if len(paths) > 1:
        logging.warning('Multiple After Effects installations detected')
    return paths[0]
class AERenderer(Renderer):
    """Renderer backend that drives Adobe After Effects via `aerender`.

    Falls back to the nexrender CLI when a `nexrender-cli-macos` binary is
    present in the working directory.
    """

    @staticmethod
    def version():
        """Return the installed aerender version string, or None on failure."""
        version = None
        try:
            proc = subprocess.Popen([aerender_path(), '-version'], stdout=subprocess.PIPE)
            proc.wait()
            ver_out = str(proc.stdout.read().strip())
            version = ver_out.split(" ")[-1].strip()
        except Exception as e:
            logging.error('failed getting version: {}'.format(e))
        return version

    renderer = 'After Effects'
    render_engine = 'aerender'
    supported_extensions = ['.aep']

    def __init__(self, project, comp, render_settings, omsettings, output):
        """Set up an After Effects render job.

        Args:
            project: path to the .aep project file.
            comp: composition name to render.
            render_settings: render-settings template name.
            omsettings: output-module template name.
            output: destination path for the rendered movie.
        """
        # Renderer.__init__ takes input_path/output_path keywords; the old
        # input=/output= keywords raised TypeError.
        super(AERenderer, self).__init__(input_path=project, output_path=output)
        self.comp = comp
        self.render_settings = render_settings
        self.omsettings = omsettings
        self.progress = 0
        self.progress_history = []  # raw PROGRESS: lines, used for averaging
        self.attributes = {}        # key/value pairs parsed from aerender output

    def _generate_subprocess(self):
        """Build the argv list, preferring nexrender when it is available."""
        if os.path.exists('nexrender-cli-macos'):
            logging.info('nexrender found')
            # Minimal nexrender job description (see nexrender docs for the
            # full template/assets/actions schema).
            job = {'template':
                   {
                       'src': 'file://' + self.input, 'composition': self.comp.replace('"', ''),
                       'settingsTemplate': self.render_settings.replace('"', ''),
                       'outputModule': self.omsettings.replace('"', ''), 'outputExt': 'mov'}
                   }
            return ['./nexrender-cli-macos', "'{}'".format(json.dumps(job))]
        logging.info('nexrender not found')
        return [aerender_path(), '-project', self.input, '-comp', self.comp, '-RStemplate', self.render_settings,
                '-OMtemplate', self.omsettings, '-output', self.output]

    def _parse_stdout(self, line):
        """Classify one line of aerender output (progress / warning / error)."""
        if line.startswith('PROGRESS:'):
            trimmed = line.replace('PROGRESS:', '').strip()
            if len(trimmed):
                self.progress_history.append(line)
                if 'Seconds' in trimmed:
                    self._update_progress(line)
                elif ': ' in trimmed:
                    # Header lines such as "Duration: ..." / "Frame Rate: ...".
                    tmp = trimmed.split(': ')
                    self.attributes[tmp[0].strip()] = tmp[1].strip()
        elif line.startswith('WARNING:'):
            trimmed = line.replace('WARNING:', '').strip()
            self.warnings.append(trimmed)
            logging.warning(trimmed)
        elif line.startswith('aerender ERROR') or 'ERROR:' in line:
            self.errors.append(line)
            logging.error(line)

    def _update_progress(self, line):
        """Record the latest rendered frame number from a PROGRESS line."""
        if not self.total_frames:
            duration_string = self.attributes.get('Duration', None)
            frame_rate = self.attributes.get('Frame Rate', '0').split(' ')[0]
            self.total_frames = timecode_to_frames(duration_string.split('Duration:')[-1], float(frame_rate))
        match = re.match(r'PROGRESS:.*\((?P<frame>\d+)\): (?P<time>\d+)', line).groupdict()
        # Store as int: average_frame_duration() divides by this value (the
        # previous str value made that division a TypeError).
        self.last_frame = int(match['frame'])

    def average_frame_duration(self):
        """Mean per-frame render time over the recorded progress history."""
        total_durations = 0
        for line in self.progress_history:
            match = re.match(r'PROGRESS:.*\((?P<frame>\d+)\): (?P<time>\d+)', line)
            if match:
                total_durations += int(match.group(2))
        if not self.last_frame:
            return 0.0  # no frames recorded yet; avoid ZeroDivisionError
        return float(total_durations) / self.last_frame

    def percent_complete(self):
        """Percent (0-100) of total frames rendered so far."""
        if self.total_frames:
            return (float(self.last_frame) / float(self.total_frames)) * 100
        else:
            return 0
if __name__ == '__main__':
    # Manual smoke test: render a local AE project and poll until finished.
    logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.DEBUG)
    r = AERenderer('/Users/brett/Desktop/Youtube_Vids/Film_Formats/Frame_Animations.aep', '"Film Pan"',
                   '"Draft Settings"', '"ProRes"', '/Users/brett/Desktop/test_render')
    r.start()
    while r.is_running():
        time.sleep(0.1)

117
utilities/blender.py Normal file
View File

@@ -0,0 +1,117 @@
#! /usr/bin/python
from utilities.generic_renderer import *
# Output image/container formats accepted by Blender's `-F` CLI flag.
SUPPORTED_FORMATS = ['TGA', 'RAWTGA', 'JPEG', 'IRIS', 'IRIZ', 'AVIRAW', 'AVIJPEG', 'PNG', 'BMP', 'HDR', 'TIFF', 'OPEN_EXR', 'OPEN_EXR_MULTILAYER', 'MPEG', 'CINEON', 'DPX', 'DDS', 'JP2']
class BlenderRenderer(Renderer):
    """Renderer backend that drives Blender's command-line interface."""

    def version(self):
        """Return the installed Blender version string, or None on failure."""
        version = None
        try:
            ver_out = subprocess.check_output([self.renderer_path(), '-v']).decode('utf-8')
            version = ver_out.splitlines()[0].replace('Blender', '').strip()
        except Exception as e:
            logger.error("Failed to get Blender version: {}".format(e))
        return version

    # Class-level metadata consumed by the generic Renderer machinery.
    renderer = 'Blender'
    render_engine = 'blender'
    supported_extensions = ['.blend']
    install_paths = ['/Applications/Blender.app/Contents/MacOS/Blender']

    def __init__(self, input_path, output_path, render_all_frames=False, engine='BLENDER_EEVEE'):
        """Set up a Blender render job.

        Args:
            input_path: .blend project file.
            output_path: output path/pattern passed to Blender's `-o` flag.
            render_all_frames: render the full animation (`-a`) instead of a
                single frame.
            engine: Blender engine id, e.g. 'BLENDER_EEVEE' or 'CYCLES'.
        """
        super(BlenderRenderer, self).__init__(input_path=input_path, output_path=output_path)
        self.engine = engine  # or 'CYCLES'
        self.format = 'JPEG'
        self.frame = 0  # frame rendered when render_all_frames is False
        self.render_all_frames = render_all_frames
        # Stats parsed from Blender's stdout as the render progresses.
        self.current_frame = -1
        self.memory_use = None
        self.time_elapsed = None
        self.time_remaining = None
        self.frame_percent_complete = 0.0

    def generate_preview(self, blend_file):
        # Not implemented for Blender yet.
        pass

    def _generate_subprocess(self):
        """Build the Blender CLI argv; raises ValueError on a bad format."""
        if self.format not in SUPPORTED_FORMATS:
            raise ValueError("Unsupported format for Blender: {}".format(self.format))
        if self.render_all_frames:
            cmd = [self.renderer_path(), '-b', self.input, '-E', self.engine, '-o', self.output,
                   '-F', self.format, '-a']
        else:
            cmd = [self.renderer_path(), '-b', self.input, '-E', self.engine, '-o', self.output,
                   '-F', self.format, '-f', str(self.frame)]
        return cmd

    def _parse_stdout(self, line):
        """Parse one line of Blender output for memory/time/sample stats.

        Example lines:
          EEVEE:  Fra:0 Mem:857.99M (Peak 928.55M) | Time:00:03.96 | Rendering 1 / 65 samples
          CYCLES: Fra:0 Mem:836.30M (Peak 1726.13M) | Time:00:01.56 | Remaining:00:30.65 | ... | Sample 1/150
        """
        import re
        pattern = re.compile(
            r'Fra:(?P<frame>\d*).*Mem:(?P<memory>\S+).*Time:(?P<time>\S+)(?:.*Remaining:)?(?P<remaining>\S*)')
        found = pattern.search(line)
        if found:
            stats = found.groupdict()
            self.memory_use = stats['memory']
            self.time_elapsed = stats['time']
            self.time_remaining = stats['remaining'] or 'Unknown'
            # Per-frame sample progress from the last pipe-delimited field,
            # e.g. "Rendering 26 / 64 samples" or "Sample 129/150".
            sample_string = line.split('|')[-1].strip()
            if "sample" in sample_string.lower():
                samples = re.sub(r'[^0-9/]', '', sample_string)
                self.frame_percent_complete = int(samples.split('/')[0]) / int(samples.split('/')[-1])
            # Only log when we advance to a new frame.
            if int(stats['frame']) > self.current_frame:
                self.current_frame = int(stats['frame'])
                logger.info(
                    'Frame:{0} | Mem:{1} | Time:{2} | Remaining:{3}'.format(self.current_frame, self.memory_use,
                                                                            self.time_elapsed, self.time_remaining))
        elif 'error' in line.lower():
            logger.error(line)
            self.errors.append(line)
        elif 'Saved' in line or 'Saving' in line or 'quit' in line:
            x = re.match(r'Time: (.*) \(Saving', line)
            if x:
                time_completed = x.groups()[0]
                logger.info('Render completed in {}'.format(time_completed))
            else:
                logger.info(line)
        else:
            pass

    def percent_complete(self):
        """Overall completion as a 0-1 fraction.

        For a single frame this is the per-frame sample fraction; for
        animations it combines whole frames with the current frame's samples.
        """
        if self.total_frames <= 1:
            return self.frame_percent_complete
        else:
            return (self.current_frame / self.total_frames) +\
                   (self.frame_percent_complete * (self.current_frame / self.total_frames))
if __name__ == '__main__':
    # Manual smoke test: render one Cycles frame from a local .blend file.
    logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.DEBUG)
    r = BlenderRenderer('/Users/brett/Blender Files/Ian Hubert/CyberExtras.blend', '/Users/brett/testing1234', render_all_frames=False, engine="CYCLES")
    # r.engine = 'CYCLES'
    r.start()
    while r.is_running():
        time.sleep(1)

121
utilities/compressor.py Normal file
View File

@@ -0,0 +1,121 @@
#! /usr/bin/python
from generic_renderer import *
import glob
import logging
import subprocess
# Documentation
# https://help.apple.com/compressor/mac/4.0/en/compressor/usermanual/Compressor%204%20User%20Manual%20(en).pdf
def compressor_path():
    """Path to the Compressor CLI bundled inside the macOS application."""
    return '/Applications/Compressor.app/Contents/MacOS/Compressor'
class CompressorRenderer(Renderer):
    """Renderer backend for Apple Compressor's command-line interface.

    Typical invocation (see `Compressor -help` / the Compressor user manual
    for the full option list):
        Compressor -batchname <name> -jobpath <url> -settingpath <path>
                   -locationpath <path> [-timeout <seconds>]
    """

    renderer = 'Compressor'

    def __init__(self, project, settings_path, output):
        """Set up a single-job Compressor batch.

        Args:
            project: source media file path.
            settings_path: .compressorsetting file describing the encode.
            output: destination path for the encoded movie.
        """
        # Renderer.__init__ expects input_path/output_path keywords; the old
        # project=/output= keywords raised TypeError. ignore_extensions=True
        # because this class declares no supported_extensions, which would
        # otherwise make the extension check always fail.
        super(CompressorRenderer, self).__init__(input_path=project, output_path=output,
                                                 ignore_extensions=True)
        self.settings_path = settings_path
        self.batch_name = os.path.basename(project)
        self.cluster_name = 'This Computer'
        self.timeout = 5

    def _generate_subprocess(self):
        """Build the Compressor argv for a single-job batch.

        The batch name is the current timestamp so repeated runs stay unique.
        """
        x = [compressor_path(), '-batchname', datetime.now().isoformat(), '-jobpath', self.input,
             '-settingpath', self.settings_path, '-locationpath', self.output]
        print(' '.join(x))
        return x

    def _parse_stdout(self, line):
        # Compressor output is not parsed for progress yet; just echo it.
        print(line)
def _parse_stdout(self, line):
print(line)
if __name__ == '__main__':
    # Manual smoke test: compress a local movie with a stock 720p web setting.
    logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.DEBUG)
    r = CompressorRenderer('/Users/brett/Desktop/drone_raw.mp4', '/Applications/Compressor.app/Contents/Resources/Settings/Website Sharing/HD720WebShareName.compressorsetting', '/Users/brett/Desktop/test_drone_output.mp4')
    r.start()
    while r.is_running():
        time.sleep(1)

200
utilities/fcpx.py Normal file
View File

@@ -0,0 +1,200 @@
import xml.etree.ElementTree as ET
import argparse
import os
import glob
from urllib2 import unquote
import time
library = None
class FCPXLibrary:
    """Parsed view of a Final Cut Pro X library XML export (fcpxml)."""

    def __init__(self, xml_path):
        """Parse `xml_path` and index its formats, clips and projects."""
        parser = ET.parse(xml_path)
        self.root = parser.getroot()
        self.xml_version = self.root.attrib.get('version')
        self.location = self.library_location()
        # self.projects = self.root.findall('./library/event/project')
        self.formats = self.root.findall('./resources/format')
        self.clips = [Clip(x, self) for x in self.root.findall(".//asset-clip")]
        self.projects = [Project(x, self) for x in self.root.findall('./library/event/project')]

    # NOTE: a `formats()` method used to be defined here, but the instance
    # attribute of the same name assigned in __init__ shadowed it, making it
    # unreachable dead code; it has been removed.

    def element_with_tag_value(self, element, tag, value):
        """Find elements matching .//{element}[@{tag}='{value}']."""
        return self.root.findall(".//{e}[@{t}='{v}']".format(e=element, t=tag, v=value))

    def clips_with_videorole(self, role):
        """All clips whose videoRole attribute equals `role`."""
        return [clip for clip in self.clips if getattr(clip, 'videoRole', None) == role]

    def format_with_id(self, id):
        """Look up <format> resources by their id attribute."""
        return self.element_with_tag_value('format', 'id', id)

    def library_location(self):
        """Filesystem path of the library bundle (strips the 'file://' prefix)."""
        path = self.root.findall('./library')[0].attrib['location']
        return unquote(path)[7:]
class Project:
    """Wrapper for a <project> element; exposes its XML attributes directly.

    NOTE(review): the ref-clip matching below compares a Clip's name against
    an Element object, which can never be equal — presumably it should compare
    against the ref-clip's name/ref attribute; left as-is pending confirmation.
    """

    def __init__(self, project_element, library):
        for attrib in project_element.attrib:
            setattr(self, attrib, project_element.get(attrib))
        print(project_element.attrib)
        # (removed: `print(project_element.parent)` — ElementTree elements
        # have no .parent attribute, so that line always raised AttributeError)
        ref_clips = project_element.findall(".//ref-clip")
        print('start')
        for clip in library.clips:
            print(clip.name)
            if clip.name == ref_clips[0]:
                print(clip)
                break
        print('end')
        # for child in ref_clips:
        #     print(child.tag, child.attrib)
class Clip:
    """Wrapper for an <asset-clip>; merges clip and referenced asset attributes.

    XML attributes of both the clip element and its referenced <asset> become
    Python attributes on the instance. Video clips additionally get integer
    in/out frame numbers derived from the format's frame duration.
    """

    def __init__(self, clip_element, library):
        # self.library = library
        # Get attribs from XML
        for attrib in clip_element.attrib:
            setattr(self, attrib, clip_element.get(attrib))
        # audioRole is only present on audio clips — used to classify the type.
        self.type = 'audio' if hasattr(self, 'audioRole') else 'video'
        # Resolve the referenced <asset> and copy any attributes we lack.
        asset_ref = next(iter(library.element_with_tag_value('asset', 'id', self.ref)))
        for attrib in asset_ref.attrib:
            if not hasattr(self, attrib):
                setattr(self, attrib, asset_ref.get(attrib))
        # Strip the 'file://' scheme prefix to get a filesystem path.
        self.source = unquote(asset_ref.get('src'))[7:]
        if self.type == 'video':
            format_id = getattr(self, 'format', asset_ref.get('format', None))
            video_format = next(iter(library.format_with_id(format_id)))
            if not hasattr(self, 'format'):
                print('no format!')
            try:
                # Convert rational FCPX time strings into frame numbers.
                frame_duration = fcp_time_to_float(video_format.get('frameDuration'))
                self.in_frame = int(round(fcp_time_to_float(self.start) / frame_duration))
                duration = int(round(fcp_time_to_float(self.duration) / frame_duration))
                self.out_frame = self.in_frame + duration
            except Exception as e:
                print('in/out fail: ' + str(e))
                print(dir(self))
                pass

    def optimized_source(self):
        """Path to the high-quality transcode inside the library bundle, if any.

        NOTE(review): uses the module-level `library` global (None unless set
        by the caller), not the library this clip was built from — confirm.
        """
        path = None
        mov = os.path.splitext(os.path.basename(self.source))[0] + '.mov'
        found = glob.glob(os.path.join(library.location, '*', 'Transcoded Media', 'High Quality Media', mov))
        if found:
            path = found[0]
        print(path)
        return path

    def proxy_source(self):
        """Path to the proxy transcode inside the library bundle, if any.

        NOTE(review): same module-level `library` global caveat as
        optimized_source — confirm.
        """
        path = None
        mov = os.path.splitext(os.path.basename(self.source))[0] + '.mov'
        found = glob.glob(os.path.join(library.location, '*', 'Transcoded Media', 'Proxy Media', mov))
        if found:
            path = found[0]
        print(path)
        return path

    def __repr__(self):
        if self.type == 'video':
            return "<Clip name:'%s' type: %s role: '%s' duration:%s frames>" % (getattr(self, 'name', None), self.type,
                                                                                getattr(self, 'videoRole', None), self.out_frame - self.in_frame)
        else:
            return "<Clip name:'%s' type: %s role: '%s'>" % (getattr(self, 'name', None), self.type, getattr(self, 'audioRole', None))
def fcp_time_to_float(timestr):
    """Convert an FCPX rational time string (e.g. '1001/30000s') to seconds.

    Returns 0.0 for None, malformed input, or a zero denominator.
    """
    try:
        parts = timestr.strip('s').split('/')
        return float(parts[0]) / float(parts[-1])
    except (ZeroDivisionError, AttributeError):
        return 0.0
import sys
from types import ModuleType, FunctionType
from gc import get_referents
# Custom objects know their class.
# Function objects seem to know way too much, including modules.
# Exclude modules as well.
# Types excluded from traversal: classes know their class, and function /
# module objects reference far more than the data we want to measure.
BLACKLIST = type, ModuleType, FunctionType


def getsize(obj):
    """Sum the size of an object and everything it transitively references."""
    if isinstance(obj, BLACKLIST):
        raise TypeError('getsize() does not take argument of type: ' + str(type(obj)))
    seen_ids = set()
    total = 0
    frontier = [obj]
    while frontier:
        accepted = []
        for item in frontier:
            if isinstance(item, BLACKLIST) or id(item) in seen_ids:
                continue
            seen_ids.add(id(item))
            total += sys.getsizeof(item)
            accepted.append(item)
        frontier = get_referents(*accepted)
    return total
# if __name__ == "__main__":
#
# parser = argparse.ArgumentParser()
# parser.add_argument('-i', '--input', help='Input FCPX Library XML', required=True)
# parser.add_argument('-s', '--save-file', help='Description', required=False)
#
# args = parser.parse_args()
#
# library = FCPXLibrary(args.input)
#
# print getsize(library)
# while True:
# time.sleep(4)
#
# print library.library_location()
#
# print dir(library.clips[0])
# print library.clips[0]
# print library.clips[0].proxy_source()
#
# print(args.input)
# print(args.save_file)
if __name__ == '__main__':
    # Manual smoke test: parse a library export from the working directory
    # and dump the first project's attributes.
    library = FCPXLibrary('new.fcpxml')
    # print library.clips[0].source
    # print library.library_location()
    # for clip in library.clips:
    #     print clip
    print(dir(library.projects[0]))
    print(library.formats)

View File

@@ -0,0 +1,32 @@
import ffmpeg
def file_info(path):
    """Return ffprobe metadata for `path`, or None when probing fails."""
    try:
        return ffmpeg.probe(path)
    except Exception as exc:
        print('Error getting ffmpeg info: ' + str(exc))
    return None
def generate_fast_preview(source_path, dest_path, max_width=1280, run_async=False):
    """Quickly transcode to a yuv420p preview no wider than `max_width`."""
    output_options = {
        'vf': 'format=yuv420p,scale={width}:-2'.format(width=max_width),
        'preset': 'ultrafast',
    }
    graph = ffmpeg.output(ffmpeg.input(source_path), dest_path, **output_options)
    return _run_output(graph, run_async)
def generate_prores_trim(source_path, dest_path, start_frame, end_frame, handles=10, run_async=False):
    """Cut [start_frame, end_frame] plus `handles` frames of padding to ProRes."""
    first_frame = max(start_frame - handles, 0)  # never trim before frame 0
    last_frame = end_frame + handles
    graph = ffmpeg.input(source_path).trim(start_frame=first_frame, end_frame=last_frame)
    graph = graph.setpts('PTS-STARTPTS')  # reset timestamps so output starts at 0
    graph = ffmpeg.output(graph, dest_path, strict='-2', **{'c:v': 'prores_ks', 'profile:v': 4})
    return _run_output(graph, run_async)
def _run_output(stream, run_async):
    """Execute the assembled ffmpeg graph, optionally without blocking."""
    if run_async:
        return ffmpeg.run_async(stream)
    return ffmpeg.run(stream)
if __name__ == '__main__':
    # Manual smoke test: probe a local movie and dump its metadata.
    x = file_info("/Users/brettwilliams/Desktop/dark_knight_rises.mp4")
    print(x)

View File

@@ -0,0 +1,72 @@
#! /usr/bin/python
import re
import time
import ffmpeg
from utilities.generic_renderer import *
class FFMPEGRenderer(Renderer):
    """Renderer backend wrapping the ffmpeg CLI."""

    def version(self):
        """Return the installed ffmpeg version string, or None on failure."""
        version = None
        try:
            ver_out = subprocess.check_output([self.renderer_path(), '-version']).decode('utf-8')
            # Raw string: the previous non-raw pattern relied on invalid
            # escape sequences ('\s'), a DeprecationWarning in modern Python.
            match = re.match(r".*version\s*(\S+)\s*Copyright", ver_out)
            version = match.groups()[0]
        except Exception as e:
            logger.error("Failed to get FFMPEG version: {}".format(e))
        return version

    renderer = 'ffmpeg'
    render_engine = 'ffmpeg'

    def __init__(self, input_path, output_path, args=None):
        """Set up an ffmpeg transcode job.

        Args:
            input_path: source media file.
            output_path: destination file.
            args: optional extra ffmpeg CLI arguments inserted before the output.
        """
        super(FFMPEGRenderer, self).__init__(input_path=input_path, output_path=output_path, ignore_extensions=True)
        self.total_frames = -1  # unknown until probed below
        if os.path.exists(input_path):
            media_stats = ffmpeg.probe(input_path)
            for stream in media_stats['streams']:
                if stream['codec_type'] == 'video':
                    # NOTE(review): ffprobe reports nb_frames as a string;
                    # consumers convert with float(), so the type is kept.
                    self.total_frames = stream['nb_frames']
                    break
        self.frame = 0
        self.args = args
        # Stats parsed from ffmpeg's -stats output.
        self.current_frame = -1

    def _generate_subprocess(self):
        """Build the ffmpeg argv (-y overwrite, -stats for progress lines)."""
        cmd = [self.renderer_path(), '-y', '-stats', '-i', self.input]
        if self.args:
            cmd.extend(self.args)
        cmd.append(self.output)
        return cmd

    def percent_complete(self):
        """Fraction complete in 0..1 (0.0 while frame counts are unknown)."""
        return max(float(self.current_frame) / float(self.total_frames), 0.0)

    def _parse_stdout(self, line):
        """Pull frame/time stats out of an ffmpeg '-stats' progress line."""
        pattern = re.compile(r'frame=\s*(?P<current_frame>\d+)\s*fps.*time=(?P<time_elapsed>\S+)')
        found = pattern.search(line)
        if found:
            stats = found.groupdict()
            self.current_frame = stats['current_frame']
            self.time_elapsed = stats['time_elapsed']
if __name__ == '__main__':
    # Manual smoke test: transcode a local movie to HEVC and poll until done.
    logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
    test_movie = '/Users/brettwilliams/Desktop/dark_knight_rises.mp4'
    r = FFMPEGRenderer(test_movie, '/Users/brettwilliams/Desktop/test-ffmpeg.mp4', args=['-c:v','libx265','-vtag','hvc1'])
    # r = FFMPEGRenderer(test_movie, '/Users/brettwilliams/Desktop/dark_knight_rises-output.mp4')
    r.start()
    while r.is_running():
        time.sleep(1)

View File

@@ -0,0 +1,211 @@
from datetime import datetime
import io
import logging
import os
import subprocess
import threading
from datetime import datetime
from enum import Enum
import psutil
logger = logging.getLogger()
class RenderStatus(Enum):
    """Lifecycle states of a render job; values are the wire/JSON strings."""
    NOT_STARTED = "not_started"
    RUNNING = "running"
    COMPLETED = "completed"
    CANCELLED = "cancelled"
    ERROR = "error"
    SCHEDULED = "scheduled"
def string_to_status(string):
    """Map a status value string back to its RenderStatus; unknown -> ERROR."""
    matches = (status for status in RenderStatus if status.value == string)
    return next(matches, RenderStatus.ERROR)
class Renderer(object):
    """Base class for subprocess-driven render engines.

    Subclasses set the class attributes below and override
    `_generate_subprocess` (argv to run) and `_parse_stdout` (progress
    parsing). `start()` validates inputs and launches the worker thread.
    """
    renderer = 'GenericRenderer'
    render_engine = None  # CLI executable name looked up on PATH
    supported_extensions = []  # accepted project extensions, e.g. ['.blend']
    install_paths = []  # fallback absolute binary paths when not on PATH

    @staticmethod
    def version():
        """Renderer version string; subclasses override with a real probe."""
        return 'Unknown'
def __init__(self, input_path, output_path, ignore_extensions=False):
if not ignore_extensions:
if not any(ext in input_path for ext in self.supported_extensions):
err_meg = f'Cannot find valid project with supported file extension for {self.renderer} renderer'
logger.error(err_meg)
raise ValueError(err_meg)
# Essential Info
self.input = input_path
self.output = output_path
self.date_created = datetime.now()
self.attributes = {}
self.renderer_version = self.version()
# Ranges
self.total_frames = 0
self.last_frame = 0
# Logging
self.log_path = None
self.start_time = None
self.end_time = None
self.last_error = None
# History
self.status = RenderStatus.NOT_STARTED
self.warnings = []
self.errors = []
self.failed_attempts = 0
self.maximum_attempts = 1
# Threads and processes
self.thread = threading.Thread(target=self.run, args=())
self.thread.daemon = True
self.process = None
self.is_finished = False
self.last_output = None
def renderer_path(self):
path = None
try:
path = subprocess.check_output(['which', self.render_engine]).decode('utf-8').strip()
except Exception as e:
for p in self.install_paths:
if os.path.exists(p):
path = p
# if not path:
# logger.error("Failed to get path to {}: {}".format(self.renderer, e))
return path
def _generate_subprocess(self):
return []
def start(self):
if not os.path.exists(self.input):
self.status = RenderStatus.ERROR
msg = 'Cannot find input path: {}'.format(self.input)
logger.error(msg)
self.errors.append(msg)
return
if not self.renderer_path():
self.status = RenderStatus.ERROR
msg = 'Cannot find render engine path for {}'.format(self.render_engine)
logger.error(msg)
self.errors.append(msg)
return
self.status = RenderStatus.RUNNING
logger.info('Starting {0} {1} Render for {2}'.format(self.renderer, self.version(), self.input))
self.thread.start()
def run(self):
# Setup logging
try:
log_dir = os.path.join(os.path.dirname(self.input), 'logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
self.log_path = os.path.join(log_dir, os.path.basename(self.input)) + '.log'
logger.info('Logs saved in {}'.format(self.log_path))
except Exception as e:
logger.error("Error setting up logging: {}".format(e))
while self.failed_attempts < self.maximum_attempts and self.status is not RenderStatus.COMPLETED:
if self.failed_attempts:
logger.info('Attempt #{} failed. Starting attempt #{}'.format(self.failed_attempts, self.failed_attempts + 1))
# Start process and get updates
subprocess_cmds = self._generate_subprocess()
logger.debug("Renderer commands generated - {}".format(" ".join(subprocess_cmds)))
self.process = subprocess.Popen(subprocess_cmds, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=False)
self.start_time = datetime.now()
f = open(self.log_path, "a")
f.write("{3} - Starting {0} {1} Render for {2}\n".format(self.renderer, self.version(), self.input, self.start_time.isoformat()))
for c in io.TextIOWrapper(self.process.stdout, encoding="utf-8"): # or another encoding
f.write(c)
logger.debug("RENDERER: {}".format(c.strip()))
self.last_output = c.strip()
self._parse_stdout(c.strip())
f.write('\n')
# Check return codes
return_code = self.process.wait()
self.end_time = datetime.now()
# Return early if job was cancelled
if self.status is RenderStatus.CANCELLED:
self.is_finished = True
return
duration = self.end_time - self.start_time
if return_code:
message = f"{self.renderer} render failed with return_code {return_code} after {duration}"
logger.error(message)
self.failed_attempts = self.failed_attempts + 1
else:
message = f"{self.renderer} render completed successfully in {duration}"
logger.info(message)
self.status = RenderStatus.COMPLETED
f.write(message)
f.close()
if self.failed_attempts >= self.maximum_attempts and self.status is not RenderStatus.CANCELLED:
logger.error('{} Render of {} failed after {} attempts'.format(self.renderer, self.input, self.failed_attempts))
self.status = RenderStatus.ERROR
self.is_finished = True
def is_running(self):
if self.thread:
return self.thread.is_alive()
return False
def stop(self):
if self.process:
try:
self.status = RenderStatus.CANCELLED
self.maximum_attempts = 0
process = psutil.Process(self.process.pid)
for proc in process.children(recursive=True):
proc.kill()
process.kill()
except Exception as e:
logger.error(f"Exception stopping the process: {e}")
def percent_complete(self):
return 0
def _parse_stdout(self, line):
pass
def elapsed_time(self):
elapsed = ""
if self.start_time:
if self.end_time:
elapsed = self.end_time - self.start_time
elif self.is_running():
elapsed = datetime.now() - self.start_time
return elapsed
def timecode_to_frames(timecode, frame_rate):
    """Convert an 'HH:MM:SS:FF' timecode string to an absolute 1-based frame count.

    :param timecode: colon-separated fields; the last field is the frame number
        within the second
    :param frame_rate: frames per second
    :return: frame number, where frame 1 corresponds to 00:00:00:00
    """
    e = [int(x) for x in timecode.split(':')]
    # BUG FIX: hours were previously weighted at 60 seconds instead of 3600
    # ((e[0]*60) + e[1]*60 + e[2]); correct weighting is h*3600 + m*60 + s.
    seconds = (e[0] * 60 + e[1]) * 60 + e[2]
    frames = (seconds * frame_rate) + e[-1] + 1
    return frames

107
utilities/openproject.py Normal file
View File

@@ -0,0 +1,107 @@
import json
import requests
from requests.auth import HTTPBasicAuth
from datetime import datetime, timezone
import time
import logging
class OpenProject:
    """Thin REST client for an OpenProject server (API v3 work packages / projects)."""
    def __init__(self):
        # self.server_url = "http://localhost:8080"
        # SECURITY NOTE(review): server URL and API key are hard-coded in source
        # control — move them to config.yaml or environment variables.
        self.server_url = "http://17.114.221.240:8080"
        # self.api_key = "bb5897eb1daf9bdc4b400675de8e1e52bd64e1e8bce95b341a61a036431c850e"
        self.api_key = "b902d975fcf6a29558e611e665145282acffa1e7109bfb462ef25266f7f9ed6e"
    def create_shot(self, scene, shot, project, sequence=None):
        """Create a work package named SC<scene>_<shot>.

        NOTE(review): `project` and `sequence` are currently ignored — the
        target project href is hard-coded to id 1. Confirm intended behavior.
        """
        url = self.server_url + "/api/v3/work_packages"
        project_url = 1
        attributes = {
            "subject": "SC{}_{}".format(scene, shot),
            "customField2": scene,
            "customField1": shot,
            "_links": {
                "project": {"href": "/api/v3/projects/{}".format(project_url)},
                "type": {"href": "/api/v3/types/1"}
            }
        }
        return self._send_command(url, attributes)
    def add_comment(self, work_project_id, comment, notify=False):
        """Append a comment activity to a work package; notify=True emails watchers."""
        url = self.server_url + "/api/v3/work_packages/{}/activities?notify={}".format(str(work_project_id), str(notify))
        attributes = {"comment": {"raw": comment}}
        return self._send_command(url, attributes)
    def get_work_package(self, identifier=None, attribute=None):
        """Fetch one work package by id, or the whole collection when id is None.

        A non-None `attribute` body turns the request into a POST (see _send_command).
        """
        url = self.server_url + "/api/v3/work_packages/"
        if identifier:
            url = url + str(identifier)
        return self._send_command(url, attribute)
    def get_projects(self, identifier=None):
        """Fetch one project by id, or all projects when id is None."""
        url = self.server_url + "/api/v3/projects/"
        if identifier:
            url = url + str(identifier)
        return self._send_command(url, None)
    def _send_command(self, url, body):
        """POST `body` as JSON when given, otherwise GET; returns the decoded JSON.

        Errors are logged but the (possibly error) JSON body is still returned.
        """
        if body:
            response = requests.post(url, json=body,
                                     auth=HTTPBasicAuth('apikey', self.api_key))
        else:
            response = requests.get(url, auth=HTTPBasicAuth('apikey', self.api_key))
        if not response.ok:
            logging.error('Response error: {}'.format(response.reason))
        return response.json()
class OpenProjectWatcher:
    """Polls an OpenProject server and logs projects/work packages updated since the last poll."""
    def __init__(self, op_instance, interval=30):
        """
        :param op_instance: OpenProject client used for all API calls
        :param interval: seconds to sleep between polls in watch()
        """
        # BUG FIX: the injected client was previously discarded and a fresh
        # OpenProject() was constructed instead; use the instance the caller
        # passed in so custom-configured clients are honored.
        self.op = op_instance
        self.interval = interval
        self.last_check = None  # aware UTC datetime of the previous poll, or None
    def _check_projects(self):
        """Log every project whose updatedAt is newer than the last poll."""
        projects = self.op.get_projects()
        for project in projects['_embedded']['elements']:
            # last_update = datetime.datetime.fromisoformat(project['updatedAt'])
            last_update = datetime.strptime(project['updatedAt'], "%Y-%m-%dT%H:%M:%S%z")
            if not self.last_check or last_update > self.last_check:
                logging.info("Update found for project: {}".format(project['name']))
                # todo: do something with updated info
    def _check_work_projects(self):
        """Log every work package whose updatedAt is newer than the last poll."""
        packages = self.op.get_work_package()
        for pkg in packages['_embedded']['elements']:
            last_update = datetime.strptime(pkg['updatedAt'], "%Y-%m-%dT%H:%M:%S%z")
            if not self.last_check or last_update > self.last_check:
                logging.info("Update found for shot: {}".format(pkg['subject']))
                # todo: do something with updated info
    def watch(self):
        """Poll forever, sleeping self.interval seconds between rounds."""
        while True:
            now = datetime.now(timezone.utc)
            self._check_projects()
            self._check_work_projects()
            self.last_check = now
            time.sleep(self.interval)
if __name__ == '__main__':
    # Ad-hoc manual exercise of the OpenProject client and watcher.
    logging.basicConfig(level=logging.INFO)
    client = OpenProject()
    client.add_comment(42, "After Effects Render completed successfully. Log available here.", True)
    # print(client.get_projects())
    OpenProjectWatcher(OpenProject()).watch()

BIN
whiteboard.jpeg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.0 MiB

425
zordon_server.py Executable file
View File

@@ -0,0 +1,425 @@
#!/usr/bin/env python
import logging
import platform
import socket
import threading
import time
import uuid
from datetime import datetime
import psutil
import requests
import yaml
import json
import os
from flask import Flask, jsonify, request
from utilities.aerender import AERenderer
from utilities.blender import BlenderRenderer
from utilities.ffmpeg_render import FFMPEGRenderer
from utilities.generic_renderer import RenderStatus
from utilities.generic_renderer import string_to_status
data = 'foo'  # NOTE(review): appears unused in this module — candidate for removal
app = Flask(__name__)  # Flask app serving the REST endpoints defined below
logger = logging.getLogger()
local_hostname = socket.gethostname()
JSON_FILE = 'job_history.json'  # default path for persisted job history
#todo: move history to sqlite db
class RenderJob:
    """Queue entry pairing a Renderer with scheduling metadata (id, priority, owner)."""
    def __init__(self, render, priority, owner=None, client=None, notify=None):
        # Short 8-char id: first segment of a uuid4.
        self.id = str(uuid.uuid4()).split('-')[0]
        self.owner = owner
        self.render = render            # Renderer instance doing the actual work
        self.priority = priority        # lower number == higher priority (see RenderServer.evaluate_queue)
        self.client = client            # remote render client (not implemented yet)
        self.notify = notify
        self.date_created = datetime.now()
        self.scheduled_start = None     # datetime for a deferred start, or None
        self.renderer = render.renderer
        self.name = os.path.basename(render.input) + '_' + self.date_created.isoformat()
        self.archived = False           # archived jobs are hidden from listings
    def render_status(self):
        """Returns status of render job.

        A not-yet-started job with a scheduled_start reports SCHEDULED;
        any failure to read the underlying status reports ERROR.
        """
        try:
            if self.scheduled_start and self.render.status == RenderStatus.NOT_STARTED:
                return RenderStatus.SCHEDULED
            else:
                return self.render.status
        except Exception as e:
            logger.warning("render_status error: {}".format(e))
            return RenderStatus.ERROR
    def json(self):
        """Converts RenderJob into JSON format.

        Returns a JSON string, or '' when serialization fails (error is logged).
        Thread and process handles are stripped from the nested render dict.
        """
        import numbers
        def date_serializer(o):
            # json.dumps default hook: datetime -> ISO-8601 string.
            if isinstance(o, datetime):
                return o.isoformat()
        json_string = ''
        try:
            d = self.__dict__.copy()
            d['status'] = self.render_status().value
            d['render'] = self.render.__dict__.copy()
            for key in ['thread', 'process']:  # remove unwanted keys from JSON
                d['render'].pop(key, None)
            d['render']['status'] = d['status']
            # jobs from current_session generate percent completed
            # jobs after loading server pull in a saved value. Have to check if callable object or not
            percent_complete = self.render.percent_complete if isinstance(self.render.percent_complete, numbers.Number) \
                else self.render.percent_complete()
            d['render']['percent_complete'] = percent_complete
            json_string = json.dumps(d, default=date_serializer)
        except Exception as e:
            logger.error("Error converting to JSON: {}".format(e))
        return json_string
def render_factory(input_path, output_path):
    """Return a renderer suited to input_path; currently only Blender projects.

    Returns None for any input that is not a .blend file.
    """
    lowered = input_path.lower()
    if '.blend' in lowered:
        return BlenderRenderer(input_path, output_path)
    return None
class RenderServer:
    """Class-level (singleton-style) render queue manager and scheduler.

    All state lives in class attributes and all behavior is classmethods;
    the queue is persisted to JSON_FILE between runs.
    """
    render_queue = []    # every RenderJob, including archived ones
    render_clients = []  # registered remote clients (not implemented yet)
    # Cap on concurrently running jobs per renderer type.
    maximum_renderer_instances = {'Blender': 2, 'After Effects': 1, 'ffmpeg': 4}
    host_name = socket.gethostname()
    port = 8080
    last_saved_counts = {}  # job_counts() snapshot at last save; used to skip redundant saves
    def __init__(self):
        pass
    @classmethod
    def add_to_render_queue(cls, render_job, force_start=False):
        """Queue a job; start immediately when force_start, else re-evaluate the queue."""
        if not render_job.client:
            logger.debug('Adding priority {} job to render queue: {}'.format(render_job.priority, render_job.render))
            cls.render_queue.append(render_job)
            if force_start:
                cls.start_job(render_job)
            else:
                cls.evaluate_queue()
        else:
            # todo: implement client rendering
            logger.warning('remote client rendering not implemented yet')
    @classmethod
    def running_jobs(cls):
        """Return all jobs currently in the RUNNING state."""
        return cls.jobs_with_status(RenderStatus.RUNNING)
    @classmethod
    def pending_jobs(cls):
        """Return NOT_STARTED plus SCHEDULED jobs."""
        pending_jobs = cls.jobs_with_status(RenderStatus.NOT_STARTED)
        pending_jobs.extend(cls.jobs_with_status(RenderStatus.SCHEDULED))
        return pending_jobs
    @classmethod
    def jobs_with_status(cls, status, priority_sorted=False, include_archived=True):
        """Return jobs matching `status`, optionally sorted by ascending priority."""
        found_jobs = [x for x in cls.render_queue if x.render_status() == status]
        if not include_archived:
            found_jobs = [x for x in found_jobs if not x.archived]
        if priority_sorted:
            found_jobs = sorted(found_jobs, key=lambda a: a.priority, reverse=False)
        return found_jobs
    @classmethod
    def clear_history(cls):
        """Archive (hide, not delete) all finished jobs."""
        to_remove = [x for x in cls.render_queue if x.render_status() in [RenderStatus.CANCELLED,
                                                                          RenderStatus.COMPLETED, RenderStatus.ERROR]]
        for x in to_remove:
            x.archived = True
    @classmethod
    def load_history(cls, json_path=None):
        """Rebuild the render queue from a saved JSON history file (JSON_FILE by default)."""
        input_path = json_path or JSON_FILE
        if os.path.exists(input_path):
            f = open(input_path)
            job_list = json.load(f)
            for job in job_list:
                # Identify renderer type and recreate Renderer object
                # TODO: refactor to factory class
                job_render_object = None
                if job['renderer'] == 'Blender':
                    job_render_object = BlenderRenderer(job['render']['input'], job['render']['output'])
                elif job['renderer'] == 'After Effects':
                    # NOTE(review): the constructed AERenderer is discarded, so
                    # job_render_object stays None for AE jobs and the attribute
                    # copy below will raise — looks like a missing assignment.
                    AERenderer()
                elif job['renderer'] == 'ffmpeg':
                    job_render_object = FFMPEGRenderer(job['render']['input'], job['render']['output'])
                # Load Renderer values
                for key, val in job['render'].items():
                    if val and key in ['start_time', 'end_time']:  # convert date strings back into date objects
                        job_render_object.__dict__[key] = datetime.fromisoformat(val)
                    else:
                        job_render_object.__dict__[key] = val
                job_render_object.status = RenderStatus[job['status'].upper()]
                job.pop('render', None)
                # Create RenderJob with re-created Renderer object
                # NOTE(review): job['client'] is passed positionally into the
                # `owner` parameter of RenderJob.__init__ — verify intent.
                new_job = RenderJob(job_render_object, job['priority'], job['client'])
                for key, val in job.items():
                    if key in ['date_created']:  # convert date strings back to datetime objects
                        new_job.__dict__[key] = datetime.fromisoformat(val)
                    else:
                        new_job.__dict__[key] = val
                new_job.__delattr__('status')
                # Handle older loaded jobs that were cancelled before closing
                if new_job.render_status() == RenderStatus.RUNNING:
                    new_job.render.status = RenderStatus.CANCELLED
                # finally add back to render queue
                cls.render_queue.append(new_job)
            f.close()
        cls.last_saved_counts = cls.job_counts()
    @classmethod
    def save_history(cls, json_path=None):
        """Save job history to JSON file"""
        try:
            logger.debug("Saving Render History")
            new_list = []
            for job in cls.render_queue:
                new_list.append(json.loads(job.json()))
            output_path = json_path or JSON_FILE
            with open(output_path, 'w') as f:
                json.dump(new_list, f, indent=4)
            cls.last_saved_counts = cls.job_counts()
        except Exception as e:
            logger.error("Error saving jobs JSON: {}".format(e))
    @classmethod
    def evaluate_queue(cls):
        """Start eligible jobs subject to per-renderer caps, priority, and schedules."""
        instances = cls.renderer_instances()
        not_started = cls.jobs_with_status(RenderStatus.NOT_STARTED, priority_sorted=True)
        if not_started:
            for job in not_started:
                renderer = job.render.renderer
                # Lower priority number outranks higher; hold back while a
                # higher-priority job is still running.
                higher_priority_jobs = [x for x in cls.running_jobs() if x.priority < job.priority]
                # True when this renderer type is already at its concurrency cap.
                max_renderers = renderer in instances.keys() and instances[
                    renderer] >= cls.maximum_renderer_instances.get(renderer, 1)
                if not max_renderers and not higher_priority_jobs:
                    cls.start_job(job)
        scheduled = cls.jobs_with_status(RenderStatus.SCHEDULED, priority_sorted=True)
        for job in scheduled:
            if job.scheduled_start <= datetime.now():
                cls.start_job(job)
        # Persist only when the status histogram changed since the last save.
        if cls.last_saved_counts != cls.job_counts():
            cls.save_history()
    @classmethod
    def start_job(cls, job):
        """Kick off a job's underlying Renderer on its worker thread."""
        logger.info('Starting {}render: {} - Priority {}'.format('scheduled ' if job.scheduled_start else '', job.name,
                                                                 job.priority))
        job.render.start()
    @classmethod
    def cancel_job(cls, job):
        """Cancel a job; returns True when the job was in a cancellable state."""
        logger.info('Cancelling job ID: {}'.format(job.id))
        if job.render_status() in [RenderStatus.NOT_STARTED, RenderStatus.RUNNING, RenderStatus.ERROR]:
            job.render.stop()
            job.render.status = RenderStatus.CANCELLED
            return True
        return False
    @classmethod
    def renderer_instances(cls):
        """Return a Counter of renderer name -> number of running jobs."""
        from collections import Counter
        all_instances = [x.render.renderer for x in cls.running_jobs()]
        return Counter(all_instances)
    @classmethod
    def job_counts(cls):
        """Return a dict of status value -> job count (includes archived jobs)."""
        job_counts = {}
        for job_status in RenderStatus:
            job_counts[job_status.value] = len(RenderServer.jobs_with_status(job_status))
        return job_counts
    @classmethod
    def status(cls):
        """Return a health/metrics snapshot (CPU, memory, job counts) for this host."""
        stats = {"timestamp": datetime.now().isoformat(),
                 "platform": platform.platform(),
                 "cpu_percent": psutil.cpu_percent(percpu=False),
                 "cpu_count": psutil.cpu_count(),
                 "memory_total": psutil.virtual_memory().total,
                 "memory_available": psutil.virtual_memory().available,
                 "memory_percent": psutil.virtual_memory().percent,
                 "job_counts": RenderServer.job_counts(),
                 "host_name": RenderServer.host_name
                 }
        return stats
    @classmethod
    def all_jobs(cls):
        """Return every non-archived job in the queue."""
        all_jobs = [x for x in RenderServer.render_queue if not x.archived]
        return all_jobs
    @classmethod
    def register_client(cls, hostname):
        # todo: register this
        # NOTE(review): stub — fires a GET and prints the code; no registration yet.
        response = requests.get("https://{}/_register_".format(hostname))
        print(response.status_code)
        pass
    @classmethod
    def unregister_client(cls):
        # todo: implement client unregistration
        pass
@app.get('/jobs')
def jobs_json():
    """Return every non-archived job in the queue as a JSON array."""
    active = [job for job in RenderServer.render_queue if not job.archived]
    return jsonify([json.loads(job.json()) for job in active])
@app.get('/jobs/<status_val>')
def filtered_jobs_json(status_val):
    """Return all jobs whose status matches the status string in the URL."""
    target_status = string_to_status(status_val)
    matching = RenderServer.jobs_with_status(target_status)
    return jsonify([json.loads(job.json()) for job in matching])
@app.get('/full_status')
def full_status():
    """Aggregate status plus job lists from every known render server."""
    full_results = {'timestamp': datetime.now().isoformat(), 'servers': {}}
    # todo: iterate through all servers
    server_list = [socket.gethostname()]
    try:
        for server_hostname in server_list:
            if server_hostname == local_hostname:
                # Local server: query in-process, no HTTP round trip needed.
                status_data = RenderServer.status()
                job_data = [json.loads(x.json()) for x in RenderServer.render_queue if not x.archived]
            else:
                status_data = requests.get(f'http://{server_hostname}:8080/status', timeout=1).json()
                job_data = requests.get(f'http://{server_hostname}:8080/jobs', timeout=1).json()
            full_results['servers'][server_hostname] = {'status': status_data, 'jobs': job_data}
    except Exception as e:
        logger.error(f"Exception fetching full status: {e}")
    return full_results
@app.post('/add_job')
def add_job():
    """Create new job and add to server render queue.

    Expects a JSON body with "renderer", "input" and "output"; optional keys:
    "engine" (Blender), "args" (ffmpeg), "priority", "force_start".
    Returns the new job as JSON, or {"error": ...} with HTTP 400.
    """
    payload = request.json
    # FIX: validate required fields up front so a malformed request yields a
    # clean 400 instead of an unhandled KeyError (HTTP 500).
    missing = [key for key in ("renderer", "input", "output") if key not in payload]
    if missing:
        err_msg = "Cannot add job. Missing required field(s): {}".format(", ".join(missing))
        logger.error(err_msg)
        return {"error": err_msg}, 400
    renderer = payload["renderer"]
    input_path = payload["input"]
    output_path = payload["output"]
    if not os.path.exists(input_path):
        err_msg = f"Cannot add job. Cannot find input file: {input_path}"
        logger.error(err_msg)
        return {"error": err_msg}, 400
    # todo: create factory class for creating renderers
    if "blender" in renderer:
        render_job = BlenderRenderer(input_path, output_path)
        render_job.engine = payload.get('engine', 'BLENDER_EEVEE')
    elif "aerender" in renderer:
        render_job = AERenderer(input_path, output_path)
    elif "ffmpeg" in renderer:
        render_job = FFMPEGRenderer(input_path, output_path, args=payload.get('args', None))
    else:
        err_msg = "Unknown renderer: {}".format(renderer)
        logger.error(err_msg)
        return {'error': err_msg}, 400
    new_job = RenderJob(render_job, priority=payload.get('priority', 2))
    RenderServer.add_to_render_queue(new_job, force_start=payload.get('force_start', False))
    return new_job.json()
@app.get('/cancel_job')
def cancel_job():
    """Cancel a job by id; requires an explicit ?confirm=true query parameter.

    Returns {'result': bool} on a cancel attempt, or {'error': ...} when the
    id is missing, unconfirmed, ambiguous, or unknown.
    """
    job_id = request.args.get('id', None)
    # FIX: query parameters arrive as strings, so any non-empty value —
    # including "?confirm=false" — was previously truthy and counted as
    # confirmation. Accept only explicit truthy spellings.
    confirm = request.args.get('confirm', '').lower() in ('1', 'true', 'yes')
    if not job_id:
        return jsonify({'error': 'job id not found'})
    elif not confirm:
        return jsonify({'error': 'confirmation required'})
    else:
        found = [x for x in RenderServer.render_queue if x.id == job_id]
        if len(found) > 1:
            return jsonify({'error': 'multiple jobs found for ID {}'.format(job_id)})
        elif found:
            success = RenderServer.cancel_job(found[0])
            return jsonify({'result': success})
    return jsonify({'error': 'job not found'})
@app.get('/clear_history')
def clear_history():
    """Archive all finished (completed/cancelled/errored) jobs."""
    RenderServer.clear_history()
    return jsonify({'result': True})
@app.route('/status')
def status():
    """Return this server's health/metrics snapshot as JSON."""
    return jsonify(RenderServer.status())
@app.route('/')
def default():
    """Liveness check endpoint."""
    return "Server running"
def start_server(background_thread=False):
    """Load history, start the queue-evaluation loop, and serve the Flask app.

    When background_thread is True this returns immediately after spawning
    the server thread; otherwise it blocks until the server thread exits.
    """
    def eval_loop():
        # Re-evaluate the queue once per second (starts pending/scheduled jobs).
        while True:
            RenderServer.evaluate_queue()
            time.sleep(1)
    with open('config.yaml') as f:
        # NOTE(review): config is loaded but never used — wire it up or remove.
        config = yaml.load(f, Loader=yaml.FullLoader)
    # disable most Flask logging
    flask_log = logging.getLogger('werkzeug')
    flask_log.setLevel(logging.ERROR)
    logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
    # Setup the RenderServer object
    RenderServer.load_history()
    RenderServer.evaluate_queue()
    thread = threading.Thread(target=eval_loop, daemon=True)
    thread.start()
    server_thread = threading.Thread(target=lambda: app.run(host=RenderServer.host_name, port=RenderServer.port, debug=False, use_reloader=False))
    server_thread.start()
    if not background_thread:
        server_thread.join()
if __name__ == '__main__':
    # Run the render server in the foreground (blocks until shutdown).
    start_server()