Mirror of https://github.com/blw1138/Zordon.git
Synced 2025-12-17 16:58:12 +00:00
Assign frame ranges to servers based on their CPU count (#19)
* Expose renderer availability in status API
* Remove redundant is_available_for_job API call
* New server split logic by CPU count, moved to server_helper.py
* Remove old dead code
* Add RenderStatus.WAITING to proxy categories
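The heart of the change is divide_frames_by_cpu_count in the diff below: each server receives a slice of the frame range proportional to its CPU count, and the last server absorbs the rounding remainder. A minimal sketch of the arithmetic, with made-up hosts and core counts:

    # Made-up example: frames 1..100 across a 12-core and a 4-core server.
    total_frames, total_cpus = 100, 12 + 4
    big_share = round(12 / total_cpus * total_frames)   # 75 -> frames (1, 75)
    small_share = total_frames - big_share              # 25 -> frames (76, 100)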
@@ -1,9 +1,13 @@
 import logging
 import os
+import socket
 import subprocess
 import threading
 
-from .ffmpeg_helper import generate_thumbnail, save_first_frame
+import psutil
+
+from lib.server.server_proxy import RenderServerProxy
+from lib.utilities.ffmpeg_helper import generate_thumbnail, save_first_frame
 
 logger = logging.getLogger()
 
@@ -45,3 +49,75 @@ def generate_thumbnail_for_job(job, thumb_video_path, thumb_image_path, max_widt
     if video_files and not os.path.exists(thumb_video_path):
         x = threading.Thread(target=generate_thumb_thread, args=(video_files[0],))
         x.start()
+
+
+def divide_frames_evenly(start_frame, end_frame, num_servers):
+    frame_range = end_frame - start_frame + 1
+    frames_per_server = frame_range // num_servers
+    leftover_frames = frame_range % num_servers
+
+    ranges = []
+    current_start = start_frame
+    for i in range(num_servers):
+        current_end = current_start + frames_per_server - 1
+        if leftover_frames > 0:
+            current_end += 1
+            leftover_frames -= 1
+        if current_start <= current_end:
+            ranges.append((current_start, current_end))
+        current_start = current_end + 1
+
+    return ranges
+
+
+def divide_frames_by_cpu_count(frame_start, frame_end, servers):
+    total_frames = frame_end - frame_start + 1
+    total_performance = sum(server['cpu_count'] for server in servers)
+
+    frame_ranges = {}
+    current_frame = frame_start
+    allocated_frames = 0
+
+    for i, server in enumerate(servers):
+        if i == len(servers) - 1:  # if it's the last server
+            # Give all remaining frames to the last server
+            num_frames = total_frames - allocated_frames
+        else:
+            num_frames = round((server['cpu_count'] / total_performance) * total_frames)
+        allocated_frames += num_frames
+
+        frame_end_for_server = current_frame + num_frames - 1
+
+        if current_frame <= frame_end_for_server:
+            frame_ranges[server['hostname']] = (current_frame, frame_end_for_server)
+        current_frame = frame_end_for_server + 1
+
+    return frame_ranges
+
+
+def find_available_servers(server_list, renderer, start_frame, end_frame):
+    local_hostname = socket.gethostname()
+    subjob_servers = [{'hostname': local_hostname, 'cpu_count': psutil.cpu_count(logical=False)}]
+    for hostname in server_list:
+        if hostname != local_hostname:
+            response = RenderServerProxy(hostname).get_status()
+            if response and response.get('renderers', {}).get(renderer, {}).get('is_ready', False):
+                subjob_servers.append({'hostname': hostname, 'cpu_count': int(response['cpu_count'])})
+
+    if len(subjob_servers) == 1:
+        logger.debug("No available servers to split job with. Skipping subjob creation.")
+        return subjob_servers
+
+    # Calculate respective frames for each server
+    breakdown = divide_frames_by_cpu_count(start_frame, end_frame, subjob_servers)
+    subjob_servers = [server for server in subjob_servers if breakdown.get(server['hostname']) is not None]
+    for server in subjob_servers:
+        server['frame_range'] = breakdown[server['hostname']]
+        server['total_frames'] = breakdown[server['hostname']][-1] - breakdown[server['hostname']][0] + 1
+
+    return subjob_servers
+
+
+if __name__ == "__main__":
+    found_servers = ['kamino.local', 'deathstar.local']
+    print(find_available_servers(found_servers, 'blender', 1, 5))
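find_available_servers above keeps only hosts whose status response marks the requested renderer as ready. Judging from the chained .get() lookups, the payload has roughly the shape sketched here; field values are invented and the real response may carry more keys:

    # Hypothetical get_status() payload, inferred from find_available_servers.
    response = {
        'cpu_count': 16,
        'renderers': {
            'blender': {'is_ready': True},
        },
    }
    # A host joins the split only if
    # response['renderers'][renderer]['is_ready'] is truthy.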
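An offline check of the two split helpers (the module path is a guess based on the lib.utilities.ffmpeg_helper import in the diff; the expected outputs follow from the code above):

    from lib.utilities.server_helper import divide_frames_by_cpu_count, divide_frames_evenly

    servers = [{'hostname': 'big.local', 'cpu_count': 12},
               {'hostname': 'small.local', 'cpu_count': 4}]
    print(divide_frames_by_cpu_count(1, 100, servers))
    # -> {'big.local': (1, 75), 'small.local': (76, 100)}

    print(divide_frames_evenly(1, 10, 3))
    # -> [(1, 4), (5, 7), (8, 10)]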