Async Server Status Fetch (#11)

* Add background fetching to server_proxy

* Update UI to use server_proxy fetched jobs

* Fix issue getting status with empty jobs_cache

* Fix issue with jobs not appearing after switching servers

* Remove job_cache from dashboard_window and utilize server_proxy caches

* Remove jobs from table that shouldn't be there

* Streamline how we're handling offline tracking and handle connection error when fetching thumbnail

* Add ability to remove any manually added servers
This commit is contained in:
2023-06-09 18:38:58 -05:00
committed by GitHub
parent 75de367153
commit 38936d40ab
4 changed files with 179 additions and 93 deletions

View File

@@ -2,6 +2,8 @@ import logging
import os
import json
import requests
import time
import threading
from lib.render_workers.base_worker import RenderStatus
from requests_toolbelt.multipart import MultipartEncoder, MultipartEncoderMonitor
@@ -13,15 +15,21 @@ categories = [RenderStatus.RUNNING, RenderStatus.ERROR, RenderStatus.NOT_STARTED
RenderStatus.COMPLETED, RenderStatus.CANCELLED, RenderStatus.UNDEFINED]
logger = logging.getLogger()
OFFLINE_MAX = 2
class RenderServerProxy:
def __init__(self, hostname=None, server_port="8080"):
def __init__(self, hostname, server_port="8080"):
self._hostname = hostname
self.port = server_port
self.fetched_status_data = None
self.__jobs_cache_token = None
self.__jobs_cache = []
self.__update_in_background = False
self.__background_thread = None
self.__offline_flags = 0
self.update_cadence = 5
@property
def hostname(self):
@@ -36,15 +44,29 @@ class RenderServerProxy:
status = self.request_data('status')
return status
def is_online(self):
    """Report whether the server is considered reachable.

    While background polling is active, reachability is judged from the
    consecutive-failure counter (fewer than OFFLINE_MAX failures means
    online); otherwise a blocking connect() probe is issued.
    """
    if not self.__update_in_background:
        return self.connect() is not None
    return self.__offline_flags < OFFLINE_MAX
def status(self):
    """Return a short status string: "Offline", "Available", or "N running"."""
    if not self.is_online():
        return "Offline"
    cached = self.__jobs_cache or []
    running = [job for job in cached if job['status'] == 'running']
    if running:
        return f"{len(running)} running"
    return "Available"
def request_data(self, payload, timeout=5):
    """GET /api/<payload> and return the decoded JSON body, or None on failure.

    A 200 response resets the consecutive-failure counter; a connection
    error increments it so is_online() can report the server offline after
    OFFLINE_MAX consecutive failures. All other errors are logged and
    swallowed — callers only ever see the JSON dict or None.
    """
    try:
        req = self.request(payload, timeout)
        # status_code == 200 already implies req.ok, so one check suffices.
        if req.status_code == 200:
            self.__offline_flags = 0
            return req.json()
    except json.JSONDecodeError as e:
        logger.debug(f"JSON decode error: {e}")
    except requests.ConnectionError as e:
        logger.error(f"Connection error: {e}")
        self.__offline_flags += 1
    except Exception as e:
        logger.exception(f"Uncaught exception: {e}")
    return None
@@ -52,19 +74,37 @@ class RenderServerProxy:
def request(self, payload, timeout=5):
    """Issue a GET against the server's REST API endpoint for *payload*."""
    url = f'http://{self.hostname}:{self.port}/api/{payload}'
    return requests.get(url, timeout=timeout)
def start_background_update(self):
self.__update_in_background = True
def thread_worker():
while self.__update_in_background:
self.__update_job_cache()
time.sleep(self.update_cadence)
self.__background_thread = threading.Thread(target=thread_worker)
self.__background_thread.daemon = True
self.__background_thread.start()
def stop_background_update(self):
self.__update_in_background = False
def get_jobs(self, timeout=5, ignore_token=False):
if not self.__update_in_background:
self.__update_job_cache(timeout, ignore_token)
return self.__jobs_cache.copy() if self.__jobs_cache else None
def __update_job_cache(self, timeout=5, ignore_token=False):
    """Fetch the job list from the server and refresh the local cache.

    The cached token is echoed back to the server (unless ignore_token)
    so it can identify the client's last-seen snapshot. Jobs are ordered
    by the module-level `categories` status priority. Returns the ordered
    job list, or None when the request failed.
    """
    if self.__jobs_cache_token and not ignore_token:
        endpoint = f'jobs?token={self.__jobs_cache_token}'
    else:
        endpoint = 'jobs'
    status_result = self.request_data(endpoint, timeout=timeout)
    if status_result is None:
        return None
    # Stable bucket ordering: jobs grouped by the fixed category sequence.
    ordered = []
    for category in categories:
        matches = [job for job in status_result['jobs']
                   if job['status'] == category.value]
        if matches:
            ordered.extend(matches)
    self.__jobs_cache = ordered
    self.__jobs_cache_token = status_result['token']
    return ordered
def get_data(self, timeout=5):
all_data = self.request_data('full_status', timeout=timeout)