Improve performance on several API calls (#80)

* Streamline fetching renderer_info from the API - use threading for performance improvements

* Use concurrent.futures instead of threading (see the sketch below)

* Fix timeout issue with server proxy

* Minor fixes to code that handles proxy server online / offline status
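
A rough sketch of the concurrent.futures approach mentioned above, polling several render servers in parallel instead of one manually managed thread per server. The fetch_renderer_info helper, the renderer_info endpoint URL, and the use of requests are illustrative assumptions, not code from this repository:

    # Sketch only: fetch renderer_info from many servers with a thread pool.
    import concurrent.futures

    import requests  # assumed HTTP client for this illustration


    def fetch_renderer_info(hostname, timeout=5):
        # Hypothetical helper: query one server's renderer_info endpoint.
        resp = requests.get(f'http://{hostname}/renderer_info', timeout=timeout)
        resp.raise_for_status()
        return resp.json()


    def fetch_all_renderer_info(hostnames, max_workers=8):
        # Submit one request per server and collect results as they complete,
        # so one slow or offline server does not block the rest.
        results = {}
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
            futures = {pool.submit(fetch_renderer_info, host): host for host in hostnames}
            for future in concurrent.futures.as_completed(futures):
                host = futures[future]
                try:
                    results[host] = future.result()
                except Exception:
                    results[host] = None  # treat a failed request as offline
        return results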
2024-08-03 11:02:40 -05:00
committed by GitHub
parent 47770c4fdd
commit 21de69ca4f
6 changed files with 88 additions and 51 deletions


@@ -65,7 +65,7 @@ class RenderServerProxy:
         if self.__update_in_background:
             return self.__offline_flags < OFFLINE_MAX
         else:
-            return self.connect() is not None
+            return self.get_status() is not None
 
     def status(self):
         if not self.is_online():
@@ -76,8 +76,9 @@ class RenderServerProxy:
     def request_data(self, payload, timeout=5):
         try:
             req = self.request(payload, timeout)
-            if req.ok and req.status_code == 200:
+            if req.ok:
                 self.__offline_flags = 0
+                if req.status_code == 200:
                     return req.json()
         except json.JSONDecodeError as e:
             logger.debug(f"JSON decode error: {e}")
@@ -90,10 +91,10 @@ class RenderServerProxy:
         except Exception as e:
             logger.exception(f"Uncaught exception: {e}")
 
-        # If server unexpectedly drops off the network, remove from Zeroconf list
+        # If server unexpectedly drops off the network, stop background updates
         if self.__offline_flags > OFFLINE_MAX:
             try:
-                ZeroconfServer.client_cache.pop(self.hostname)
+                self.stop_background_update()
             except KeyError:
                 pass
         return None
@@ -108,9 +109,11 @@ class RenderServerProxy:
         self.__update_in_background = True
 
         def thread_worker():
+            logger.debug(f'Starting background updates for {self.hostname}')
             while self.__update_in_background:
                 self.__update_job_cache()
                 time.sleep(self.update_cadence)
+            logger.debug(f'Stopping background updates for {self.hostname}')
 
         self.__background_thread = threading.Thread(target=thread_worker)
         self.__background_thread.daemon = True
@@ -127,7 +130,11 @@ class RenderServerProxy:
             self.__update_job_cache(timeout, ignore_token)
         return self.__jobs_cache.copy() if self.__jobs_cache else None
 
-    def __update_job_cache(self, timeout=30, ignore_token=False):
+    def __update_job_cache(self, timeout=40, ignore_token=False):
+        if self.__offline_flags:  # if we're offline, don't bother with the long poll
+            ignore_token = True
         url = f'jobs_long_poll?token={self.__jobs_cache_token}' if (self.__jobs_cache_token and
                                                                      not ignore_token) else 'jobs'
         status_result = self.request_data(url, timeout=timeout)
@@ -151,7 +158,7 @@ class RenderServerProxy:
     def get_status(self):
         status = self.request_data('status')
 
-        if not self.system_cpu:
+        if status and not self.system_cpu:
             self.system_cpu = status['system_cpu']
             self.system_cpu_count = status['cpu_count']
             self.system_os = status['system_os']
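
Because request_data returns None when a request fails or the server is offline, the get_status change above guards the payload before reading from it. An illustrative calling pattern; the proxy object below is a stand-in, not code from this commit:

    # Illustrative only: request_data() yields None when the server cannot be
    # reached, so callers must check the payload before indexing into it.
    status = proxy.request_data('status')
    if status is not None:
        print(status['system_cpu'], status['cpu_count'], status['system_os'])
    else:
        print(f'{proxy.hostname} appears to be offline')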