Refactor: Move all initialization logic out of api_server and into init (#91)

* Zeroconf logging improvements

* Ignore RuntimeErrors in background threads to prevent issues during shutdown

* Migrate startup code from api_server.py to init.py

* Add error handlers to the API server to handle detached instances

* Integrate RenderQueue eval loop into RenderQueue object

* Silently catch RuntimeErrors on evaluate_queue

* Stop background queue updates in prepare_for_shutdown
2024-08-08 04:47:22 -05:00 · committed by GitHub
parent 6afb6e65a6 · commit 3600eeb21b
8 changed files with 290 additions and 177 deletions
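Taken together, the diffs below move startup ownership out of the Flask module: the headless entry script now calls run(server_only=True) from init, and init.run() takes over config loading, storage paths, the RenderQueue background loop, Zeroconf, and the API server thread. A rough sketch of the resulting call order, paraphrased from the diffs rather than copied from the repository:

# Sketch of the post-refactor startup flow (paraphrased from the diffs below).
from init import run

# Headless / CLI mode: run() loads config, calls RenderQueue.start() and
# ZeroconfServer.start(), launches start_server() on a daemon thread, then joins it.
run(server_only=True)

# GUI mode: same initialization, after which __show_gui() lazily imports PyQt6
# and runs the QApplication event loop.
# run(server_only=False)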

View File

@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-from src.api.api_server import start_server
+from init import run

 if __name__ == '__main__':
-    start_server()
+    run(server_only=True)

View File

@@ -2,14 +2,12 @@
 import concurrent.futures
 import json
 import logging
-import multiprocessing
 import os
 import pathlib
 import shutil
 import socket
 import ssl
 import tempfile
-import threading
 import time
 from datetime import datetime
 from zipfile import ZipFile
@@ -17,10 +15,10 @@ from zipfile import ZipFile
 import psutil
 import yaml
 from flask import Flask, request, send_file, after_this_request, Response, redirect, url_for, abort
+from sqlalchemy.orm.exc import DetachedInstanceError

 from src.api.add_job_helpers import handle_uploaded_project_files, process_zipped_project
 from src.api.preview_manager import PreviewManager
-from src.api.serverproxy_manager import ServerProxyManager
 from src.distributed_job_manager import DistributedJobManager
 from src.engines.core.base_worker import string_to_status, RenderStatus
 from src.engines.engine_manager import EngineManager
@@ -39,6 +37,29 @@ categories = [RenderStatus.RUNNING, RenderStatus.ERROR, RenderStatus.NOT_STARTED,
               RenderStatus.COMPLETED, RenderStatus.CANCELLED]


+# -- Error Handlers --
+@server.errorhandler(JobNotFoundError)
+def handle_job_not_found(job_error):
+    return str(job_error), 400
+
+
+@server.errorhandler(DetachedInstanceError)
+def handle_detached_instance(error):
+    # logger.debug(f"detached instance: {error}")
+    return "Unavailable", 503
+
+
+@server.errorhandler(Exception)
+def handle_general_error(general_error):
+    err_msg = f"Server error: {general_error}"
+    logger.error(err_msg)
+    return err_msg, 500
+
+
+# -- Jobs --
 def sorted_jobs(all_jobs, sort_by_date=True):
     if not sort_by_date:
         sorted_job_list = []
@@ -60,9 +81,11 @@ def jobs_json():
         job_cache_int = int(json.dumps(all_jobs).__hash__())
         job_cache_token = num_to_alphanumeric(job_cache_int)
         return {'jobs': all_jobs, 'token': job_cache_token}
+    except DetachedInstanceError as e:
+        raise e
     except Exception as e:
         logger.error(f"Error fetching jobs_json: {e}")
-        return {}, 500
+        raise e


 @server.get('/api/jobs_long_poll')
@@ -78,9 +101,11 @@ def long_polling_jobs():
             if time.time() - start_time > 30:
                 return {}, 204
             time.sleep(1)
+    except DetachedInstanceError as e:
+        raise e
     except Exception as e:
         logger.error(f"Error fetching long_polling_jobs: {e}")
-        return {}, 500
+        raise e


 @server.route('/api/job/<job_id>/thumbnail')
@@ -107,7 +132,7 @@ def job_thumbnail(job_id):
             file_mime_type = mime_types.get(preview_to_send['kind'], 'unknown')
             return send_file(preview_to_send['filename'], mimetype=file_mime_type)
         except Exception as e:
-            logger.exception(f'Error getting thumbnail: {e}')
+            logger.error(f'Error getting thumbnail: {e}')
             return f'Error getting thumbnail: {e}', 500
     return "No thumbnail available", 404
@@ -145,11 +170,6 @@ def subjob_update_notification(job_id):
     return "Job not found", 404


-@server.errorhandler(JobNotFoundError)
-def handle_job_not_found(job_error):
-    return f'Cannot find job with ID {job_error.job_id}', 400
-
-
 @server.get('/api/job/<job_id>')
 def get_job_status(job_id):
     return RenderQueue.job_with_id(job_id).json()
@@ -488,75 +508,24 @@ def get_disk_benchmark():
     return {'write_speed': results[0], 'read_speed': results[-1]}


-def start_server():
-    def eval_loop(delay_sec=1):
-        while True:
-            try:
-                RenderQueue.evaluate_queue()
-            except Exception as e:
-                logger.error(f"Uncaught error while evaluating queue: {e}")
-            time.sleep(delay_sec)
-
-    try:
-        Config.setup_config_dir()
-        Config.load_config(system_safe_path(os.path.join(Config.config_dir(), 'config.yaml')))
-
-        # suppress requests logging
-        logging.getLogger("requests").setLevel(logging.WARNING)
-        logging.getLogger("urllib3").setLevel(logging.WARNING)
-
-        # get hostname
-        local_hostname = socket.gethostname()
-        local_hostname = local_hostname + (".local" if not local_hostname.endswith(".local") else "")
-
-        # load flask settings
-        server.config['HOSTNAME'] = local_hostname
-        server.config['PORT'] = int(Config.port_number)
-        server.config['UPLOAD_FOLDER'] = system_safe_path(os.path.expanduser(Config.upload_folder))
-        server.config['MAX_CONTENT_PATH'] = Config.max_content_path
-        server.config['enable_split_jobs'] = Config.enable_split_jobs
-
-        # Setup storage directories
-        EngineManager.engines_path = system_safe_path(os.path.join(os.path.join(os.path.expanduser(Config.upload_folder),
-                                                                                'engines')))
-        os.makedirs(EngineManager.engines_path, exist_ok=True)
-        PreviewManager.storage_path = system_safe_path(os.path.join(os.path.expanduser(Config.upload_folder), 'previews'))
-        server.config['THUMBS_FOLDER'] = PreviewManager.storage_path  # todo: remove this
-
-        # Debug info
-        logger.debug(f"Upload directory: {server.config['UPLOAD_FOLDER']}")
-        logger.debug(f"Thumbs directory: {PreviewManager.storage_path}")
-        logger.debug(f"Engines directory: {EngineManager.engines_path}")
-
-        # disable most Flask logging
-        flask_log = logging.getLogger('werkzeug')
-        flask_log.setLevel(Config.flask_log_level.upper())
-
-        # check for updates for render engines if configured or on first launch
-        if Config.update_engines_on_launch or not EngineManager.get_engines():
-            EngineManager.update_all_engines()
-
-        # Set up the RenderQueue object
-        RenderQueue.load_state(database_directory=server.config['UPLOAD_FOLDER'])
-        ServerProxyManager.subscribe_to_listener()
-        DistributedJobManager.subscribe_to_listener()
-        thread = threading.Thread(target=eval_loop, kwargs={'delay_sec': Config.queue_eval_seconds}, daemon=True)
-        thread.start()
-
-        logger.info(f"Starting Zordon Render Server - Hostname: '{server.config['HOSTNAME']}:'")
-        ZeroconfServer.configure("_zordon._tcp.local.", server.config['HOSTNAME'], server.config['PORT'])
-        ZeroconfServer.properties = {'system_cpu': current_system_cpu(), 'system_cpu_cores': multiprocessing.cpu_count(),
-                                     'system_os': current_system_os(),
-                                     'system_os_version': current_system_os_version()}
-        ZeroconfServer.start()
-        try:
-            server.run(host='0.0.0.0', port=server.config['PORT'], debug=Config.flask_debug_enable,
-                       use_reloader=False, threaded=True)
-        finally:
-            RenderQueue.save_state()
-    finally:
-        ZeroconfServer.stop()
+def start_server(hostname=None):
+    # get hostname
+    if not hostname:
+        local_hostname = socket.gethostname()
+        hostname = local_hostname + (".local" if not local_hostname.endswith(".local") else "")

+    # load flask settings
+    server.config['HOSTNAME'] = hostname
+    server.config['PORT'] = int(Config.port_number)
+    server.config['UPLOAD_FOLDER'] = system_safe_path(os.path.expanduser(Config.upload_folder))
+    server.config['MAX_CONTENT_PATH'] = Config.max_content_path
+    server.config['enable_split_jobs'] = Config.enable_split_jobs

+    # disable most Flask logging
+    flask_log = logging.getLogger('werkzeug')
+    flask_log.setLevel(Config.flask_log_level.upper())

+    logger.debug('Starting API server')
+    server.run(host='0.0.0.0', port=server.config['PORT'], debug=Config.flask_debug_enable, use_reloader=False,
+               threaded=True)
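The api_server changes above route DetachedInstanceError to a dedicated Flask error handler: the job views re-raise it so a single handler can answer 503 while everything else falls through to the generic 500 handler. A minimal standalone sketch of that pattern, with a hypothetical app and fetch_jobs() helper rather than this project's code:

# Standalone illustration of the errorhandler pattern used above.
# The app, route, and fetch_jobs() helper are hypothetical stand-ins.
from flask import Flask
from sqlalchemy.orm.exc import DetachedInstanceError

app = Flask(__name__)


def fetch_jobs():
    # Hypothetical stand-in for a RenderQueue-backed job listing.
    return []


@app.errorhandler(DetachedInstanceError)
def handle_detached_instance(error):
    # The ORM session is gone (e.g. mid-shutdown); report temporary unavailability, not a 500.
    return "Unavailable", 503


@app.errorhandler(Exception)
def handle_general_error(error):
    return f"Server error: {error}", 500


@app.route('/api/jobs')
def jobs():
    try:
        return {'jobs': fetch_jobs()}
    except DetachedInstanceError:
        raise  # let the dedicated handler produce the 503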

View File

@@ -1,22 +1,27 @@
 ''' app/init.py '''
 import logging
+import multiprocessing
 import os
+import socket
 import sys
 import threading
+import time
 from collections import deque

-from PyQt6.QtCore import QObject, pyqtSignal
-from PyQt6.QtWidgets import QApplication
-
 from src.api.api_server import start_server
+from src.api.preview_manager import PreviewManager
+from src.api.serverproxy_manager import ServerProxyManager
+from src.distributed_job_manager import DistributedJobManager
 from src.engines.engine_manager import EngineManager
 from src.render_queue import RenderQueue
-from src.ui.main_window import MainWindow
 from src.utilities.config import Config
-from src.utilities.misc_helper import system_safe_path
+from src.utilities.misc_helper import system_safe_path, current_system_cpu, current_system_os, current_system_os_version
+from src.utilities.zeroconf_server import ZeroconfServer
+
+logger = logging.getLogger()


-def run() -> int:
+def run(server_only=False) -> int:
     """
     Initializes the application and runs it.
@@ -24,43 +29,93 @@ def run() -> int:
         int: The exit status code.
     """
+    # setup logging
+    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(module)s: %(message)s', datefmt='%d-%b-%y %H:%M:%S',
+                        level=Config.server_log_level.upper())
+    logging.getLogger("requests").setLevel(logging.WARNING)  # suppress noisy requests/urllib3 logging
+    logging.getLogger("urllib3").setLevel(logging.WARNING)
+
+    # Setup logging for console ui
+    buffer_handler = __setup_buffer_handler() if not server_only else None
+
+    logger.info(f"Starting Zordon Render Server")
+    return_code = 0
     try:
         # Load Config YAML
         Config.setup_config_dir()
         Config.load_config(system_safe_path(os.path.join(Config.config_dir(), 'config.yaml')))

+        # configure default paths
         EngineManager.engines_path = system_safe_path(
             os.path.join(os.path.join(os.path.expanduser(Config.upload_folder),
                                       'engines')))
-        logging.basicConfig(format='%(asctime)s: %(levelname)s: %(module)s: %(message)s', datefmt='%d-%b-%y %H:%M:%S',
-                            level=Config.server_log_level.upper())
-
-        app: QApplication = QApplication(sys.argv)
-
-        # Start server in background
-        background_server = threading.Thread(target=start_server)
-        background_server.daemon = True
-        background_server.start()
-
-        # Setup logging for console ui
-        buffer_handler = BufferingHandler()
-        buffer_handler.setFormatter(logging.getLogger().handlers[0].formatter)
-        logger = logging.getLogger()
-        logger.addHandler(buffer_handler)
-
-        window: MainWindow = MainWindow()
-        window.buffer_handler = buffer_handler
-        window.show()
-        return_code = app.exec()
+        os.makedirs(EngineManager.engines_path, exist_ok=True)
+        PreviewManager.storage_path = system_safe_path(
+            os.path.join(os.path.expanduser(Config.upload_folder), 'previews'))
+
+        # Debug info
+        logger.debug(f"Upload directory: {os.path.expanduser(Config.upload_folder)}")
+        logger.debug(f"Thumbs directory: {PreviewManager.storage_path}")
+        logger.debug(f"Engines directory: {EngineManager.engines_path}")
+
+        # Set up the RenderQueue object
+        RenderQueue.load_state(database_directory=system_safe_path(os.path.expanduser(Config.upload_folder)))
+        ServerProxyManager.subscribe_to_listener()
+        DistributedJobManager.subscribe_to_listener()
+
+        # check for updates for render engines if configured or on first launch
+        if Config.update_engines_on_launch or not EngineManager.get_engines():
+            EngineManager.update_all_engines()
+
+        # get hostname
+        local_hostname = socket.gethostname()
+        local_hostname = local_hostname + (".local" if not local_hostname.endswith(".local") else "")
+
+        # configure and start API server
+        api_server = threading.Thread(target=start_server, args=(local_hostname,))
+        api_server.daemon = True
+        api_server.start()
+
+        # start zeroconf server
+        ZeroconfServer.configure("_zordon._tcp.local.", local_hostname, Config.port_number)
+        ZeroconfServer.properties = {'system_cpu': current_system_cpu(),
+                                     'system_cpu_cores': multiprocessing.cpu_count(),
+                                     'system_os': current_system_os(),
+                                     'system_os_version': current_system_os_version()}
+        ZeroconfServer.start()
+        logger.info(f"Zordon Render Server started - Hostname: {local_hostname}")
+
+        RenderQueue.evaluation_inverval = Config.queue_eval_seconds
+        RenderQueue.start()
+
+        # start in gui or server only (cli) mode
+        logger.debug(f"Launching in {'server only' if server_only else 'GUI'} mode")
+        if server_only:  # CLI only
+            api_server.join()
+        else:  # GUI
+            return_code = __show_gui(buffer_handler)
+    except KeyboardInterrupt:
+        pass
     except Exception as e:
         logging.error(f"Unhandled exception: {e}")
         return_code = 1
     finally:
-        RenderQueue.prepare_for_shutdown()
+        # shut down gracefully
+        logger.info(f"Zordon Render Server is preparing to shut down")
+        try:
+            RenderQueue.prepare_for_shutdown()
+        except Exception as e:
+            logger.exception(f"Exception during prepare for shutdown: {e}")
+        ZeroconfServer.stop()
+        logger.info(f"Zordon Render Server has shut down")

     return sys.exit(return_code)


+def __setup_buffer_handler():
+    # lazy load GUI frameworks
+    from PyQt6.QtCore import QObject, pyqtSignal
+
+    class BufferingHandler(logging.Handler, QObject):
+        new_record = pyqtSignal(str)
@@ -70,9 +125,34 @@ class BufferingHandler(logging.Handler, QObject):
             self.buffer = deque(maxlen=capacity)  # Define a buffer with a fixed capacity

         def emit(self, record):
+            try:
                 msg = self.format(record)
                 self.buffer.append(msg)  # Add message to the buffer
                 self.new_record.emit(msg)  # Emit signal
+            except RuntimeError:
+                pass

         def get_buffer(self):
             return list(self.buffer)  # Return a copy of the buffer
+
+    buffer_handler = BufferingHandler()
+    buffer_handler.setFormatter(logging.getLogger().handlers[0].formatter)
+    logger = logging.getLogger()
+    logger.addHandler(buffer_handler)
+    return buffer_handler
+
+
+def __show_gui(buffer_handler):
+    # lazy load GUI frameworks
+    from PyQt6.QtWidgets import QApplication
+
+    # load application
+    app: QApplication = QApplication(sys.argv)
+
+    # configure main window
+    from src.ui.main_window import MainWindow
+    window: MainWindow = MainWindow()
+    window.buffer_handler = buffer_handler
+    window.show()
+
+    return app.exec()
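The __setup_buffer_handler() helper above keeps recent log output in memory so the Qt console window can display it, and its emit() now swallows RuntimeError while the GUI is tearing down. A Qt-free sketch of the same buffering idea (the capacity value and the __main__ demo are illustrative, not taken from the repository):

# Qt-free sketch of the buffering-handler idea from __setup_buffer_handler().
import logging
from collections import deque


class BufferingHandler(logging.Handler):
    def __init__(self, capacity=200):
        super().__init__()
        self.buffer = deque(maxlen=capacity)  # fixed-size ring buffer of formatted lines

    def emit(self, record):
        self.buffer.append(self.format(record))

    def get_buffer(self):
        return list(self.buffer)  # copy, so callers cannot mutate the ring buffer


if __name__ == "__main__":
    handler = BufferingHandler(capacity=5)
    handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    logging.getLogger().addHandler(handler)
    logging.getLogger().warning("hello")
    print(handler.get_buffer())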

View File

@@ -1,13 +1,15 @@
 import logging
 import os
+import threading
+import time

 from datetime import datetime

 from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker
+from sqlalchemy.orm.exc import DetachedInstanceError

+from src.utilities.status_utils import RenderStatus
+from src.engines.engine_manager import EngineManager
 from src.engines.core.base_worker import Base
-from src.utilities.status_utils import RenderStatus

 logger = logging.getLogger()
@@ -17,6 +19,9 @@ class JobNotFoundError(Exception):
         super().__init__(args)
         self.job_id = job_id

+    def __str__(self):
+        return f"Cannot find job with ID: {self.job_id}"
+

 class RenderQueue:
     engine = None
@@ -24,9 +29,36 @@ class RenderQueue:
     job_queue = []
     maximum_renderer_instances = {'blender': 1, 'aerender': 1, 'ffmpeg': 4}
     last_saved_counts = {}
+    is_running = False
+    __eval_thread = None
+    evaluation_inverval = 1

-    def __init__(self):
-        pass
+    # --------------------------------------------
+    # Start / Stop Background Updates
+    # --------------------------------------------
+    @classmethod
+    def start(cls):
+        cls.is_running = True
+        cls.__eval_thread = threading.Thread(target=cls.__eval_loop, daemon=True)
+        cls.__eval_thread.start()
+
+    @classmethod
+    def __eval_loop(cls):
+        while cls.is_running:
+            try:
+                RenderQueue.evaluate_queue()
+            except Exception as e:
+                logger.exception(f"Uncaught error while evaluating queue: {e}")
+            time.sleep(cls.evaluation_inverval)
+
+    @classmethod
+    def stop(cls):
+        cls.is_running = False
+
+    # --------------------------------------------
+    # Queue Management
+    # --------------------------------------------
     @classmethod
     def add_to_render_queue(cls, render_job, force_start=False):
@@ -89,6 +121,7 @@
     @classmethod
     def prepare_for_shutdown(cls):
         logger.debug("Closing session")
+        cls.stop()
         running_jobs = cls.jobs_with_status(RenderStatus.RUNNING)  # cancel all running jobs
         [cls.cancel_job(job) for job in running_jobs]
         cls.save_state()
@@ -105,6 +138,7 @@
     @classmethod
     def evaluate_queue(cls):
+        try:
             not_started = cls.jobs_with_status(RenderStatus.NOT_STARTED, priority_sorted=True)
             for job in not_started:
                 if cls.is_available_for_job(job.renderer, job.priority):
@@ -118,6 +152,8 @@
             if cls.last_saved_counts != cls.job_counts():
                 cls.save_state()
+        except DetachedInstanceError:
+            pass

     @classmethod
     def start_job(cls, job):
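With the evaluation loop now owned by RenderQueue, callers drive it through the class-level API shown above: init.run() sets evaluation_inverval (spelled as in the diff) before calling start(), and prepare_for_shutdown() calls stop() before cancelling jobs and saving state. A condensed sketch of that lifecycle, assuming the classmethods and Config values from the diffs; the upload path is a placeholder:

# Condensed lifecycle of the background queue loop, as wired up in init.run().
from src.render_queue import RenderQueue
from src.utilities.config import Config

RenderQueue.load_state(database_directory="/path/to/uploads")  # placeholder path
RenderQueue.evaluation_inverval = Config.queue_eval_seconds    # attribute name as spelled upstream
RenderQueue.start()                 # spawns the daemon thread running __eval_loop()
try:
    ...                             # serve requests / run the GUI
finally:
    RenderQueue.prepare_for_shutdown()  # calls stop(), cancels running jobs, saves state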

View File

@@ -1,4 +1,3 @@
-import sys
 import logging

 from PyQt6.QtGui import QFont
@@ -16,7 +15,10 @@ class QSignalHandler(logging.Handler, QObject):
     def emit(self, record):
         msg = self.format(record)
+        try:
             self.new_record.emit(msg)  # Emit signal
+        except RuntimeError:
+            pass


 class ConsoleWindow(QMainWindow):

View File

@@ -183,8 +183,13 @@ class MainWindow(QMainWindow):
     def __background_update(self):
         while True:
+            try:
                 self.update_servers()
                 self.fetch_jobs()
+            except RuntimeError:
+                pass
+            except Exception as e:
+                logger.error(f"Uncaught exception in background update: {e}")
             time.sleep(0.5)

     def closeEvent(self, event):
@@ -372,13 +377,17 @@ class MainWindow(QMainWindow):
     def load_image_path(self, image_path):
         # Load and set the image using QPixmap
+        try:
             pixmap = QPixmap(image_path)
             if not pixmap:
                 logger.error("Error loading image")
                 return
             self.image_label.setPixmap(pixmap)
+        except Exception as e:
+            logger.error(f"Error loading image path: {e}")

     def load_image_data(self, pillow_image):
+        try:
             # Convert the Pillow Image to a QByteArray (byte buffer)
             byte_array = QByteArray()
             buffer = QBuffer(byte_array)
@@ -396,6 +405,8 @@ class MainWindow(QMainWindow):
                 logger.error("Error loading image")
                 return
             self.image_label.setPixmap(pixmap)
+        except Exception as e:
+            logger.error(f"Error loading image data: {e}")

     def update_servers(self):
         found_servers = list(set(ZeroconfServer.found_hostnames() + self.added_hostnames))

View File

@@ -32,20 +32,23 @@ class StatusBar(QStatusBar):
             # Check for status change every 1s on background thread
             while True:
+                try:
+                    # update status label - get download status
                     new_status = proxy.status()
-                new_image_name = image_names.get(new_status, 'Synchronize.png')
-                image_path = os.path.join(resources_dir(), new_image_name)
-                self.label.setPixmap((QPixmap(image_path).scaled(16, 16, Qt.AspectRatioMode.KeepAspectRatio)))
+                    # add download status
                     if EngineManager.download_tasks:
                         if len(EngineManager.download_tasks) == 1:
                             task = EngineManager.download_tasks[0]
                             new_status = f"{new_status} | Downloading {task.engine.capitalize()} {task.version}..."
                         else:
                             new_status = f"{new_status} | Downloading {len(EngineManager.download_tasks)} engines"
                     self.messageLabel.setText(new_status)
+
+                    # update status image
+                    new_image_name = image_names.get(new_status, 'Synchronize.png')
+                    new_image_path = os.path.join(resources_dir(), new_image_name)
+                    self.label.setPixmap((QPixmap(new_image_path).scaled(16, 16, Qt.AspectRatioMode.KeepAspectRatio)))
+                except RuntimeError:  # ignore runtime errors during shutdown
+                    pass
                 time.sleep(1)

         background_thread = threading.Thread(target=background_update,)

View File

@@ -2,7 +2,8 @@ import logging
 import socket

 from pubsub import pub
-from zeroconf import Zeroconf, ServiceInfo, ServiceBrowser, ServiceStateChange, NonUniqueNameException
+from zeroconf import Zeroconf, ServiceInfo, ServiceBrowser, ServiceStateChange, NonUniqueNameException, \
+    NotRunningException

 logger = logging.getLogger()
@@ -31,12 +32,14 @@
     def start(cls, listen_only=False):
         if not cls.service_type:
             raise RuntimeError("The 'configure' method must be run before starting the zeroconf server")
+        logger.debug("Starting zeroconf service")
         if not listen_only:
             cls._register_service()
         cls._browse_services()

     @classmethod
     def stop(cls):
+        logger.debug("Stopping zeroconf service")
         cls._unregister_service()
         cls.zeroconf.close()
@@ -73,6 +76,7 @@
     @classmethod
     def _on_service_discovered(cls, zeroconf, service_type, name, state_change):
+        try:
             info = zeroconf.get_service_info(service_type, name)
             hostname = name.split(f'.{cls.service_type}')[0]
             logger.debug(f"Zeroconf: {hostname} {state_change}")
@@ -82,6 +86,8 @@
             else:
                 cls.client_cache.pop(hostname)
             pub.sendMessage('zeroconf_state_change', hostname=hostname, state_change=state_change)
+        except NotRunningException:
+            pass

     @classmethod
     def found_hostnames(cls):
@@ -104,9 +110,15 @@
 # Example usage:
 if __name__ == "__main__":
+    import time
+
+    logging.basicConfig(level=logging.DEBUG)
     ZeroconfServer.configure("_zordon._tcp.local.", "foobar.local", 8080)
     try:
         ZeroconfServer.start()
-        input("Server running - Press enter to end")
+        while True:
+            time.sleep(0.1)
+    except KeyboardInterrupt:
+        pass
     finally:
         ZeroconfServer.stop()