@@ -126,7 +126,7 @@ class Protocol_Gateway:
126126 _logging_initialized = False
127127
128128 @classmethod
129- def _setup_logging (cls , cfg ) -> None :
129+ def _setup_logging (cls , cfg : ConfigParser ) -> None :
130130 """
131131 created to eliminate multi gig log file sizes in docker as logging.StreamHandler(sys.stdout) results in a
132132 json file that can quickly grow quiet large.
@@ -138,7 +138,7 @@ def _setup_logging(cls, cfg) -> None:
138138 return
139139
140140 # Read logging config
141- level_name : str = cfg .get ("logging" , "level" , fallback = "INFO" ).upper ()
141+ level_name : str = cfg .get ("logging" , "level" , fallback = "INFO" ).strip (). upper ()
142142 level : int = getattr (logging , level_name , logging .INFO )
143143
144144 log_dir = Path (cfg .get ("logging" , "log_dir" , fallback = "logs" ))
@@ -190,7 +190,7 @@ def _setup_logging(cls, cfg) -> None:
190190 handler .setFormatter (formatter )
191191
192192 # ---- Root logger wiring ----
193- root = logging .getLogger ()
193+ root : logging . Logger = logging .getLogger ()
194194 root .setLevel (level )
195195 root .handlers .clear ()
196196 root .addHandler (handler )
@@ -203,7 +203,7 @@ def _setup_logging(cls, cfg) -> None:
203203
204204 cls ._logging_initialized = True
205205
206- __log = None
206+ __log : logging . Logger
207207 # log level, available log levels are CRITICAL, FATAL, ERROR, WARNING, INFO, DEBUG, EXCEPTION
208208 __log_level = "DEBUG"
209209
@@ -213,12 +213,12 @@ def _setup_logging(cls, cfg) -> None:
213213 __transports : list [transport_base ] = []
214214 ''' transport_base is for type hinting. this can be any transport'''
215215
216- config_file : str
216+ config_file : Path
217217
218218 # Simple read completion tracking
219219 __read_completion_tracker : dict [str , bool ] = {}
220220 ''' Track which transports have completed their current read cycle '''
221- __read_tracker_lock : threading .Lock = None
221+ __read_tracker_lock : threading .Lock
222222
223223 # Concurrency control
224224 __enable_concurrency : bool = False
@@ -245,14 +245,15 @@ def __init__(self, config_file : str):
245245 if alternate_cfg .is_file ():
246246 self .config_file = alternate_cfg
247247 else :
248+ logging .getLogger (__name__ ).warning (f"Config file not found { alternate_cfg } , using default: { default_cfg } " )
248249 self .config_file = default_cfg
249250
250251 #pymodbus_log = logging.getLogger('pymodbus')
251252 #pymodbus_log.setLevel(logging.DEBUG)
252253 #pymodbus_log.addHandler(handler)
253254
254255 self .__settings = CustomConfigParser ()
255- self .__settings .read (self .config_file )
256+ self .__settings .read (self .config_file . as_posix () )
256257
257258 self ._setup_logging (self .__settings )
258259 self .__log : logging .Logger = logging .getLogger (__name__ )
@@ -261,15 +262,17 @@ def __init__(self, config_file : str):
261262 self .__log_level = self .__settings .get ("general" ,"log_level" , fallback = "INFO" )
262263
263264 # Read concurrency setting - default to sequential (disabled) for better stability
264- self .__enable_concurrency = self .__settings .getboolean ("general" , "enable_concurrency" , fallback = False )
265+ self .__enable_concurrency = bool ( self .__settings .getboolean ("general" , "enable_concurrency" , fallback = False ) )
265266 self .__log .info (f"Concurrency mode: { 'Concurrent' if self .__enable_concurrency else 'Sequential' } " )
266267
267268 # Read sequential delay setting
268- self .__sequential_delay = self .__settings .getfloat ("general" , "sequential_delay" , fallback = 1.0 )
269+ self .__sequential_delay = float (
270+ self .__settings .getfloat ("general" , "sequential_delay" , fallback = 1.0 ) or 1.0
271+ )
269272 if not self .__enable_concurrency :
270273 self .__log .info (f"Sequential delay between transports: { self .__sequential_delay } seconds" )
271274
272- log_level = getattr (logging , self .__log_level , logging .INFO )
275+ log_level = getattr (logging , str ( self .__log_level ) , logging .INFO )
273276 self .__log .setLevel (log_level )
274277 self .__log .info ("Loading..." )
275278
@@ -362,6 +365,13 @@ def _process_transport_read(self, transport) -> None:
362365 except Exception as err :
363366 self .__log .exception (f"Error processing transport { transport .transport_name } and { err } " )
364367 # traceback.print_exc()
368+ # Errno 104 - Connection reset by peer (common for MQTT disconnects)
369+ # Errno 32 - Broken pipe (common for MQTT disconnects)
370+ # Errno 110 - Connection timed out (common for network issues)
371+ if any (code in str (err ) for code in ('Errno 104' , 'Errno 32' , 'Errno 110' )):
372+ # traceback.print_exc()
373+ transport .connect ()
374+ self .__log .warning (f"Attempting reconnect for { transport .transport_name } " )
365375 self ._mark_read_complete (transport )
366376
367377 def _mark_read_complete (self , transport ) -> None :
0 commit comments