This closes issue #20.

Thomas Williams 2024-07-05 13:42:28 +01:00
parent 29546b6280
commit 26d2d9b168
Signed by: thomas
GPG key ID: EB8F975CF60BCBFF
2 changed files with 64 additions and 51 deletions


@@ -17,4 +17,9 @@ Both CPU/RAM monitoring and URL monitoring can be set on their own monitoring periods.
 - **sqlUsername** - the username used to authenticate to the SQL server.
 - **sqlPassword** - the password used to authenticate to the SQL server.
 - **logRetentionDays** - the maximum age logs should be kept.
-- **maximumSQLAttempts** - the maximum number of attempts to try certain SQL operations
+- **maximumSQLAttempts** - the maximum number of attempts to try certain SQL operations.
+- **hostMonitorStartTime** - the earliest time at which the host monitor should start.
+- **hostMonitorEndTime** - the time at which the host monitor should shut down.
+- **urlMonitorStartTime** - the earliest time at which the URL monitor should start.
+- **urlMonitorEndTime** - the time at which the URL monitor should shut down.
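
These option names mirror the attributes that main.py reads from its config module (it also reads hostMonitoringPeriod, urlMonitoringPeriod and others visible in the diff below). A minimal sketch of how the new time-window options might be set, assuming a plain config.py with module-level attributes; the values are illustrative, not defaults from this repository:

# config.py (illustrative sketch only -- not the repository's actual config)
hostMonitoringPeriod = 60          # seconds between host CPU/RAM samples
urlMonitoringPeriod = 300          # seconds between rounds of URL checks
hostMonitorStartTime = "08:00:00"  # earliest time of day the host monitor runs
hostMonitorEndTime = "18:00:00"    # time of day the host monitor stops
urlMonitorStartTime = "08:00:00"   # earliest time of day the URL monitor runs
urlMonitorEndTime = "18:00:00"     # time of day the URL monitor stops

The times are zero-padded "HH:MM:SS" strings, which is what lets main.py compare them directly against time.strftime("%H:%M:%S") with <= and >=.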

main.py

@@ -88,80 +88,88 @@ def getNonPOSIXCPUAverage():
     return avgLoad
 
 def monitorHost(stop_event):
-    while time.strftime("%H:%M:%S") <= config.hostMonitorStartTime:
-        time.sleep(1) # This block is important to ensure the thread sleeps until the start time is reached. Else the thread wont start if the script is started before the start time
     nonPOSIXCPUStarted = False
-    while not (stop_event.is_set()) and (time.strftime("%H:%M:%S") >= config.hostMonitorStartTime and time.strftime("%H:%M:%S") <= config.hostMonitorEndTime):
-        if os.name != 'posix' or config.forceNonPOSIXCPU:
-            if not nonPOSIXCPUStarted:
-                nonPOSIXCPUMonitor = threading.Thread(target=nonPOSIXCPULoad, args=(stop_event,))
-                nonPOSIXCPUMonitor.start()
-                nonPOSIXCPUStarted = True
-            loadavg = round(getNonPOSIXCPUAverage(), 2)
-        else:
-            load1, load5, load15 = psutil.getloadavg() # this takes time to warm up if not running script on *nix
-            loadavg = round((load1/os.cpu_count()) * 100, 2)
-        memory = psutil.virtual_memory().percent
-        logHostLog(socket.gethostname(), datetime.now(), loadavg, memory)
-        print("CPU %: " + str(loadavg))
-        print("Memory %: " + str(memory))
-        print() # new line
-        time.sleep(config.hostMonitoringPeriod)
+    while not (stop_event.is_set()):
+        while not (stop_event.is_set()) and (time.strftime("%H:%M:%S") >= config.hostMonitorStartTime and time.strftime("%H:%M:%S") <= config.hostMonitorEndTime):
+            if os.name != 'posix' or config.forceNonPOSIXCPU:
+                if not nonPOSIXCPUStarted:
+                    nonPOSIXCPUMonitor = threading.Thread(target=nonPOSIXCPULoad, args=(stop_event,))
+                    nonPOSIXCPUMonitor.start()
+                    nonPOSIXCPUStarted = True
+                loadavg = round(getNonPOSIXCPUAverage(), 2)
+            else:
+                load1, load5, load15 = psutil.getloadavg() # this takes time to warm up if not running script on *nix
+                loadavg = round((load1/os.cpu_count()) * 100, 2)
+            memory = psutil.virtual_memory().percent
+            logHostLog(socket.gethostname(), datetime.now(), loadavg, memory)
+            print("CPU %: " + str(loadavg))
+            print("Memory %: " + str(memory))
+            print() # new line
+            time.sleep(config.hostMonitoringPeriod)
+        time.sleep(1)
 
 def monitorUrls(stop_event):
-    while time.strftime("%H:%M:%S") <= config.urlMonitorStartTime:
-        time.sleep(1) # This block is important to ensure the thread sleeps until the start time is reached. Else the thread wont start if the script is started before the start time
-    while not (stop_event.is_set()) and (time.strftime("%H:%M:%S") >= config.urlMonitorStartTime and time.strftime("%H:%M:%S") <= config.urlMonitorEndTime):
-        for url in config.urls:
-            baseUrl = url
-            urlFail = False
-            startTime = time.time()
-            request = loadUrl(url)
-            if request.status_code == 200:
-                html = BeautifulSoup(request.content, 'html.parser')
-                imageUrls = [img['src'] for img in html.find_all('img')]
-                with ThreadPoolExecutor(max_workers=config.maxWorkers) as executor:
-                    responses = [executor.submit(loadUrl, prepareUrl(url, baseUrl)) for url in imageUrls]
-                    responses = [future.result() for future in as_completed(responses)]
-                    for response in responses:
-                        if not response.status_code == 200:
-                            urlFail = True
-                endTime = time.time()
-                timeDiff = endTime - startTime
-                print(baseUrl + " response time: " + str(timeDiff))
-                print() # new line
-                logURLLog(socket.gethostname(), datetime.now(), baseUrl, timeDiff)
-            else:
-                urlFail = True
-        time.sleep(config.urlMonitoringPeriod)
+    while not (stop_event.is_set()):
+        while not (stop_event.is_set()) and (time.strftime("%H:%M:%S") >= config.urlMonitorStartTime and time.strftime("%H:%M:%S") <= config.urlMonitorEndTime):
+            for url in config.urls:
+                baseUrl = url
+                urlFail = False
+                startTime = time.time()
+                request = loadUrl(url)
+                if request.status_code == 200:
+                    html = BeautifulSoup(request.content, 'html.parser')
+                    imageUrls = [img['src'] for img in html.find_all('img')]
+                    with ThreadPoolExecutor(max_workers=config.maxWorkers) as executor:
+                        responses = [executor.submit(loadUrl, prepareUrl(url, baseUrl)) for url in imageUrls]
+                        responses = [future.result() for future in as_completed(responses)]
+                        for response in responses:
+                            if not response.status_code == 200:
+                                urlFail = True
+                    endTime = time.time()
+                    timeDiff = endTime - startTime
+                    print(baseUrl + " response time: " + str(timeDiff))
+                    print() # new line
+                    logURLLog(socket.gethostname(), datetime.now(), baseUrl, timeDiff)
+                else:
+                    urlFail = True
+            time.sleep(config.urlMonitoringPeriod)
+        time.sleep(1)
 
 def logHostLog(hostname, logTime, cpu, memory):
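
The change in both monitorHost and monitorUrls is the same: the old version slept in a one-shot loop until the start time and then returned once the end time passed, so a thread launched outside its window, or left running overnight, never monitored again; the new version wraps the time-window loop in an outer while not stop_event.is_set() loop that idles for a second outside the window, so each monitor resumes the next day and still shuts down promptly when the stop event is set. A standalone sketch of that pattern (run_in_daily_window is a hypothetical helper written for illustration, not code from this repository):

import threading
import time

def run_in_daily_window(stop_event, start, end, work, period=5):
    # start/end are zero-padded "HH:MM:SS" strings, so lexicographic comparison
    # matches chronological order -- the same trick main.py relies on.
    while not stop_event.is_set():
        # Inner loop: do the work only while the current time is inside the window.
        while not stop_event.is_set() and start <= time.strftime("%H:%M:%S") <= end:
            work()
            time.sleep(period)
        # Outside the window: idle cheaply and re-check, so the thread survives
        # until the window opens again the next day.
        time.sleep(1)

stop = threading.Event()
worker = threading.Thread(target=run_in_daily_window,
                          args=(stop, "08:00:00", "18:00:00", lambda: print("tick")))
worker.start()
time.sleep(12)  # let the demo run briefly
stop.set()      # request shutdown, as main.py does with its stop_event
worker.join()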