This closes issue20

This commit is contained in:
Thomas Williams 2024-07-05 13:42:28 +01:00
parent 29546b6280
commit 26d2d9b168
Signed by: thomas
GPG key ID: EB8F975CF60BCBFF
2 changed files with 64 additions and 51 deletions

View file

@ -17,4 +17,9 @@ Both CPU/RAM monitoring and URL monitoring can be set on their own monitoring pe
- **sqlUsername** - the username used to authenticate to the SQL server. - **sqlUsername** - the username used to authenticate to the SQL server.
- **sqlPassword** - the password used to authenticate to the SQL server. - **sqlPassword** - the password used to authenticate to the SQL server.
- **logRetentionDays** - the maximum age logs should be kept. - **logRetentionDays** - the maximum age logs should be kept.
- **maximumSQLAttempts** - the maximum number of attempts to try certain SQL operations - **maximumSQLAttempts** - the maximum number of attempts to try certain SQL operations.
- **hostMonitorStartTime** - the earliest time at which the host monitor should start.
- **hostMonitorEndTime** - the end time at which the host monitor should shut down.
- **urlMonitorStartTime** - the earliest time at which the URL monitor should start.
- **urlMonitorEndTime** - the end time at which the URL monitor should shut down.

108
main.py
View file

@ -88,80 +88,88 @@ def getNonPOSIXCPUAverage():
return avgLoad return avgLoad
def monitorHost(stop_event):
while time.strftime("%H:%M:%S") <= config.hostMonitorStartTime: def monitorHost(stop_event):
time.sleep(1) # This block is important to ensure the thread sleeps until the start time is reached. Else the thread wont start if the script is started before the start time
nonPOSIXCPUStarted = False nonPOSIXCPUStarted = False
while not (stop_event.is_set()) and (time.strftime("%H:%M:%S") >= config.hostMonitorStartTime and time.strftime("%H:%M:%S") <= config.hostMonitorEndTime): while not (stop_event.is_set()):
while not (stop_event.is_set()) and (time.strftime("%H:%M:%S") >= config.hostMonitorStartTime and time.strftime("%H:%M:%S") <= config.hostMonitorEndTime):
if os.name != 'posix' or config.forceNonPOSIXCPU: if os.name != 'posix' or config.forceNonPOSIXCPU:
if not nonPOSIXCPUStarted: if not nonPOSIXCPUStarted:
nonPOSIXCPUMonitor = threading.Thread(target=nonPOSIXCPULoad, args=(stop_event,)) nonPOSIXCPUMonitor = threading.Thread(target=nonPOSIXCPULoad, args=(stop_event,))
nonPOSIXCPUMonitor.start() nonPOSIXCPUMonitor.start()
nonPOSIXCPUStarted = True nonPOSIXCPUStarted = True
loadavg = round(getNonPOSIXCPUAverage(), 2) loadavg = round(getNonPOSIXCPUAverage(), 2)
else: else:
load1, load5, load15 = psutil.getloadavg() # this takes time to warm up if not running script on *nix load1, load5, load15 = psutil.getloadavg() # this takes time to warm up if not running script on *nix
loadavg = round((load1/os.cpu_count()) * 100, 2) loadavg = round((load1/os.cpu_count()) * 100, 2)
memory = psutil.virtual_memory().percent memory = psutil.virtual_memory().percent
logHostLog(socket.gethostname(), datetime.now(), loadavg, memory)
logHostLog(socket.gethostname(), datetime.now(), loadavg, memory)
print("CPU %: " + str(loadavg)) print("CPU %: " + str(loadavg))
print("Memory %: " + str(memory)) print("Memory %: " + str(memory))
print() # new line print() # new line
time.sleep(config.hostMonitoringPeriod) time.sleep(config.hostMonitoringPeriod)
time.sleep(1)
def monitorUrls(stop_event): def monitorUrls(stop_event):
while time.strftime("%H:%M:%S") <= config.urlMonitorStartTime: while not (stop_event.is_set()):
time.sleep(1) # This block is important to ensure the thread sleeps until the start time is reached. Else the thread wont start if the script is started before the start time
while not (stop_event.is_set()) and (time.strftime("%H:%M:%S") >= config.urlMonitorStartTime and time.strftime("%H:%M:%S") <= config.urlMonitorEndTime): while not (stop_event.is_set()) and (time.strftime("%H:%M:%S") >= config.urlMonitorStartTime and time.strftime("%H:%M:%S") <= config.urlMonitorEndTime):
for url in config.urls:
baseUrl = url
urlFail = False
startTime = time.time() for url in config.urls:
request = loadUrl(url)
if request.status_code == 200: baseUrl = url
urlFail = False
html = BeautifulSoup(request.content, 'html.parser') startTime = time.time()
imageUrls = [img['src'] for img in html.find_all('img')]
request = loadUrl(url)
with ThreadPoolExecutor(max_workers=config.maxWorkers) as executor: if request.status_code == 200:
responses = [executor.submit(loadUrl, prepareUrl(url, baseUrl)) for url in imageUrls]
responses = [future.result() for future in as_completed(responses)]
for response in responses: html = BeautifulSoup(request.content, 'html.parser')
imageUrls = [img['src'] for img in html.find_all('img')]
if not response.status_code == 200:
urlFail = True
endTime = time.time() with ThreadPoolExecutor(max_workers=config.maxWorkers) as executor:
timeDiff = endTime - startTime
print(baseUrl + " response time: " + str(timeDiff))
print() # new line
logURLLog(socket.gethostname(), datetime.now(), baseUrl, timeDiff)
else:
urlFail = True
time.sleep(config.urlMonitoringPeriod) responses = [executor.submit(loadUrl, prepareUrl(url, baseUrl)) for url in imageUrls]
responses = [future.result() for future in as_completed(responses)]
for response in responses:
if not response.status_code == 200:
urlFail = True
endTime = time.time()
timeDiff = endTime - startTime
print(baseUrl + " response time: " + str(timeDiff))
print() # new line
logURLLog(socket.gethostname(), datetime.now(), baseUrl, timeDiff)
else:
urlFail = True
time.sleep(config.urlMonitoringPeriod)
time.sleep(1)
def logHostLog(hostname, logTime, cpu, memory): def logHostLog(hostname, logTime, cpu, memory):