This closes issue20

Author: Thomas Williams, 2024-07-05 13:42:28 +01:00
parent 29546b6280
commit 26d2d9b168
Signed by: thomas (GPG key ID: EB8F975CF60BCBFF)
2 changed files with 64 additions and 51 deletions

@@ -17,4 +17,9 @@ Both CPU/RAM monitoring and URL monitoring can be set on their own monitoring pe
 - **sqlUsername** - the username used to authenticate to the SQL server.
 - **sqlPassword** - the password used to authenticate to the SQL server.
 - **logRetentionDays** - the maximum age logs should be kept.
-- **maximumSQLAttempts** - the maximum number of attempts to try certain SQL operations
+- **maximumSQLAttempts** - the maximum number of attempts to try certain SQL operations.
+- **hostMonitorStartTime** - the earliest time at which the host monitor should start.
+- **hostMonitorEndTime** - the time at which the host monitor should shut down.
+- **urlMonitorStartTime** - the earliest time at which the URL monitor should start.
+- **urlMonitorEndTime** - the time at which the URL monitor should shut down.
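
The four start/end time options above are new in this commit and are read at runtime as `config.<name>` attributes in main.py. As a rough sketch only (not part of the commit), a config module exposing the options documented here and referenced by the code below might look like the following; the option names are the only thing taken from the source, and every value is illustrative:

```python
# config.py - illustrative sketch; names mirror the documented options and the
# attributes main.py reads (config.<name>), values are placeholders.

forceNonPOSIXCPU = False           # force the fallback CPU sampler even on POSIX systems
hostMonitoringPeriod = 60          # seconds between CPU/RAM samples
urlMonitoringPeriod = 300          # seconds between URL sweeps
maxWorkers = 8                     # thread pool size for fetching page images
urls = ["https://example.com"]     # pages to monitor

sqlUsername = "monitor"            # SQL credentials (see the list above)
sqlPassword = "changeme"
logRetentionDays = 30              # maximum age of kept logs, in days
maximumSQLAttempts = 3             # retries for certain SQL operations

# New in this commit: monitoring windows, compared as zero-padded HH:MM:SS strings.
hostMonitorStartTime = "08:00:00"
hostMonitorEndTime = "18:00:00"
urlMonitorStartTime = "08:00:00"
urlMonitorEndTime = "18:00:00"
```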

main.py

@@ -88,80 +88,88 @@ def getNonPOSIXCPUAverage():
     return avgLoad
 def monitorHost(stop_event):
+    while time.strftime("%H:%M:%S") <= config.hostMonitorStartTime:
+        time.sleep(1) # This block is important to ensure the thread sleeps until the start time is reached. Else the thread wont start if the script is started before the start time
     nonPOSIXCPUStarted = False
     while not (stop_event.is_set()):
-        if os.name != 'posix' or config.forceNonPOSIXCPU:
-            if not nonPOSIXCPUStarted:
-                nonPOSIXCPUMonitor = threading.Thread(target=nonPOSIXCPULoad, args=(stop_event,))
-                nonPOSIXCPUMonitor.start()
-                nonPOSIXCPUStarted = True
-            loadavg = round(getNonPOSIXCPUAverage(), 2)
-        else:
-            load1, load5, load15 = psutil.getloadavg() # this takes time to warm up if not running script on *nix
-            loadavg = round((load1/os.cpu_count()) * 100, 2)
-        memory = psutil.virtual_memory().percent
-        logHostLog(socket.gethostname(), datetime.now(), loadavg, memory)
-        print("CPU %: " + str(loadavg))
-        print("Memory %: " + str(memory))
-        print() # new line
-        time.sleep(config.hostMonitoringPeriod)
+        while not (stop_event.is_set()) and (time.strftime("%H:%M:%S") >= config.hostMonitorStartTime and time.strftime("%H:%M:%S") <= config.hostMonitorEndTime):
+            if os.name != 'posix' or config.forceNonPOSIXCPU:
+                if not nonPOSIXCPUStarted:
+                    nonPOSIXCPUMonitor = threading.Thread(target=nonPOSIXCPULoad, args=(stop_event,))
+                    nonPOSIXCPUMonitor.start()
+                    nonPOSIXCPUStarted = True
+                loadavg = round(getNonPOSIXCPUAverage(), 2)
+            else:
+                load1, load5, load15 = psutil.getloadavg() # this takes time to warm up if not running script on *nix
+                loadavg = round((load1/os.cpu_count()) * 100, 2)
+            memory = psutil.virtual_memory().percent
+            logHostLog(socket.gethostname(), datetime.now(), loadavg, memory)
+            print("CPU %: " + str(loadavg))
+            print("Memory %: " + str(memory))
+            print() # new line
+            time.sleep(config.hostMonitoringPeriod)
+        time.sleep(1)
 def monitorUrls(stop_event):
+    while time.strftime("%H:%M:%S") <= config.urlMonitorStartTime:
+        time.sleep(1) # This block is important to ensure the thread sleeps until the start time is reached. Else the thread wont start if the script is started before the start time
     while not (stop_event.is_set()):
-        for url in config.urls:
-            baseUrl = url
-            urlFail = False
-            startTime = time.time()
-            request = loadUrl(url)
-            if request.status_code == 200:
-                html = BeautifulSoup(request.content, 'html.parser')
-                imageUrls = [img['src'] for img in html.find_all('img')]
-                with ThreadPoolExecutor(max_workers=config.maxWorkers) as executor:
-                    responses = [executor.submit(loadUrl, prepareUrl(url, baseUrl)) for url in imageUrls]
-                    responses = [future.result() for future in as_completed(responses)]
-                for response in responses:
-                    if not response.status_code == 200:
-                        urlFail = True
-                endTime = time.time()
-                timeDiff = endTime - startTime
-                print(baseUrl + " response time: " + str(timeDiff))
-                print() # new line
-                logURLLog(socket.gethostname(), datetime.now(), baseUrl, timeDiff)
-            else:
-                urlFail = True
-            time.sleep(config.urlMonitoringPeriod)
+        while not (stop_event.is_set()) and (time.strftime("%H:%M:%S") >= config.urlMonitorStartTime and time.strftime("%H:%M:%S") <= config.urlMonitorEndTime):
+            for url in config.urls:
+                baseUrl = url
+                urlFail = False
+                startTime = time.time()
+                request = loadUrl(url)
+                if request.status_code == 200:
+                    html = BeautifulSoup(request.content, 'html.parser')
+                    imageUrls = [img['src'] for img in html.find_all('img')]
+                    with ThreadPoolExecutor(max_workers=config.maxWorkers) as executor:
+                        responses = [executor.submit(loadUrl, prepareUrl(url, baseUrl)) for url in imageUrls]
+                        responses = [future.result() for future in as_completed(responses)]
+                    for response in responses:
+                        if not response.status_code == 200:
+                            urlFail = True
+                    endTime = time.time()
+                    timeDiff = endTime - startTime
+                    print(baseUrl + " response time: " + str(timeDiff))
+                    print() # new line
+                    logURLLog(socket.gethostname(), datetime.now(), baseUrl, timeDiff)
+                else:
+                    urlFail = True
+                time.sleep(config.urlMonitoringPeriod)
+        time.sleep(1)
 def logHostLog(hostname, logTime, cpu, memory):
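
The entry point that wires these threads together is outside this hunk, so the following is only a sketch of how monitorHost and monitorUrls appear to be driven, judging by their signatures: a shared threading.Event is passed in as stop_event, and setting it makes each loop exit once it finishes its current monitoring cycle. Everything besides monitorHost, monitorUrls and the stop_event parameter is hypothetical.

```python
# Hypothetical driver sketch - the repository's real entry point is not shown
# in this diff; only the monitorHost/monitorUrls signatures are taken from it.
import threading
import time

stop_event = threading.Event()

host_thread = threading.Thread(target=monitorHost, args=(stop_event,), daemon=True)
url_thread = threading.Thread(target=monitorUrls, args=(stop_event,), daemon=True)
host_thread.start()
url_thread.start()

try:
    while True:
        time.sleep(1)                 # keep the main thread alive
except KeyboardInterrupt:
    stop_event.set()                  # both monitor loops poll stop_event.is_set()
```

One property of the new time windows worth noting: because they compare zero-padded HH:MM:SS strings, a window only behaves as expected when it starts and ends within the same day (start time not later than end time); a window that crosses midnight never satisfies both comparisons, so that monitor would simply idle in its outer loop.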