Added more logging statements

Frisk 2024-02-25 14:23:22 +01:00
parent ac720af6e5
commit 9366588ae8
3 changed files with 13 additions and 7 deletions


@@ -93,8 +93,9 @@ class Domain:
         await wiki.update_targets()
         if first:
             self.wikis.move_to_end(wiki.script_url, last=False)
+        logger.debug(f"Added new wiki {wiki.script_url} to domain {self.name}")
 
-    async def run_wiki_scan(self, wiki: src.wiki.Wiki, reason: Optional[int] = None):
+    async def run_wiki_scan(self, wiki: src.wiki.Wiki, reason: Optional[str] = None):
         await wiki.scan()
         wiki.statistics.update(Log(type=LogType.SCAN_REASON, title=str(reason)))
         self.wikis.move_to_end(wiki.script_url)
@@ -111,11 +112,11 @@ class Domain:
                 except KeyError:
                     logger.error(f"Could not find a wiki with URL {wiki_url} in the domain group!")
                     continue
-                await self.run_wiki_scan(wiki)
+                await self.run_wiki_scan(wiki, "IRC feed event")
             while True:  # Iterate until hitting return, we don't have to iterate using for since we are sending wiki to the end anyways
                 wiki: src.wiki.Wiki = next(iter(self.wikis.values()))
                 if (int(time.time()) - (wiki.statistics.last_checked_rc or 0)) > settings.get("irc_overtime", 3600):
-                    await self.run_wiki_scan(wiki)
+                    await self.run_wiki_scan(wiki, "IRC backup check")
                 else:
                     return  # Recently scanned wikis will get at the end of the self.wikis, so we assume what is first hasn't been checked for a while
         except Exception as e:
@@ -132,7 +133,7 @@ class Domain:
         try:
             while True:
                 await asyncio.sleep(self.calculate_sleep_time(len(self)))  # To make sure that we don't spam domains with one wiki every second we calculate a sane timeout for domains with few wikis
-                await self.run_wiki_scan(next(iter(self.wikis.values())))
+                await self.run_wiki_scan(next(iter(self.wikis.values())), "regular check")
         except Exception as e:
             if command_line_args.debug:
                 logger.exception("Regular scheduler task for domain {} failed!".format(self.name))
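
The scheduling pattern these hunks lean on: self.wikis is an OrderedDict used as a least-recently-scanned queue, so next(iter(self.wikis.values())) always yields the stalest wiki, move_to_end(url, last=False) lets a newly added wiki jump the queue, and move_to_end(url) after a scan sends it to the back. A minimal runnable sketch of that queue discipline, using simplified stand-ins for the real src.wiki.Wiki and Domain classes (the SCAN_REASON log bookkeeping is reduced to a comment):

    import asyncio
    from collections import OrderedDict

    class Wiki:
        """Stand-in for src.wiki.Wiki: only knows how to scan itself."""
        def __init__(self, script_url: str):
            self.script_url = script_url

        async def scan(self):
            print(f"scanning {self.script_url}")

    class Domain:
        def __init__(self):
            self.wikis = OrderedDict()  # script_url -> Wiki, stalest first

        def add_wiki(self, wiki: Wiki, first: bool = False):
            self.wikis[wiki.script_url] = wiki
            if first:
                self.wikis.move_to_end(wiki.script_url, last=False)  # scan it next

        async def run_wiki_scan(self, wiki: Wiki, reason: str | None = None):
            await wiki.scan()  # the real code records `reason` as a SCAN_REASON log entry here
            self.wikis.move_to_end(wiki.script_url)  # freshly scanned goes to the back

    async def main():
        domain = Domain()
        for url in ("https://a.example.org/w/", "https://b.example.org/w/"):
            domain.add_wiki(Wiki(url))
        stalest = next(iter(domain.wikis.values()))  # least recently scanned wiki
        await domain.run_wiki_scan(stalest, "regular check")

    asyncio.run(main())

Widening reason from Optional[int] to Optional[str] is what makes the three human-readable labels ("IRC feed event", "IRC backup check", "regular check") possible; str(reason) in the Log entry accepts either type, so only the annotation had to change.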


@@ -43,6 +43,7 @@ class DomainManager:
                 self.remove_wiki(split_payload[1])
             elif split_payload[0] == "UPDATE":
                 await self.return_domain(self.get_domain(split_payload[1])).get_wiki(split_payload[1]).update_targets()
+                logger.info("Successfully force updated information about {}".format(split_payload[1]))
             elif split_payload[0] == "DEBUG":
                 if split_payload[1] == "INFO":
                     logger.info(self.domains)
@@ -87,8 +88,10 @@ class DomainManager:
             raise NoDomain
         else:
             domain.remove_wiki(script_url)
+            logger.debug(f"Removed a wiki {script_url} from {domain.name}")
             if len(domain) == 0:
                 self.remove_domain(domain)
+                logger.debug(f"Removed domain {domain.name} due to removal of last queued wiki in its dictionary")
 
     @staticmethod
     def get_domain(url: str) -> str:
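
Two things happen in this file: the REMOVE/UPDATE/DEBUG branches dispatch on a space-separated notification payload, and remove_wiki cascades so that deleting the last wiki of a domain also deletes the domain, which is exactly what the two new debug lines trace. A rough sketch of that cascade under simplified assumptions (plain dicts instead of the real Domain objects, KeyError instead of NoDomain, and a crude get_domain):

    import logging

    logger = logging.getLogger("rcgcdb.domain_manager")

    class DomainManager:
        def __init__(self):
            self.domains: dict[str, dict] = {}  # domain name -> {script_url: wiki}

        def handle_payload(self, payload: str):
            command, argument = payload.split(" ", 1)  # e.g. "REMOVE https://a.example.org/w/"
            if command == "REMOVE":
                self.remove_wiki(argument)

        def remove_wiki(self, script_url: str):
            name = self.get_domain(script_url)
            domain = self.domains.get(name)
            if domain is None or script_url not in domain:
                raise KeyError(script_url)  # the real code raises NoDomain
            del domain[script_url]
            logger.debug(f"Removed a wiki {script_url} from {name}")
            if len(domain) == 0:  # last queued wiki is gone, nothing left to schedule
                del self.domains[name]
                logger.debug(f"Removed domain {name} due to removal of last queued wiki")

        @staticmethod
        def get_domain(url: str) -> str:
            return url.split("/")[2]  # crude stand-in: host part of the script URL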


@@ -364,7 +364,8 @@ class Wiki:
         categorize_events = {}
         new_events = 0
         self.statistics.last_checked_rc = int(time.time())
-        highest_id = self.rc_id  # Pretty sure that will be faster
+        highest_id = self.rc_id
+        old_highest_id = self.rc_id
         for change in recent_changes:
             if change["rcid"] > highest_id and amount != 450:
                 new_events += 1
@@ -387,6 +388,7 @@ class Wiki:
                 message.wiki = self
                 message_list.append(QueueEntry(message, webhooks, self))
         messagequeue.add_messages(message_list)
-        self.statistics.update(last_action=highest_id)
-        dbmanager.add(("UPDATE rcgcdb SET rcid = $1 WHERE wiki = $2 AND ( rcid != -1 OR rcid IS NULL )", (highest_id, self.script_url)))  # If this is not enough for the future, save rcid in message sending function to make sure we always send all of the changes
+        if old_highest_id != highest_id:  # update only when differs
+            self.statistics.update(last_action=highest_id)
+            dbmanager.add(("UPDATE rcgcdb SET rcid = $1 WHERE wiki = $2 AND ( rcid != -1 OR rcid IS NULL )", (highest_id, self.script_url)))  # If this is not enough for the future, save rcid in message sending function to make sure we always send all of the changes
         return
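
The wiki-side change is a small dirty-flag pattern: snapshot the rcid high-water mark before walking the recent changes, raise it as events are processed, and touch the statistics plus the rcgcdb table only when the mark actually moved, sparing a database write on every idle scan. A reduced sketch of the idea, with a hypothetical persist_rcid standing in for statistics.update plus dbmanager.add:

    def persist_rcid(highest_id: int):
        # placeholder for: UPDATE rcgcdb SET rcid = $1 WHERE wiki = $2 ...
        print(f"persisting rcid {highest_id}")

    def process_changes(recent_changes: list[dict], rc_id: int, amount: int) -> int:
        """Return the new high-water mark, persisting it only if it changed."""
        highest_id = rc_id
        old_highest_id = rc_id
        new_events = 0
        for change in recent_changes:
            if change["rcid"] > highest_id and amount != 450:  # guard copied from the diff above
                new_events += 1
            highest_id = max(highest_id, change["rcid"])
        if old_highest_id != highest_id:  # update only when it differs
            persist_rcid(highest_id)
        return highest_id

    process_changes([{"rcid": 5}, {"rcid": 7}], rc_id=4, amount=2)  # persists 7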