I tried "Log Parser/Extractor — Search for services in logs" and it didn't work, so I made my own.
Just make sure the logs are not zipped.
Have fun!
Python:
import os
import time
import re
domains = ["netflix.com", "paypal.com", "onlyfans.com", "xss.pro", "xss.pro", "agma.io", "portal.azure.com", "cracked[.]io", "minecraft.net", "spotify.com", "twitter.com", "nordaccount.com", "expressvpn.com"]
source_directory = r"C:\Users\Gadr\Desktop\Script\Logs"
destination_directory = r"C:\Users\Gadr\Desktop\Script\des"
count = {}
files_to_search = ["Passwords.txt", "_AllPasswords_list.txt", "passwords.txt"]
for root, dirs, files in os.walk(source_directory):
for file_name in files_to_search:
if file_name in files:
source_file = os.path.join(root, file_name)
# Get the file's last modified date
file_time = os.path.getmtime(source_file)
# Convert the timestamp to a readable format
readable_time = time.ctime(file_time)
# Remove invalid characters from the readable_time variable
cleaned_time = re.sub(r"[/:]", "", readable_time)
with open(source_file, encoding = 'utf-8') as f:
lines = f.readlines()
i = 0
while i < len(lines):
for domain in domains:
if domain in lines[i]:
if domain not in count:
count[domain] = 0
if not os.path.exists(os.path.join(destination_directory, domain)):
os.makedirs(os.path.join(destination_directory, domain))
parent_dir = os.path.basename(root)
destination_file = os.path.join(destination_directory, domain, f"{cleaned_time}.txt")
with open(destination_file, "a", encoding='utf-8', errors='ignore') as out_file:
out_file.write(lines[i])
out_file.write(lines[i+1])
out_file.write(lines[i+2])
out_file.write(lines[i+3])
out_file.write('\n')
count[domain] += 1
i += 1
for domain in domains:
try:
print(f"{count[domain]} of '{domain}' accounts found")
except KeyError:
print(f"No '{domain}' have been found.")