015-Recursion-Tuned-Directory-Bruteforcer.py
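"""
Recursion-tuned directory brute-forcer.

Reads candidate paths from a wordlist, requests each one against the target
domain across multiple threads, and prints the URL, page title, response
length, and status code for every result. With --recursive, links found on a
fetched page are followed one level deep.
"""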
import argparse
import random
import threading
import time
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

def main(domain, wordlist, jitter, threads, recursive):
    # Build the list of candidate URLs from the wordlist, skipping blank lines
    urls = []
    with open(wordlist, 'r') as f:
        for line in f:
            path = line.strip()
            if path:
                urls.append(domain.rstrip('/') + '/' + path.lstrip('/'))

    # Split the URLs across the threads so each URL is requested exactly once
    thread_list = []
    for i in range(threads):
        chunk = urls[i::threads]
        if not chunk:
            continue
        t = threading.Thread(target=check_urls, args=(chunk, jitter, recursive))
        thread_list.append(t)
        t.start()

    # Wait for all worker threads to finish
    for t in thread_list:
        t.join()

def check_urls(urls, jitter, recursive):
    # Check each URL in the list
    for url in urls:
        try:
            # Make a request to the URL
            response = requests.get(url, timeout=10)

            # Print the details of the response
            print(f"URL: {response.url}")
            print(f"Title: {get_title(response)}")
            print(f"Length: {len(response.content)}")
            print(f"Status Code: {response.status_code}")

            # If recursion is enabled, follow every link on the page one level deep
            if recursive:
                soup = BeautifulSoup(response.content, 'html.parser')
                for link in soup.find_all('a'):
                    href = link.get('href')
                    if not href:
                        continue
                    # Resolve relative links against the URL that was actually fetched
                    new_url = urljoin(response.url, href)
                    check_urls([new_url], jitter, False)

            # Sleep for a random amount of time (up to the jitter value) to avoid
            # sending evenly spaced, bot-like requests
            time.sleep(random.uniform(0, jitter))
        except Exception as e:
            print(f"Error checking URL {url}: {e}")

def get_title(response):
    # Extract the page title from the response, if one is present
    soup = BeautifulSoup(response.content, 'html.parser')
    title_tag = soup.find('title')
    if title_tag is not None and title_tag.string is not None:
        return title_tag.string.strip()
    return ''

if __name__ == '__main__':
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='Check URLs on a domain.')
    parser.add_argument('domain', help='The domain to check URLs on.')
    parser.add_argument('wordlist', help='The path to the wordlist to use.')
    parser.add_argument('--jitter', type=float, default=1.0,
                        help='The maximum amount of jitter to use between requests (in seconds).')
    parser.add_argument('--threads', type=int, default=10,
                        help='The number of concurrent threads to use.')
    parser.add_argument('--recursive', action='store_true',
                        help='Enable recursive checking of links on the pages.')
    args = parser.parse_args()

    # Call the main function with the parsed arguments
    main(args.domain, args.wordlist, args.jitter, args.threads, args.recursive)
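# Example invocation (illustrative only; the target domain and "wordlist.txt"
# are assumptions, not values shipped with this script):
#
#   python 015-Recursion-Tuned-Directory-Bruteforcer.py https://example.com wordlist.txt \
#       --jitter 0.5 --threads 5 --recursive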