fast_subdomain_scanner.py
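"""
Multithreaded subdomain scanner.

Reads candidate subdomain names from a wordlist, probes http://<subdomain>.<domain>
for each one with `requests`, and writes every subdomain that responds to an
output file. A queue feeds the worker threads and a lock protects the shared
result list.
"""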
import requests
from threading import Thread, Lock
from queue import Queue
q = Queue()
list_lock = Lock()
discovered_domains = []
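# The queue holds the candidate subdomains still to be scanned; every worker
# thread pulls from it. The lock guards `discovered_domains`, the result list
# that all workers append to.
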
def scan_subdomains(domain):
    global q
    while True:
        # get the subdomain from the queue
        subdomain = q.get()
        # scan the subdomain
        url = f"http://{subdomain}.{domain}"
        try:
            requests.get(url)
        except requests.ConnectionError:
            pass
        else:
            print("[+] Discovered subdomain:", url)
            # add the subdomain to the global list
            with list_lock:
                discovered_domains.append(url)
        # we're done with scanning that subdomain
        q.task_done()
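# Each q.get() above is matched by a q.task_done() call, so q.join() in the
# main thread can unblock once every queued subdomain has been processed.
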
def main(domain, n_threads, subdomains):
    global q
    # fill the queue with all the subdomains
    for subdomain in subdomains:
        q.put(subdomain)
    for t in range(n_threads):
        # start all threads
        worker = Thread(target=scan_subdomains, args=(domain,))
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start()
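# Note: main() only fills the queue and starts the daemon workers; it returns
# immediately. The q.join() call below is what actually waits for the scan to
# finish before the results are written out.
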
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Faster Subdomain Scanner using Threads")
    parser.add_argument("domain", help="Domain to scan for subdomains without protocol (e.g. without 'http://' or 'https://')")
    parser.add_argument("-l", "--wordlist", help="File that contains all subdomains to scan, line by line. Default is subdomains.txt",
                        default="subdomains.txt")
    parser.add_argument("-t", "--num-threads", help="Number of threads to use to scan the domain. Default is 10", default=10, type=int)
    parser.add_argument("-o", "--output-file", help="Specify the output text file to write discovered subdomains", default="discovered-subdomains.txt")
    args = parser.parse_args()
    domain = args.domain
    wordlist = args.wordlist
    num_threads = args.num_threads
    output_file = args.output_file

    main(domain=domain, n_threads=num_threads, subdomains=open(wordlist).read().splitlines())
    # block until every queued subdomain has been scanned
    q.join()
    # save the discovered subdomains to the output file
    with open(output_file, "w") as f:
        for url in discovered_domains:
            print(url, file=f)
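
# Example usage (example.com is a placeholder; assumes a wordlist file such as
# subdomains.txt with one subdomain name per line):
#   python fast_subdomain_scanner.py example.com -l subdomains.txt -t 20 -o discovered-subdomains.txt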