8 files changed, 289 insertions(+), 0 deletions(-)

------------------------------------------------------------
import socket

# UDP chat client: send typed lines to the server and print what comes back.
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    while True:
        msg = input("Message: ") + "\n"
        client.sendto(msg.encode(), ("127.0.0.1", 4433))
        data, sender = client.recvfrom(1024)
        print(sender[0] + ": " + data.decode())
        # "sair" (Portuguese for "exit") ends the chat from either side.
        if data.decode() == "sair\n" or msg == "sair\n":
            break

    client.close()
except Exception as error:
    print("Connection error")
    print(error)
    client.close()
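A minimal sketch of the UDP server this client talks to; the port and the "sair" convention come from the client above, the rest is illustrative:

import socket

server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(("127.0.0.1", 4433))  # the address the client sends to

while True:
    data, sender = server.recvfrom(1024)
    print(sender[0] + ": " + data.decode())
    reply = input("Message: ") + "\n"
    server.sendto(reply.encode(), sender)
    if data.decode() == "sair\n" or reply == "sair\n":
        break

server.close()

Run the server first, then the client.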
------------------------------------------------------------
import sys

import dns.resolver

resolver = dns.resolver.Resolver()

# Target domain and wordlist path come from the command line.
try:
    alvo = sys.argv[1]
    wordlist = sys.argv[2]
except IndexError:
    print("Usage: python3 dnsbrute.py domain wordlist.txt")
    sys.exit(1)

try:
    with open(wordlist, "r") as arq:
        subdominios = arq.read().splitlines()
except OSError:
    print("Error opening file")
    sys.exit(1)

# Try an A-record lookup for each candidate; failed lookups are skipped.
for subdominio in subdominios:
    try:
        sub_alvo = "{}.{}".format(subdominio, alvo)
        resultados = resolver.resolve(sub_alvo, "A")
        for resultado in resultados:
            print("{} -> {}".format(sub_alvo, resultado))
    except Exception:
        pass
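The resolver runs with system defaults; a minimal sketch of tightening it for brute forcing, using dnspython's documented Resolver attributes (the nameserver choice is an assumption):

resolver = dns.resolver.Resolver()
resolver.timeout = 2        # seconds per nameserver attempt
resolver.lifetime = 4       # total seconds allowed per query
resolver.nameservers = ["8.8.8.8"]   # query a public resolver directly

Usage is unchanged: python3 dnsbrute.py example.com wordlist.txt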
------------------------------------------------------------
import sys

import requests


def brute(url, wordlist):
    # Request each candidate path; anything other than 404 is worth noting.
    for word in wordlist:
        try:
            url_final = "{}/{}".format(url, word.strip())
            response = requests.get(url_final)
            code = response.status_code
            if code != 404:
                print("{} -- {}".format(url_final, code))
        except KeyboardInterrupt:
            sys.exit(0)
        except Exception as error:
            print(error)


if __name__ == "__main__":
    url = sys.argv[1]
    wordlist = sys.argv[2]

    with open(wordlist, "r") as file:
        wordlist = file.readlines()
    brute(url, wordlist)
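Each word opens a fresh TCP connection; a sketch of the same loop over a requests.Session, which reuses connections (a drop-in variant, not the original code):

import sys

import requests


def brute(url, wordlist):
    session = requests.Session()  # keeps connections alive between requests
    for word in wordlist:
        try:
            url_final = "{}/{}".format(url, word.strip())
            code = session.get(url_final).status_code
            if code != 404:
                print("{} -- {}".format(url_final, code))
        except KeyboardInterrupt:
            sys.exit(0)
        except Exception as error:
            print(error)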
------------------------------------------------------------
import re
import sys

import requests
from bs4 import BeautifulSoup

TO_CRAWL = []
CRAWLED = set()


def request(url):
    # Fetch the page with a browser-like User-Agent; None on failure.
    header = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0"}
    try:
        response = requests.get(url, headers=header)
        return response.text
    except KeyboardInterrupt:
        sys.exit(0)
    except Exception:
        pass


# TODO: get_emails(html) -- email extraction was started but not implemented.


def get_links(html):
    # Collect every absolute link from the page's <a href="..."> tags.
    links = []
    try:
        soup = BeautifulSoup(html, "html.parser")
        tags_a = soup.find_all("a", href=True)
        for tag in tags_a:
            link = tag["href"]
            if link.startswith("http"):
                links.append(link)

        return links
    except Exception:
        pass


def crawl():
    # Worklist crawl: pop a URL, queue its unseen links, repeat until empty.
    while True:
        if TO_CRAWL:
            url = TO_CRAWL.pop()

            html = request(url)
            if html:
                links = get_links(html)
                if links:
                    for link in links:
                        if link not in CRAWLED and link not in TO_CRAWL:
                            TO_CRAWL.append(link)

                print("Crawling {}".format(url))

                CRAWLED.add(url)
            else:
                CRAWLED.add(url)
        else:
            print("Done")
            break


if __name__ == "__main__":
    url = sys.argv[1]
    TO_CRAWL.append(url)
    crawl()
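The file imports re and leaves get_emails as a stub; one way it could be filled in, with a deliberately loose pattern (this completion is an assumption, not the author's code):

def get_emails(html):
    # Loose match: word-ish local part, @, dotted domain; dedupe the hits.
    pattern = re.compile(r"[\w.+-]+@[\w-]+\.[\w.-]+")
    return list(set(pattern.findall(html or "")))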
------------------------------------------------------------
import copy
import sys
from urllib import parse

import requests


def request(url):
    headers = {"User-Agent":
               "Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0"}
    try:
        response = requests.get(url, headers=headers)
        html = response.text
        return html
    except Exception:
        pass


def is_vulnerable(html):
    # Error-based check: these strings appear in common MySQL error pages.
    errors = ["mysql_fetch_array()",
              "You have an error in your SQL syntax"]
    for error in errors:
        if error in html:
            return True
    return False


if __name__ == "__main__":
    url = sys.argv[1]
    url_parsed = parse.urlsplit(url)
    params = parse.parse_qs(url_parsed.query)
    # Swap each query parameter for a quote character and look for
    # a database error in the response.
    for param in params.keys():
        query = copy.deepcopy(params)
        for c in "'\"":
            query[param][0] = c
            new_params = parse.urlencode(query, doseq=True)
            url_final = url_parsed._replace(query=new_params)
            url_final = url_final.geturl()
            html = request(url_final)
            if html:
                if is_vulnerable(html):
                    print("[+] {} parameter is vulnerable".format(param))
                    sys.exit(0)

    print("NOT VULNERABLE")
------------------------------------------------------------
import paramiko

host = "127.0.0.1"
user = "kali"
passwd = "kali"

# Connect over SSH, auto-accepting unknown host keys.
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=user, password=passwd)

# Run each typed command remotely and print its output.
while True:
    stdin, stdout, stderr = client.exec_command(input("Command: "))
    for line in stdout.readlines():
        print(line.strip())

    erros = stderr.readlines()
    if erros:
        print(erros)
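The loop above gives no way to end the session; a sketch of an exit keyword plus cleanup, reusing the same client object (the "exit" keyword is a choice, not part of the original):

while True:
    cmd = input("Command: ")
    if cmd.strip() == "exit":
        client.close()  # close the SSH session before quitting
        break
    stdin, stdout, stderr = client.exec_command(cmd)
    for line in stdout.readlines():
        print(line.strip())
    erros = stderr.readlines()
    if erros:
        print(erros)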
------------------------------------------------------------
import socket

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

file = open("output.txt", "w")

try:
    # Accept one connection and save whatever the client sends.
    server.bind(("0.0.0.0", 4466))
    server.listen(5)
    print("Listening...")

    client_socket, address = server.accept()
    print("Received from: " + address[0])

    data = client_socket.recv(1024).decode()

    file.write(data)
    file.close()

    server.close()
except Exception as error:
    print("Error: ", error)
    server.close()
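A minimal client-side sketch for this server, assuming it runs on the same machine (the message text is illustrative):

import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(("127.0.0.1", 4466))   # server listens on 0.0.0.0:4466
client.send("hello, saved to output.txt\n".encode())
client.close()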
------------------------------------------------------------
import sys

import requests
from bs4 import BeautifulSoup

TO_CRAWL = []
CRAWLED = set()


def request(url):
    # Fetch the page with a browser-like User-Agent; None on failure.
    header = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0"}
    try:
        response = requests.get(url, headers=header)
        return response.text
    except KeyboardInterrupt:
        sys.exit(0)
    except Exception:
        pass


def get_links(html):
    # Collect every absolute link from the page's <a href="..."> tags.
    links = []
    try:
        soup = BeautifulSoup(html, "html.parser")
        tags_a = soup.find_all("a", href=True)
        for tag in tags_a:
            link = tag["href"]
            if link.startswith("http"):
                links.append(link)

        return links
    except Exception:
        pass


def crawl():
    # Worklist crawl: pop a URL, queue its unseen links, repeat until empty.
    while True:
        if TO_CRAWL:
            url = TO_CRAWL.pop()

            html = request(url)
            if html:
                links = get_links(html)
                if links:
                    for link in links:
                        if link not in CRAWLED and link not in TO_CRAWL:
                            TO_CRAWL.append(link)

                print("Crawling {}".format(url))

                CRAWLED.add(url)
            else:
                CRAWLED.add(url)
        else:
            print("Done")
            break


if __name__ == "__main__":
    url = sys.argv[1]
    TO_CRAWL.append(url)
    crawl()
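This crawler follows every absolute link it finds, so it quickly wanders off the start site. A small scope check is sketched below; same_domain is a hypothetical helper, not part of the original:

from urllib.parse import urlparse

def same_domain(link, start_url):
    # Keep only links whose host matches the starting host.
    return urlparse(link).netloc == urlparse(start_url).netloc

In crawl(), links would then be queued only when same_domain(link, url) holds, keeping the crawl on the current host.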