opexxx

IPinfo.py

May 2nd, 2014
321
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 12.01 KB | None | 0 0
  1. #!/usr/bin/env python
  2.  
  3. # IPinfo.py was created by Glenn P. Edwards Jr.
  4. #   http://hiddenillusion.blogspot.com
  5. #       @hiddenillusion
  6. # Version 0.1.2
  7. # Date: 12-18-2012
  8.  
  9. """
  10. Usage:
  11. ------
  12.    1) Supply it an IP/site
  13.  
  14. Requirements:
  15. -------------
  16.    - simplejson
  17.    - BeautifulSoup
  18.    - API keys from Virus Total, Google Safe Browsing, Project Honeypot & URLvoid
  19.  
  20. To-do:
  21. --------
  22.    - cleanup netdemon and urlvoid code
  23.     - McAfee SiteAdvisor / TrustedSoruce
  24.     - exsporure.iseclab.org
  25.     - robtex
  26.     - fast flux
  27.    - cleanMX
  28.     - sucuri
  29.     - Phishtank, urlquery & wepawet are already in VirusTotal ...
  30.     - malc0de
  31.     - senderbase
  32.     - spamhause / spamcop
  33.    - if site given, use GeoIP/nslookup to get IP and use for Honeypot?
  34. """
  35. import re
  36. import os
  37. import sys
  38. import base64
  39. import urllib
  40. import urllib2
  41. import socket
  42. import simplejson
  43. from datetime import datetime
  44. from time import localtime, strftime
  45. try:
  46.     from BeautifulSoup import BeautifulSoup, NavigableString
  47. except ImportError:
  48.     try:
  49.         # Things changed a bit in the newer version
  50.         from bs4 import BeautifulSoup, NavigableString 
  51.     except ImportError:    
  52.         print "[!] BeautifulSoup not installed"
  53.         sys.exit(1)
  54.    
# Require exactly one command-line argument: the IP or site to analyze.
if len(sys.argv) == 1:
    print "[!] I need something to analyze"
    sys.exit(1)
else:
    s = sys.argv[1]

# sanity check for WOT
# Strip a leading http:// or https:// scheme so lookups get a bare host/IP.
if re.match('^http(s)?://.*', s):
    s = re.sub('^http(s)?://','',s)
  64.  
  65. def header(msg):
  66.     return "\n" + msg + "\n" + ("=" * 40)
  67.  
  68. def subTitle(msg):
  69.     return msg + "\n" + ("-" * 40)
  70.  
  71. def GeoIP(s):
  72.     """
  73.    GeoIP info: http://freegeoip.net/static/index.html
  74.    Restrictions: < 1,0000 queries an hour
  75.    """
  76.     print (header("GeoIP Information"))
  77.     url = "http://freegeoip.net/json/"
  78.     try:
  79.         req = urllib2.urlopen(url + s)
  80.         result = req.read()
  81.         rpt = simplejson.loads(result)
  82.         for key,value in rpt.iteritems():
  83.             if value:
  84.                 print "%12s : %-s" % (key,value)
  85.     except Exception, msg:
  86.         print msg
  87.  
  88. def Honeypot(s):
  89.     """
  90.    Project Honeypot's Http:BL API info: https://www.projecthoneypot.org/httpbl_api.php
  91.    """
  92.     print (header("Project Honeypot"))
  93.     api_key = ""
  94.     if not api_key:
  95.         print "[!] You must configure your Project Honeypot API key"
  96.         return
  97.  
  98.     def threat_rating(num):
  99.         """
  100.        third octet
  101.        ranges from 0 (low) - 255 (severe)
  102.        http://www.projecthoneypot.org/threat_info.php
  103.        """
  104.         if num <= 25: tr = "Low"
  105.         elif num <= 50: tr = "Medium"
  106.         elif num <= 75: tr = "High"
  107.         elif num <= 100: tr = "Severe"
  108.         else: tr = "N/A"
  109.         return tr
  110.  
  111.     def visitor_type(num):
  112.         """
  113.        fourth octet
  114.        """
  115.         if num == 0: vt = "Search Engine"
  116.         elif num == 1: vt = "Suspicious"
  117.         elif num == 2: vt = "Harvester"
  118.         elif num == 3: vt = "Suspicious & Harvester"
  119.         elif num == 4: vt = "Comment Spammer"
  120.         elif num == 5: vt = "Suspicious & Comment Spammer"
  121.         elif num == 6: vt = "Harvester & Comment Spammer"
  122.         elif num == 7: vt = "Suspicious & Comment Spammer"
  123.         else: vt = "N/A"
  124.         return vt
  125.  
  126.     # if fourth octet is 0 (search engine), 3rd octet is identifier for search engine
  127.     def search_engines(num):
  128.         if num == 0: se = "Undocumented"
  129.         elif num == 1: se = "AltaVista"
  130.         elif num == 2: se = "Ask"
  131.         elif num == 3: se = "Baidu"
  132.         elif num == 4: se = "Excite"
  133.         elif num == 5: se = "Google"
  134.         elif num == 6: se = "Looksmart"
  135.         elif num == 7: se = "Lycos"
  136.         elif num == 8: se = "MSN"
  137.         elif num == 9: se = "Yahoo"
  138.         elif num == 10: se = "Cuil"
  139.         elif num == 11: se = "InfoSeek"
  140.         elif num == 11: se = "Misc"
  141.         else: se = "N/A"
  142.         return se
  143.  
  144.     fields = s.split('.')
  145.     fields.reverse()
  146.     flipped = '.'.join(fields)
  147.     query = api_key + "." + flipped + "." + "dnsbl.httpbl.org"
  148.     try:
  149.         result = socket.gethostbyname(query)
  150.         vt, tr, days_since_last_activity, response_code = [int(octet) for octet in result.split('.')[::-1]]
  151.         if response_code != 127:
  152.             print "Invalid Response Code"
  153.         else:
  154.             print "Visitor type............:",visitor_type(vt)
  155.             if visitor_type == 0:
  156.                 tr = search_engines(tr)
  157.                 print "\t\t(%s)",threat_rating(tr)
  158.             else:
  159.                 print "Threat rating...........: %s (%d)" % (threat_rating(tr),tr)
  160.             print "Days since last activity:",days_since_last_activity
  161.     except socket.gaierror:
  162.         print "Not Listed"
  163.  
  164. def hpHosts(s):
  165.     """
  166.    hpHosts docs : http://hosts-file.net/?s=Help#developers
  167.    """
  168.     url = "http://verify.hosts-file.net/?v=IPinfo&s="
  169.     arg = "&class=true&date=true&ip=true&ipptr=true&nb=1"
  170.     print (header("hpHosts"))
  171.     try:
  172.         page = urllib2.urlopen(url + s + arg)        
  173.         soup = BeautifulSoup(page)
  174.         # strip HTML page breaks etc.
  175.         txt = soup.findAll(text=lambda txt:isinstance(txt, NavigableString))
  176.         for l in txt:
  177.             val = l.split(',')
  178.             """
  179.            Classification explanations: http://hosts-file.net/?s=classifications
  180.            """
  181.             classes = {'ATS': 'Ad/tracking server',
  182.                        'EMD': 'Malware distribution',
  183.                        'EXP': 'Exploit site',
  184.                        'FSA': 'Rogue software distribution',
  185.                        'GRM': 'Astroturfing site',
  186.                        'HFS': 'Spamming',
  187.                        'HJK': 'Hijacking',
  188.                        'MMT': 'Misleading marketing tactics',
  189.                        'PSH': 'Phishing', 'WRZ': 'Warez'};
  190.             # get rid of comments and other junk
  191.             if not re.match('^(#|%|remarks:)', l):
  192.                 if re.search('Listed', val[0]):
  193.                     print "[-] Listed?.......:",val[0].split('[')[0]
  194.                 if classes.has_key(val[1]):
  195.                     print "[-] Classification:",classes[val[1]]
  196.                 if re.match('\d{2}-\d{2}-\d{4}',val[2]):
  197.                     print "[-] Date..........:",val[2]
  198.                 if re.search('NETBLOCK', l):
  199.                     m = re.search('\[NETBLOCK\](.*)\[\/NETBLOCK\]', str(txt))
  200.                     g = m.group(1).split(',')
  201.                     print "[-] Netblock info :"
  202.                     for i in g:
  203.                         # yes ... I'm OCD
  204.                         s = re.sub("(^\su|'|\")","",i)
  205.                         if not re.match('(^%\s|^%$|^$)',s):
  206.                             print s
  207.     except Exception: pass
  208.  
  209. def SafeBrowsing(s):
  210.     """
  211.    Google SafeBrowsing API info: https://developers.google.com/safe-browsing/
  212.    """
  213.     print (header("Google Safe Browsing"))
  214.     api_key = ""
  215.     if not api_key:
  216.         print "[!] You must configure your Google SafeBrowsing API key"
  217.     else:
  218.         url = "https://sb-ssl.google.com/safebrowsing/api/lookup?"
  219.         parameters = {"client": "api",
  220.                       "apikey": api_key,
  221.                       "appver": "1.0",
  222.                       "pver": "3.0",
  223.                       "url": s}
  224.         data = urllib.urlencode(parameters)
  225.         req = url + data
  226.         try:
  227.             response = urllib2.urlopen(req)
  228.             result = response.read()
  229.             if len(result):
  230.                 print "[-] Classification: ",result
  231.             else: print "No Match"
  232.         except Exception: pass
  233.  
  234. def WOT(s):
  235.     """
  236.    WOT API info: http://www.mywot.com/wiki/API
  237.    Restrictions: < 50000 API requests during any 24 hour period & <= 10 requests per second
  238.    """
  239.     print (header("MyWOT"))
  240.  
  241.     # WOT scoring
  242.     def category(num):
  243.         if num == "0": rating = "Trustworthy"
  244.         elif num == "1": rating = "Vendor reliable"
  245.         elif num == "2": rating = "Privacy"
  246.         elif num == "4": rating = "Child safety"
  247.         else: rating = "N/A"
  248.         return rating
  249.  
  250.     # WOT Reputation/Confidence scoring
  251.     def score(num):
  252.         if num > "80": con = "Excellent"
  253.         elif num > "60": con = "Good"
  254.         elif num > "40": con = "Unsatisfactory"
  255.         elif num > "20": con = "Poor"
  256.         elif num > "0": con = "Very Poor"
  257.         else: con = "N/A"
  258.         return con
  259.  
  260.     url = "http://api.mywot.com/0.4/public_query2?target="
  261.     try:    
  262.         page = urllib2.urlopen(url + s).read()
  263.         soup = BeautifulSoup(page)
  264.         hits = soup.findAll('application')
  265.         if len(hits):
  266.             try:
  267.                 print (subTitle("      Category | Reputation | Confidence"))
  268.                 for h in hits:
  269.                     print "%15s: %-s, %s" % (category(h['name']),score(h['r']),score(h['c']))
  270.             except Exception, msg:
  271.                 print msg
  272.         else: print "No Match"
  273.     except Exception: pass    
  274.  
  275. def VirusTotal(s):
  276.     """
  277.    VirusTotal API info: https://www.virustotal.com/documentation/public-api/
  278.    """
  279.     print (header("Virus Total"))
  280.     api_key = ""
  281.     if not api_key:
  282.         print "[!] You must configure your VirusTotal API key"
  283.     else:
  284.         url = "https://www.virustotal.com/vtapi/v2/url/report"
  285.         parameters = {"resource": s,
  286.                       "apikey": api_key}
  287.         data = urllib.urlencode(parameters)
  288.         try:
  289.             req = urllib2.Request(url, data)            
  290.             response = urllib2.urlopen(req)
  291.             result = response.read()
  292.             rpt = simplejson.loads(result)
  293.             date = rpt["scan_date"].split(' ')[0]
  294.             print "Scan Date..:",datetime.strptime(date, "%Y-%m-%d").strftime("%b %d %Y")
  295.             print "Total Scans:",rpt["total"]
  296.             print "Detected...:",rpt["positives"]
  297.             print (subTitle("\t\tScanner | Classification"))
  298.             for scanner in rpt["scans"]:
  299.                 if not re.match('clean site',rpt["scans"][scanner]["result"]):
  300.                     print "%23s : %-s" % (scanner,rpt["scans"][scanner]["result"])
  301.         except Exception, msg:
  302.             print msg
  303.  
  304. def netdemon(s):
  305.     """
  306.    *fix this up*
  307.    Site that helps deobfuscate some of those tricky obfuscations/encodings
  308.    """
  309.     print (header("netdemon"))
  310.     url = "http://www.netdemon.net/decode.cgi?url="
  311.     try:
  312.         page = urllib2.urlopen(url + s)        
  313.         soup = BeautifulSoup(page)
  314.         # strip HTML page breaks etc.
  315.         txt = soup.findAll(text=lambda txt:isinstance(txt, NavigableString))
  316.         for l in txt:
  317.             if re.search('(Protocol|Host|Path):\s*', l):
  318.                 print l
  319.     except Exception: pass
  320.  
  321. def urlVoid(s):
  322.     """
  323.    API info: http://blog.urlvoid.com/urlvoid-api-v2-0/
  324.    Restrictions: < 1,0000 per day
  325.    * if "-1" is returned it means the domain has not been yet scanned
  326.    """
  327.     print (header("URLvoid"))
  328.     api_key = ""
  329.     if not api_key:
  330.         print "[!] You must configure your URLvoid API key"
  331.     else:
  332.         url = "http://api.urlvoid.com/index/exec/"
  333.         parameters = {"domains": s,
  334.                       "api": api_key,
  335.                       "go": 'Check'}
  336.         data = urllib.urlencode(parameters)
  337.         try:
  338.             page = urllib2.urlopen(url, data)
  339.             soup = BeautifulSoup(page)
  340.             new_date = datetime.fromtimestamp(int(soup.find("details")['last_scan'])).strftime("%b %d %Y")
  341.             print "Last Scan  :",new_date
  342.             detect_cnt = soup.find("details")['detected']
  343.             if detect_cnt == "-1":
  344.                 print "Not scanned yet"
  345.             else:
  346.                 print "Detected   :",detect_cnt
  347.             if detect_cnt > "0":
  348.                 print "Detections :",soup.find("details")['lists_detected']
  349.         except Exception, msg:
  350.             print msg
  351. def main():
  352.     GeoIP(s)
  353.     hpHosts(s)
  354.     WOT(s)
  355.     SafeBrowsing(s)
  356.     VirusTotal(s)
  357.     if re.match('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', s):
  358.         Honeypot(s)
  359.     netdemon(s)
  360.     urlVoid(s)
  361.  
  362. if __name__ == "__main__":
  363.         main()
Add Comment
Please, Sign In to add comment