diff --git a/README.md b/README.md
index 939157c..7a81127 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
 # [Darknet Onion Webring (WIP)](http://uptime.nowherejezfoltodf4jiyl6r56jnzintap5vyjlia7fkirfsnfizflqd.onion/)
+
 ```
 # Main features:
@@ -11,17 +12,15 @@
 DONE:
 -py : option 10) perform sanity checks on all csv files (to mark them as sensitive or remove the ones that are blacklisted)
 -py : option 7) Add/Remove words in the sensitive list
 -py : option 8) Add/Remove words in the blacklist
+-manual work: fit all the existing links into the current format one by one
 
 TODO:
--manual work: fit all the existing links into the current format one by one
 -php/css: make the search page pretty
 -doc: redo the documentation for the project
 -doc: finish the blogpost about it
--release it
-
+-release it officially
 ```
 
 Darknet Onion Webring is powered by a minimalistic onion hidden service uptime checker, written in only PHP and Python, because JavaScript is NOT needed.
diff --git a/scripts/darknet_exploration.py b/scripts/darknet_exploration.py
index f204bdd..94c1b82 100644
--- a/scripts/darknet_exploration.py
+++ b/scripts/darknet_exploration.py
@@ -160,6 +160,7 @@ Maintenance:
         uvdf.loc[-1] = newrow  # adding a row
         uvdf.index = uvdf.index + 1  # shifting index
         uvdf = uvdf.sort_index()  # sorting by index
+        uvdf = uvdf.sort_values(by=["Category","Score"], ascending=[True,False])  # sort by category, then by descending score
         print("[+] New row added! now writing the csv file:")
         uvdf.to_csv(unverifiedcsvfile, index=False)
         choice=input("\n[+] Want to add another website ? (y/n) ")
@@ -198,13 +199,15 @@ Maintenance:
         # append it into verified.csv
         vdf.loc[-1] = newrow  # adding a row
         vdf.index = vdf.index + 1  # shifting index
         vdf = vdf.sort_index()  # sorting by index
+        vdf = vdf.sort_values(by=["Category","Score"], ascending=[True,False])  # sort by category, then by descending score
         vdf.to_csv(verifiedcsvfile, index=False)
         print("[+] New row added to verified.csv! now writing to the csv")
         # remove it from unverified.csv
         uvdf.drop(index, inplace=True)
+        uvdf = uvdf.sort_values(by=["Category","Score"], ascending=[True,False])  # sort by category, then by descending score
         uvdf.to_csv(unverifiedcsvfile, index=False)
         print("[+] Link is now moved to verified.csv!")
         choice=input("\n[+] Want to trust another website ? (y/n) ")
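For reference, a minimal standalone sketch of the two-key sort these hunks introduce; the `Category` and `Score` column names come from the project's CSV schema, and the sample rows here are made up:

```python
import pandas as pd

# Hypothetical rows mirroring the verified.csv/unverified.csv schema.
df = pd.DataFrame({
    "Name": ["a", "b", "c"],
    "Category": ["Search", "Forum", "Search"],
    "Score": [80, 100, 95],
})

# Category ascending (grouped A-Z), Score descending (highest uptime first in each group).
df = df.sort_values(by=["Category", "Score"], ascending=[True, False])
print(df)
```

This largely makes the preceding `sort_index()` call cosmetic, since `sort_values` reorders the rows again right after it.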
(y/n) ") diff --git a/scripts/tests/banner.png b/scripts/old/tests/banner.png similarity index 100% rename from scripts/tests/banner.png rename to scripts/old/tests/banner.png diff --git a/scripts/tests/blacklist.csv b/scripts/old/tests/blacklist.csv similarity index 100% rename from scripts/tests/blacklist.csv rename to scripts/old/tests/blacklist.csv diff --git a/scripts/tests/checkimagesize.py b/scripts/old/tests/checkimagesize.py similarity index 100% rename from scripts/tests/checkimagesize.py rename to scripts/old/tests/checkimagesize.py diff --git a/scripts/tests/checkwebringinstanceexists.py b/scripts/old/tests/checkwebringinstanceexists.py similarity index 100% rename from scripts/tests/checkwebringinstanceexists.py rename to scripts/old/tests/checkwebringinstanceexists.py diff --git a/scripts/tests/csvwork.py b/scripts/old/tests/csvwork.py similarity index 100% rename from scripts/tests/csvwork.py rename to scripts/old/tests/csvwork.py diff --git a/scripts/tests/csvwork2.py b/scripts/old/tests/csvwork2.py similarity index 100% rename from scripts/tests/csvwork2.py rename to scripts/old/tests/csvwork2.py diff --git a/scripts/tests/favicon.png b/scripts/old/tests/favicon.png similarity index 100% rename from scripts/tests/favicon.png rename to scripts/old/tests/favicon.png diff --git a/scripts/tests/sensitive.csv b/scripts/old/tests/sensitive.csv similarity index 100% rename from scripts/tests/sensitive.csv rename to scripts/old/tests/sensitive.csv diff --git a/scripts/tests/unverified.csv b/scripts/old/tests/unverified.csv similarity index 100% rename from scripts/tests/unverified.csv rename to scripts/old/tests/unverified.csv diff --git a/scripts/tests/verified.csv b/scripts/old/tests/verified.csv similarity index 100% rename from scripts/tests/verified.csv rename to scripts/old/tests/verified.csv diff --git a/scripts/tests/webring-participants.csv b/scripts/old/tests/webring-participants.csv similarity index 100% rename from scripts/tests/webring-participants.csv rename to scripts/old/tests/webring-participants.csv diff --git a/scripts/tests/writefile.py b/scripts/old/tests/writefile.py similarity index 100% rename from scripts/tests/writefile.py rename to scripts/old/tests/writefile.py diff --git a/scripts/old/uptimechecker.py b/scripts/old/uptimechecker.py new file mode 100644 index 0000000..51ed0da --- /dev/null +++ b/scripts/old/uptimechecker.py @@ -0,0 +1,65 @@ +import csv +import requests +import json +import pandas as pd +import glob + +#apt install python3-pandas python3-requests python3-socks + +def main(): + print('[+] ONION UPTIME CHECKER') + + proxies = { + 'http': 'socks5h://127.0.0.1:9050', + 'https': 'socks5h://127.0.0.1:9050' + } + + for csvfile in glob.glob("/srv/darknet-onion-webring/www/links/*.csv"): + print('[+] Reading the CSV File:', csvfile) + + df = pd.read_csv(csvfile) + print('[+] Checking if each .onion link is reachable:') + for i in range(df.index.stop): + print("[+] Editing the uptime score") + #if empty, set to 100 + if pd.isnull(df.at[i,"Score"]): + df.at[i,"Score"] = 100 + + print(i) + #print(df.at[i,"URL"]) + url=df.at[i,"URL"] + try: + status = requests.get(url,proxies=proxies, timeout=5).status_code + print('[+]',url,status) + if status != 502: + print(url,"✔️") + df.at[i,"Status"]="✔️" + #if uptime <100 do +1 to the value + if df.at[i,"Score"] < 100: + df.at[i,"Score"] = df.at[i,"Score"] + 1 + else: + print(url,"❌") + df.at[i,"Status"]="❌" + #if uptime >0 do -1 to the value + if df.at[i,"Score"] > 0: + df.at[i,"Score"] = df.at[i,"Score"] - 1 
diff --git a/scripts/uptimechecker.py b/scripts/uptimechecker.py
index 51ed0da..77141aa 100644
--- a/scripts/uptimechecker.py
+++ b/scripts/uptimechecker.py
@@ -1,3 +1,4 @@
+import os,pwd,re
 import csv
 import requests
 import json
@@ -9,17 +10,51 @@ import glob
 
 def main():
     print('[+] ONION UPTIME CHECKER')
 
+    # TODO: get the instance name and exit if it's not there
+    rootpath='/srv/darknet-onion-webring/'
+    urlpath=pwd.getpwuid(os.getuid()).pw_dir+"/.darknet_participant_url"
+    #print(urlpath)
+
+    # check if ~/.darknet_participant_url exists;
+    # if it exists, instance = the content of ~/.darknet_participant_url (the URL, such as uptime.nowherejez...onion)
+    isitvalid="n"
+    while isitvalid != "y":
+        if os.path.isfile(urlpath):
+            with open(urlpath) as f:
+                instance = f.read().rstrip()
+            # check if the instance URL domain is valid
+            #print(urlpath,instance)
+            if IsOnionValid(instance):
+                print("[+] Instance Name:",instance,IsOnionValid(instance))
+                isitvalid="y"
+            else:
+                print('[-] Invalid instance name in ~/.darknet_participant_url:', instance)
+                return False
+        else:
+            print("[-] Instance path doesn't exist yet, run darknet_exploration.py to set it up")
+            return False
+
+    #i=input("continue?")
     proxies = {
         'http': 'socks5h://127.0.0.1:9050',
         'https': 'socks5h://127.0.0.1:9050'
     }
-
-    for csvfile in glob.glob("/srv/darknet-onion-webring/www/links/*.csv"):
+
+    instancepath=rootpath+'www/participants/'+instance+'/'
+    csvfiles2check=['verified.csv','unverified.csv','webring-participants.csv']
+    csvfiles2sortcat=['verified.csv','unverified.csv']
+
+    #for csvfile in glob.glob("/srv/darknet-onion-webring/www/links/*.csv"):
+    for csvfilename in csvfiles2check:
+        csvfile = instancepath+csvfilename
         print('[+] Reading the CSV File:', csvfile)
 
         df = pd.read_csv(csvfile)
+        print(df[['Name','URL']])
         print('[+] Checking if each .onion link is reachable:')
-        for i in range(df.index.stop):
+        #for i in range(df.index.stop):
+        for i in df.index:
             print("[+] Editing the uptime score")
             # if empty, set to 100
             if pd.isnull(df.at[i,"Score"]):
@@ -29,6 +64,10 @@ def main():
             #print(df.at[i,"URL"])
             url=df.at[i,"URL"]
             try:
+                # prepend http:// when the URL has no scheme
+                index1 = url.find("http://")
+                index2 = url.find("https://")
+                if index1 == -1 and index2 == -1:
+                    url = "http://"+url
                 status = requests.get(url,proxies=proxies, timeout=5).status_code
                 print('[+]',url,status)
                 if status != 502:
@@ -59,7 +98,92 @@ def main():
                 df.at[i,"Score"] = df.at[i,"Score"] - 1
 
         df2 = df.sort_values(by=["Score"], ascending=False)
+        # sort by category for verified.csv/unverified.csv
+        if csvfilename in csvfiles2sortcat:
+            df2 = df.sort_values(by=["Category"], ascending=True)
         #print(df2)
         df2.to_csv(csvfile, index=False)
+
+
+def IsUrlValid(url:str)->bool:
+    """
+    Check if a URL is valid, both darknet and clearnet.
+    """
+    # check that the characters are only [a-zA-Z0-9.:/], 128 chars maximum
+    # check that it is only http(s)://wordA.wordB or http(s)://wordA.wordB.wordC (onion or not; clearnet is fine too)
+    # if OK return True, if not return False
+    pattern = re.compile("^[A-Za-z0-9:/.]+$")
+    url = str(url)
+    if url.endswith('.onion'):
+        return IsOnionValid(url)
+    else:
+        if '.' not in url:
+            #print("No (DOT) in clearnet url")
+            return False
+        if pattern.fullmatch(url) is None:
+            #print('Url contains invalid chars')
+            return False
+        return True

+def IsOnionValid(url: str)-> bool:
+    """
+    Check whether the domain (param) is a valid onion domain; return True if so, else False.
+    """
+    # check that the characters are only [a-zA-Z0-9.], 128 chars maximum
+    # check that it is only url.onion or subdomain.url.onion
+    # if OK return True, if not return False
+    try:
+        pattern = re.compile(r"^[A-Za-z0-9.]+(\.onion)?$")
+        url = url.strip().removesuffix('/')
+        if url.startswith('http://'):
+            #print('URL starts with http')
+            # remove the http://
+            domain = url.split('/')[2]
+            if pattern.fullmatch(domain) is not None:
+                if len(domain.split('.')) > 3:
+                    n_subdomains = len(domain.split('.'))
+                    # reject more than one subdomain: "subdomain.url.onion" only
+                    #print(f"This domain has more than one subdomain. There are {n_subdomains} subdomains")
+                    return False
+                else:
+                    if len(domain) < 62:
+                        #print("Domain length is less than 62.")
+                        return False
+                    return True
+            elif pattern.fullmatch(domain) is None:
+                #print("Domain contains invalid character.")
+                #print(domain)
+                return False
+            else:
+                #print("Domain not valid")
+                return False
+        else:
+            # TODO: edit the url to make sure it has http:// at the beginning in case it's missing? (problem: this function only returns True or False)
+            #print("URL doesn't start with http")
+            if pattern.fullmatch(url) is not None:
+                if len(url.split('.')) > 3:
+                    n_subdomains = len(url.split('.'))
+                    # reject more than one subdomain: "subdomain.url.onion" only
+                    #print(f"This domain has more than one subdomain. There are {n_subdomains - 1} subdomains")
+                    return False
+                else:
+                    if len(url) < 62:
+                        #print("Domain length is less than 62.")
+                        return False
+                    return True
+            elif pattern.fullmatch(url) is None:
+                #print("Domain contains invalid character.")
+                #print(url)
+                return False
+            else:
+                #print("Domain not valid")
+                return False
+    except Exception as e:
+        print(f"Error: {e}")
+
+
 if __name__ == '__main__':
     main()
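A few illustrative calls against the validator above, assuming `scripts/uptimechecker.py` is importable as a module; the addresses are placeholders. Real v3 onion domains are 56 base32 characters plus `.onion` (62 characters total), which is what the `len(...) < 62` check approximates:

```python
from uptimechecker import IsOnionValid  # assumes the script is on the import path

valid = "x" * 56 + ".onion"                 # 62 chars, shaped like a v3 onion domain
print(IsOnionValid("http://" + valid))      # True
print(IsOnionValid(valid))                  # True: the scheme-less form is accepted
print(IsOnionValid("http://short.onion"))   # False: shorter than 62 characters
print(IsOnionValid("http://a.b." + valid))  # False: more than one subdomain
```

Note the character class also admits symbols that never occur in base32 (`0`, `1`, `8`, `9`, uppercase), so this validates shape and length rather than strict v3 encoding.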
diff --git a/scripts/uptimecheckernew.py b/scripts/uptimecheckernew.py
deleted file mode 100644
index 62c3a3a..0000000
--- a/scripts/uptimecheckernew.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import os,pwd,re
-import csv
-import requests
-import json
-import pandas as pd
-import glob
-
-#apt install python3-pandas python3-requests python3-socks
-
-def main():
-    print('[+] ONION UPTIME CHECKER')
-
-    # TODO get the instance name and exit if its not there
-    rootpath='/srv/darknet-onion-webring/'
-    urlpath=pwd.getpwuid(os.getuid()).pw_dir+"/.darknet_participant_url"
-    #print(urlpath)
-
-    # check if ~/.darknet_participant_url exists,
-    # if exists, instance= the content of ~/.darknet_participant_url (which is the url: such as uptime.nowherejez...onion)
-    isitvalid="n"
-    while isitvalid != "y":
-        if os.path.isfile(urlpath):
-            with open(urlpath) as f:
-                instance = f.read().rstrip()
-            # check if the instance URL domain is valid
-            #print(urlpath,instance)
-            if IsOnionValid(instance):
-                print("[+] Instance Name:",instance,IsOnionValid(instance))
-                isitvalid="y"
-            else:
-                print('[-] Invalid instance name in ~/.darknet_participant_url:', instance)
-                return False
-        else:
-            print("[-] Instance path doesn't exist yet, run darknet_exploration.py to set it up")
-            return False
-
-    #i=input("continue?")
-    proxies = {
-        'http': 'socks5h://127.0.0.1:9050',
-        'https': 'socks5h://127.0.0.1:9050'
-    }
-
-    instancepath=rootpath+'www/participants/'+instance+'/'
-    csvfiles2check=['verified.csv','unverified.csv','webring-participants.csv']
-    csvfiles2sortcat=['verified.csv','unverified.csv']
-
-    #for csvfile in glob.glob("/srv/darknet-onion-webring/www/links/*.csv"):
-    for csvfilename in csvfiles2check:
-        csvfile = instancepath+csvfilename
-        print('[+] Reading the CSV File:', csvfile)
-
-        df = pd.read_csv(csvfile)
-        print(df[['Name','URL']])
-        print('[+] Checking if each .onion link is reachable:')
-        #for i in range(df.index.stop):
-        for i in df.index:
-            print("[+] Editing the uptime score")
-            #if empty, set to 100
-            if pd.isnull(df.at[i,"Score"]):
-                df.at[i,"Score"] = 100
-
-            print(i)
-            #print(df.at[i,"URL"])
-            url=df.at[i,"URL"]
-            try:
-                index1 = url.find("http://")
-                index2 = url.find("https://")
-                if index1 == -1 and index2 == -1:
-                    url = "http://"+url
-                status = requests.get(url,proxies=proxies, timeout=5).status_code
-                print('[+]',url,status)
-                if status != 502:
-                    print(url,"✔️")
-                    df.at[i,"Status"]="✔️"
-                    #if uptime <100 do +1 to the value
-                    if df.at[i,"Score"] < 100:
-                        df.at[i,"Score"] = df.at[i,"Score"] + 1
-                else:
-                    print(url,"❌")
-                    df.at[i,"Status"]="❌"
-                    #if uptime >0 do -1 to the value
-                    if df.at[i,"Score"] > 0:
-                        df.at[i,"Score"] = df.at[i,"Score"] - 1
-            except requests.ConnectionError as e:
-                #print(e)
-                print(url,"❌")
-                df.at[i,"Status"]="❌"
-                #if uptime >0 do -1 to the value
-                if df.at[i,"Score"] > 0:
-                    df.at[i,"Score"] = df.at[i,"Score"] - 1
-            except requests.exceptions.ReadTimeout as e:
-                #print(e)
-                print(url,"❌")
-                df.at[i,"Status"]="❌"
-                #if uptime >0 do -1 to the value
-                if df.at[i,"Score"] > 0:
-                    df.at[i,"Score"] = df.at[i,"Score"] - 1
-
-        df2 = df.sort_values(by=["Score"], ascending=False)
-        #sort by category if you are verified/unverified.csv
-        if csvfilename in csvfiles2sortcat:
-            df2 = df.sort_values(by=["Category"], ascending=False)
-        #print(df2)
-        df2.to_csv(csvfile, index=False)
-
-
-def IsUrlValid(url:str)->bool:
-    """
-    Check if url is valid both darknet and clearnet.
-    """
-    # check if the characters are only [a-zA-Z0-9.:/] with maximum 128 chars max?
-    # check that it is only http(s)://wordA.wordB or http(s)://WordC.WordB.WordC, (onion or not), clearnet is fine too
-    # if OK return True
-    # if not return False
-    pattern = re.compile("^[A-Za-z0-9:/.]+$")
-    url = str(url)
-    if url.endswith('.onion'):
-        return IsOnionValid(url)
-    else:
-        if not url.__contains__('.'):
-            #print("No (DOT) in clearnet url")
-            return False
-        if pattern.fullmatch(url) is None:
-            #print('Url contains invalid chars')
-            return False
-        return True
-
-def IsOnionValid(url: str)-> bool:
-    """
-    Checks if the domain(param) is a valid onion domain and return True else False.
-    """
-    # check if the characters are only [a-zA-Z0-9.] with maximum 128 chars max?
-    # check that it is only url.onion or subdomain.url.onion,
-    # if OK return True
-    # if not return False
-    try:
-        pattern = re.compile("^[A-Za-z0-9.]+(\.onion)?$")
-        url = url.strip().removesuffix('/')
-        if url.startswith('http://'):
-            #print('URL starts with http')
-            # Removes the http://
-            domain = url.split('/')[2]
-            if pattern.fullmatch(domain) is not None:
-                if len(domain.split('.')) > 3:
-                    n_subdomians = len(domain.split('.'))
-                    # Checks if there is more than 1 subdomain. "subdomain.url.onion" only
-                    #print(f"This domain have more than one subdomain. There are {n_subdomians} subdomains")
-                    return False
-                else:
-                    if len(domain) < 62:
-                        #print("Domain length is less than 62.")
-                        return False
-                    return True
-            elif pattern.fullmatch(domain) is None:
-                #print("Domain contains invalid character.")
-                #print(domain)
-                return False
-            else:
-                #print("Domain not valid")
-                return False
-        else:
-            #TODO : edit the url to make sure it has http:// at the beginning, in case if it's missing? (problem is that it only returns true or false)
-            #print("URL doesn't start http")
-            if pattern.fullmatch(url) is not None:
-                if len(url.split('.')) > 3:
-                    n_subdomians = len(url.split('.'))
-                    # Checks if there is more than 1 subdomain. "subdomain.url.onion" only
-                    #print(f"This domain have more than one subdomain. There are {n_subdomians - 1} subdomains")
-                    return False
-                else:
-                    if len(url) < 62:
-                        #print("Domain length is less than 62.")
-                        return False
-                    return True
-            elif pattern.fullmatch(url) is None:
-                #print("Domain contains invalid character.")
-                #print(url)
-                return False
-            else:
-                #print("Domain not valid")
-                return False
-    except Exception as e:
-        print(f"Error: {e}")
-
-
-
-if __name__ == '__main__':
-    main()
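Both the rewritten `uptimechecker.py` and the duplicate deleted above resolve the participant's instance name from a dotfile in the invoking user's home directory. A minimal sketch of just that lookup, assuming the same `~/.darknet_participant_url` convention:

```python
import os
import pwd

def read_instance():
    """Return the onion domain stored in ~/.darknet_participant_url, or None if absent."""
    home = pwd.getpwuid(os.getuid()).pw_dir  # home directory of the invoking user
    urlpath = os.path.join(home, ".darknet_participant_url")
    if not os.path.isfile(urlpath):
        return None
    with open(urlpath) as f:
        return f.read().rstrip()

instance = read_instance()
print(instance or "[-] Instance path doesn't exist yet, run darknet_exploration.py to set it up")
```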
diff --git a/www/header.php b/www/header.php
index bdb035a..0f2dd1d 100644
--- a/www/header.php
+++ b/www/header.php
@@ -1,11 +1,3 @@
-Darknet Onion Webring - Non-KYC Services - Sensitive Darknet Websites - Monero Nodes - Nowhere Infrastructure
-
-
-Update Frequency: every 3 hours
-
-
-" . $resultcount . " Result(s) found.
-";
+
+
+
+function DisplayCategories($instancename, $path) {
+    $resultcount = 0;
+    $row = 1;
+    //echo $instancename;
+    //$csvfilenames = "verified.csv unverified.csv";
+    $csvfilenames = "verified.csv";
+    $csvfilenames = explode(' ', $csvfilenames);
+
+    foreach ($csvfilenames as $csvfilename) {
+        //echo $csvfilename;
+        $csvfile = $path . $csvfilename;
+        //echo $csvfile;
+        //echo "test";
+        echo "