Any suggestions on how to change the URL lookup so that the script reads all of the URLs from a .txt file? And how to save the downloads together with their respective start and end dates and times?
from datetime import datetime
from multiprocessing.pool import ThreadPool
from pathlib import Path

import requests
def download_url(url):
    """Download *url* into the current directory and return the url.

    The file name is taken from the last path segment of the URL
    (e.g. abc/xyz/file.txt -> file.txt).  The start and end date/time
    of the transfer are printed so each download can be correlated
    with when it began and finished.
    """
    # rsplit is equivalent to the rfind/slice idiom but clearer.
    file_name = url.rsplit("/", 1)[-1]
    if not file_name:
        # URL ends with "/": avoid trying to open an empty file name.
        file_name = "index.html"
    start = datetime.now()
    print("downloading:", url, "start:", start)
    # "with" ensures the connection is released even on a non-OK status
    # (stream=True would otherwise hold it open); timeout prevents a
    # dead server from hanging this worker thread forever.
    with requests.get(url, stream=True, timeout=30) as r:
        if r.status_code == requests.codes.ok:
            with open(file_name, "wb") as f:
                # Explicit chunk size: iterating the response directly
                # yields tiny line-delimited chunks, which is slow.
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
    end = datetime.now()
    print("finished:", url, "end:", end)
    return url
# Read the download targets from urls.txt (one URL per line) when the
# file exists; otherwise fall back to the hard-coded list below.
url_file = Path("urls.txt")
if url_file.is_file():
    urls = [line.strip() for line in url_file.read_text().splitlines()
            if line.strip()]
else:
    urls = ["http://3ws3t4uo7fehnn4qpmadk3zjrxta5xlt3gsc5mx4sztrsy7ficuz5ayd.onion/FUGRO/1.zip",
            "http://test.onion//2.z01",
            "http://test.onion/2.z02",
            "http://test.onion/2.z03",
            "http://test.onion/2.z04"]
# Run 5 worker threads; each call takes the next element in the urls
# list.  The context manager terminates/joins the pool when done
# (the original leaked the pool by never closing it).
with ThreadPool(5) as pool:
    for r in pool.imap_unordered(download_url, urls):
        print(r)
And how to save the downloads together with their respective start and end dates and times?
import requests
from multiprocessing.pool import ThreadPool
def download_url(url):
    """Download *url* into the current directory and return the url.

    The file name is taken from the last path segment of the URL
    (e.g. abc/xyz/file.txt -> file.txt).  The start and end date/time
    of the transfer are printed so each download can be correlated
    with when it began and finished.
    """
    # rsplit is equivalent to the rfind/slice idiom but clearer.
    file_name = url.rsplit("/", 1)[-1]
    if not file_name:
        # URL ends with "/": avoid trying to open an empty file name.
        file_name = "index.html"
    start = datetime.now()
    print("downloading:", url, "start:", start)
    # "with" ensures the connection is released even on a non-OK status
    # (stream=True would otherwise hold it open); timeout prevents a
    # dead server from hanging this worker thread forever.
    with requests.get(url, stream=True, timeout=30) as r:
        if r.status_code == requests.codes.ok:
            with open(file_name, "wb") as f:
                # Explicit chunk size: iterating the response directly
                # yields tiny line-delimited chunks, which is slow.
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
    end = datetime.now()
    print("finished:", url, "end:", end)
    return url
# Read the download targets from urls.txt (one URL per line) when the
# file exists; otherwise fall back to the hard-coded list below.
url_file = Path("urls.txt")
if url_file.is_file():
    urls = [line.strip() for line in url_file.read_text().splitlines()
            if line.strip()]
else:
    urls = ["http://3ws3t4uo7fehnn4qpmadk3zjrxta5xlt3gsc5mx4sztrsy7ficuz5ayd.onion/FUGRO/1.zip",
            "http://test.onion//2.z01",
            "http://test.onion/2.z02",
            "http://test.onion/2.z03",
            "http://test.onion/2.z04"]
# Run 5 worker threads; each call takes the next element in the urls
# list.  The context manager terminates/joins the pool when done
# (the original leaked the pool by never closing it).
with ThreadPool(5) as pool:
    for r in pool.imap_unordered(download_url, urls):
        print(r)