• XSS.stack #1 – первый литературный журнал от юзеров форума

Download multi file from url fast

blad3runn3r

RAID-массив
Пользователь
Регистрация
06.08.2021
Сообщения
82
Реакции
7
Any suggestions on how to change the script so that, instead of hard-coding the URLs, it reads all the URLs to search from a txt file?
And how to log each download with its respective start and end date and time?


import requests
from multiprocessing.pool import ThreadPool

def download_url(url):
    """Download the file at *url* into the current working directory.

    The local file name is taken from the last path segment of the URL
    (e.g. abc/xyz/file.txt -> file.txt).

    Returns the url on success so the caller can report which download
    finished; returns None if the server did not answer with 200 OK.
    """
    print("downloading: ", url)
    # assumes that the last segment after the / represents the file name
    # if url is abc/xyz/file.txt, the file name will be file.txt
    file_name_start_pos = url.rfind("/") + 1
    file_name = url[file_name_start_pos:]

    # stream=True avoids loading the whole body into memory; the `with`
    # guarantees the connection is released even on the non-ok path.
    # timeout prevents a dead host from hanging the worker thread forever.
    with requests.get(url, stream=True, timeout=(10, 60)) as r:
        if r.status_code == requests.codes.ok:
            with open(file_name, 'wb') as f:
                # iter_content with an explicit chunk size is much faster
                # than iterating the response directly (128-byte chunks).
                for data in r.iter_content(chunk_size=64 * 1024):
                    f.write(data)
            return url


urls = ["http://3ws3t4uo7fehnn4qpmadk3zjrxta5xlt3gsc5mx4sztrsy7ficuz5ayd.onion/FUGRO/1.zip",
"http://test.onion//2.z01",
"http://test.onion/2.z02",
"http://test.onion/2.z03",
"http://test.onion/2.z04" ]

# Run 5 multiple threads. Each call will take the next element in urls list
results = ThreadPool(5).imap_unordered(download_url, urls)
for r in results:
print(r)
 
You can use os.path.basename to extract only the filename, or url.split('/')[-1].
Python:
>>> import os
>>> urls = ["http://3ws3t4uo7fehnn4qpmadk3zjrxta5xlt3gsc5mx4sztrsy7ficuz5ayd.onion/FUGRO/1.zip",
... "http://test.onion//2.z01",
... "http://test.onion/2.z02",
... "http://test.onion/2.z03",
... "http://test.onion/2.z04" ]
>>> for url in urls:
...     print(os.path.basename(url))

1.zip
2.z01
2.z02
2.z03
2.z04
>>> for url in urls:
...     print(url.split('/')[-1])

1.zip
2.z01
2.z02
2.z03
2.z04
 


Напишите ответ...
  • Вставить:
Прикрепить файлы
Верх