SCRAPE-URLS-DOWNLOAD.pyw

import os
import sys
import io
import time        # used to avoid busy-waiting while polling subprocesses
import threading
import subprocess

import requests
from bs4 import BeautifulSoup
from tkinter import *
from tkinter import messagebox, filedialog
from urllib.parse import urljoin, urlparse
import yt_dlp
from PIL import Image, ImageTk

stop_download_flag = False

#================ADD-IMAGE-ICON=================
def resource_path(relative_path):
    """ Get the absolute path to the resource, works for PyInstaller. """
    if getattr(sys, '_MEIPASS', False):
        return os.path.join(sys._MEIPASS, relative_path)
    return os.path.join(os.path.abspath("."), relative_path)

# Use this function to load bundled files:
#splash_image = resource_path("splash-1.png")
icon_path = resource_path("D.ico")
#================ADD-IMAGE-ICON=================
media_urls = []

special_sites = ['youtube.com', 'youtu.be', 'facebook.com', 'fb.watch', 'tiktok.com', 'instagram.com']
image_exts = ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.svg', '.ico']
video_exts = ['.mp4', '.webm', '.ogg', '.mov', '.avi', '.mkv', '.flv', '.3gp', '.wmv', '.m3u', '.m3u8']

def is_special_site(url):
    return any(domain in url for domain in special_sites)

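# Hedged alternative (not wired in): the substring test above also matches URLs
# that merely contain a listed domain anywhere in the string (e.g. a path like
# /watch?ref=youtube.com). A stricter check compares the actual hostname, using
# urlparse already imported above. _is_special_host is a hypothetical helper:
def _is_special_host(url):
    host = (urlparse(url).hostname or "").lower()
    return any(host == d or host.endswith("." + d) for d in special_sites)
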
def browse_url_file():
    file_path = filedialog.askopenfilename(title="Open URL File", filetypes=[("Text files", "*.txt")])
    if file_path:
        with open(file_path, 'r') as f:
            for line in f:
                url = line.strip()
                if url and url not in media_urls:
                    media_urls.append(url)
                    result_box.insert(END, url + "\n")

def save_urls_to_file():
    file_path = filedialog.asksaveasfilename(defaultextension=".txt", filetypes=[("Text files", "*.txt")])
    if file_path:
        with open(file_path, 'w') as f:
            f.write(result_box.get("1.0", END).strip())
        messagebox.showinfo("Saved", f"URLs saved to {file_path}")

def scrape_normal_site(url):
    """Collect image/video links from a regular web page."""
    found_urls = set()
    try:
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            return found_urls

        soup = BeautifulSoup(response.text, 'html.parser')
        for tag in soup.find_all(['img', 'video', 'source', 'a']):
            src = tag.get('src') or tag.get('href')
            if src:
                full_url = urljoin(url, src)
                parsed = urlparse(full_url)
                ext = os.path.splitext(parsed.path)[1].lower()
                if ext in image_exts + video_exts:
                    found_urls.add(full_url)
    except Exception:
        pass  # network/parse errors: return whatever was collected so far
    return found_urls

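# Hedged note: scrape_normal_site only reads src/href attributes, so images that
# pages lazy-load through attributes like data-src (a common but not universal
# convention) are missed. An optional extra pass inside the function could be:
#
#     for tag in soup.find_all('img'):
#         lazy = tag.get('data-src')
#         if lazy:
#             found_urls.add(urljoin(url, lazy))
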
def process_url():
    url = url_entry.get().strip()
    if not url:
        messagebox.showwarning("Input Error", "Please enter a valid URL.")
        return

    media_urls.clear()
    result_box.delete("1.0", END)

    try:
        if is_special_site(url):
            ydl_opts = {
                'quiet': True,
                'skip_download': True,
                'force_generic_extractor': False
            }
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                info = ydl.extract_info(url, download=False)
                if 'entries' in info:
                    for entry in info['entries']:
                        if not entry or not entry.get('webpage_url'):
                            continue  # yt-dlp can yield None for unavailable playlist items
                        media_urls.append(entry['webpage_url'])
                        result_box.insert(END, entry['webpage_url'] + "\n")
                else:
                    media_urls.append(info['webpage_url'])
                    result_box.insert(END, info['webpage_url'] + "\n")
        else:
            scraped = scrape_normal_site(url)
            media_urls.extend(scraped)
            for media_url in scraped:
                result_box.insert(END, media_url + "\n")

        if not media_urls:
            messagebox.showinfo("Info", "No media URLs found.")
        else:
            messagebox.showinfo("Success", f"{len(media_urls)} media URL(s) found!")

    except Exception as e:
        messagebox.showerror("Error", str(e))

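# Hedged note: for large playlists, extract_info() above resolves metadata for
# every entry, which can be slow. yt-dlp's 'extract_flat' option lists entries
# without resolving each one, e.g.:
#
#     ydl_opts = {'quiet': True, 'skip_download': True, 'extract_flat': 'in_playlist'}
#
# Flat entries may only carry 'url' instead of 'webpage_url', so the loop would
# then need entry.get('webpage_url') or entry.get('url').
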
def download_media(url, save_path):
    try:
        if is_special_site(url):
            ytdlp_path = r"C:\Windows\yt-dlp.exe"  # Replace if needed
            command = [
                ytdlp_path,
                "-f", "best",
                "--no-playlist",
                "--extractor-args", "youtube:player_client=web",
                "-o", save_path,
                url
            ]
            result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
            if result.returncode != 0:
                raise Exception(result.stderr.strip())
        else:
            response = requests.get(url, stream=True)
            if response.status_code != 200:
                raise Exception(f"HTTP {response.status_code} while fetching {url}")
            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(1024):
                    f.write(chunk)
    except Exception as e:
        messagebox.showerror("Download Error", f"Failed to download:\n{url}\n\n{str(e)}")

def download_selected_line():
    try:
        line_index = result_box.index(INSERT).split(".")[0]
        selected_url = result_box.get(f"{line_index}.0", f"{line_index}.end").strip()
        if not selected_url:
            raise Exception("No line selected.")

        folder = filedialog.askdirectory(title="Select Folder to Save File")
        if not folder:
            return

        parsed = urlparse(selected_url)
        filename = os.path.basename(parsed.path)
        if not filename:
            filename = "downloaded_file"

        save_path = os.path.join(folder, filename)
        threading.Thread(target=threaded_download, args=(selected_url, save_path), daemon=True).start()

    except Exception as e:
        messagebox.showerror("Error", str(e))

def download_selected():
    # Note: this loop runs on the Tk main thread, so the window is unresponsive
    # while a large batch downloads; wrapping it in a worker thread (as
    # download_selected_line does) would keep the UI alive.
    selected_urls = result_box.get("1.0", END).strip().splitlines()
    if not selected_urls:
        messagebox.showwarning("Selection Error", "No URLs to download.")
        return

    selected = filedialog.askdirectory(title="Select Folder to Save Files")
    if not selected:
        return

    for url in selected_urls:
        parsed = urlparse(url)
        filename = os.path.basename(parsed.path)
        if not filename:
            filename = "downloaded_file.mp4"

        save_path = os.path.join(selected, filename)
        download_media(url, save_path)

    messagebox.showinfo("Download Complete", f"Downloaded {len(selected_urls)} media files.")

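# Hedged sketch: URLs without a usable path component all collapse to the
# generic "downloaded_file" names above. When the server sends a
# Content-Disposition header, the real filename can be recovered from the
# response instead. _filename_from_response is a hypothetical helper and is
# not wired into the handlers above:
def _filename_from_response(response, fallback="downloaded_file"):
    import re
    cd = response.headers.get("Content-Disposition", "")
    match = re.search(r'filename="?([^";]+)"?', cd)
    return match.group(1) if match else fallback
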
def fix_partial_video(input_path):
    """Try to repair a partially downloaded MP4: quick remux first, re-encode as fallback."""
    try:
        if not os.path.exists(input_path) or not input_path.lower().endswith(".mp4"):
            return
        output_path = input_path.replace(".mp4", "_fixed.mp4")

        ffmpeg_path = r"C:\Program Files\ffmpeg\bin\ffmpeg.exe"  # ✅ Your FFmpeg location here

        # Try quick remux
        command = [
            ffmpeg_path,
            "-y",
            "-i", input_path,
            "-c", "copy",
            "-movflags", "+faststart",
            output_path
        ]
        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)

        # Fallback to re-encode if remux fails or the file is suspiciously small
        if result.returncode != 0 or not os.path.exists(output_path) or os.path.getsize(output_path) < 1024 * 1024:
            print("[INFO] Remux failed or file too small, retrying with re-encode...")
            command = [
                ffmpeg_path,
                "-y",
                "-i", input_path,
                "-preset", "ultrafast",
                output_path
            ]
            subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)

        # Replace original file if fixed
        if os.path.exists(output_path):
            os.remove(input_path)
            os.rename(output_path, input_path)

    except Exception as e:
        print(f"[FFmpeg Fix Error] {e}")

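# Hedged note: the hard-coded ffmpeg_path above assumes one fixed Windows
# install location. If FFmpeg is on PATH, shutil.which can locate it instead;
# an optional substitution, not what the script does by default:
#
#     import shutil
#     ffmpeg_path = shutil.which("ffmpeg") or r"C:\Program Files\ffmpeg\bin\ffmpeg.exe"
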
def threaded_download(url, save_path):
    global stop_download_flag
    stop_download_flag = False
    try:
        if is_special_site(url):
            ytdlp_path = r"C:\Windows\yt-dlp.exe"  # ✅ Ensure correct path
            command = [
                ytdlp_path,
                "-f", "mp4",
                "--no-part",  # Saves directly as .mp4
                "--downloader", "ffmpeg",
                "--downloader-args", "ffmpeg_i:-movflags +faststart",
                "-o", save_path,
                url
            ]
            proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
            while proc.poll() is None:
                if stop_download_flag:
                    proc.kill()
                    break
                time.sleep(0.2)  # avoid spinning the CPU while polling

        else:
            response = requests.get(url, stream=True, timeout=10)
            if response.status_code == 200:
                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(1024 * 1024):  # 1MB
                        if stop_download_flag:
                            break
                        if chunk:
                            f.write(chunk)

        # Note: these messageboxes are raised from a worker thread; Tkinter is
        # not strictly thread-safe, though this tends to work in practice.
        if stop_download_flag:
            fix_partial_video(save_path)  # Try to repair it
            messagebox.showinfo("Download Stopped", f"Download was stopped by user.\nSaved: {save_path}")
        else:
            messagebox.showinfo("Download Complete", f"Downloaded successfully to:\n{save_path}")

    except Exception as e:
        messagebox.showerror("Download Error", str(e))

def start_download(url, save_path):
    global stop_download_flag
    stop_download_flag = False
    threading.Thread(target=threaded_download, args=(url, save_path), daemon=True).start()

def stop_download():
    global stop_download_flag
    stop_download_flag = True

def scrape_all_links(url):
    all_links = set()
    try:
        response = requests.get(url, timeout=10)
        soup = BeautifulSoup(response.text, 'html.parser')
        for a_tag in soup.find_all('a', href=True):
            full_url = urljoin(url, a_tag['href'])
            all_links.add(full_url)
    except Exception as e:
        print(f"[Scrape All Error] {e}")
    return all_links

def scrape_all_button():
    url = url_entry.get().strip()
    if not url:
        messagebox.showwarning("Input Error", "Please enter a valid URL.")
        return

    result_box.delete("1.0", END)
    media_urls.clear()
    all_links = scrape_all_links(url)
    media_urls.extend(all_links)
    for link in all_links:
        result_box.insert(END, link + "\n")
    messagebox.showinfo("Done", f"{len(all_links)} total link(s) scraped.")

def open_in_vlc():
    line_index = result_box.index(INSERT).split(".")[0]
    selected_url = result_box.get(f"{line_index}.0", f"{line_index}.end").strip()
    if not selected_url:
        messagebox.showwarning("No Selection", "Select a valid media URL.")
        return

    vlc_path = r"C:\Program Files\VideoLAN\VLC\vlc.exe"
    try:
        subprocess.Popen([vlc_path, selected_url])
    except Exception as e:
        messagebox.showerror("VLC Error", f"Could not open VLC:\n{e}")

def preview_image_popup():
    try:
        line_index = result_box.index(INSERT).split(".")[0]
        selected_url = result_box.get(f"{line_index}.0", f"{line_index}.end").strip()
        if not selected_url.lower().endswith(tuple(image_exts)):
            raise Exception("Selected link is not an image.")

        response = requests.get(selected_url, timeout=10)
        image = Image.open(io.BytesIO(response.content))

        popup = Toplevel(root)
        popup.title("Image Preview")
        popup.geometry("600x600")

        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter.
        img_resized = image.resize((500, 500), Image.LANCZOS)
        img_tk = ImageTk.PhotoImage(img_resized)

        label = Label(popup, image=img_tk)
        label.image = img_tk  # keep a reference so Tk doesn't garbage-collect it
        label.pack()

    except Exception as e:
        messagebox.showerror("Preview Error", str(e))

def clear_url_field():
    url_entry.delete(0, END)

def clear_result_box():
    result_box.delete("1.0", END)
    media_urls.clear()

def load_m3u_file():
    file_path = filedialog.askopenfilename(title="Open M3U File", filetypes=[("M3U/M3U8 Files", "*.m3u *.m3u8")])
    if file_path:
        result_box.delete("1.0", END)
        media_urls.clear()
        with open(file_path, 'r', encoding="utf-8", errors="ignore") as f:
            for line in f:
                url = line.strip()
                if url and url.startswith("http"):
                    media_urls.append(url)
                    result_box.insert(END, url + "\n")
        messagebox.showinfo("Loaded", f"{len(media_urls)} media URLs loaded from playlist.")

def load_online_m3u():
    url = url_entry.get().strip()
    if not url.lower().endswith((".m3u", ".m3u8")):
        messagebox.showwarning("URL Error", "Please enter a valid .m3u or .m3u8 URL.")
        return

    try:
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            raise Exception("Unable to fetch playlist.")

        result_box.delete("1.0", END)
        media_urls.clear()

        for line in response.text.splitlines():
            line = line.strip()
            if line and line.startswith("http"):
                media_urls.append(line)
                result_box.insert(END, line + "\n")

        messagebox.showinfo("Online M3U Loaded", f"{len(media_urls)} stream(s) loaded.")

    except Exception as e:
        messagebox.showerror("Error", str(e))

def save_as_m3u():
    # List the M3U types first so they match the .m3u default extension.
    file_path = filedialog.asksaveasfilename(defaultextension=".m3u", filetypes=[("M3U Playlist", "*.m3u"), ("M3U8 Playlist", "*.m3u8"), ("Text File", "*.txt")])
    if file_path:
        with open(file_path, 'w', encoding="utf-8") as f:
            f.write(result_box.get("1.0", END).strip())
        messagebox.showinfo("Saved", f"Playlist saved to:\n{file_path}")

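# Hedged note: save_as_m3u writes the raw URL list. Many players accept that,
# but a conventional playlist begins with an "#EXTM3U" header line; if a
# stricter player rejects the file, one optional tweak is:
#
#     f.write("#EXTM3U\n" + result_box.get("1.0", END).strip())
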
def scrape_xtream_m3u_url():
    url = url_entry.get().strip()
    if not url or "get.php" not in url:
        messagebox.showwarning("Input Error", "Please enter a valid Xtream M3U URL.")
        return

    try:
        headers = {
            "User-Agent": "VLC/3.0.18 LibVLC/3.0.18"
        }
        response = requests.get(url, headers=headers, timeout=15)

        if response.status_code == 404:
            raise Exception("404 Not Found: the playlist URL might be wrong or expired.")
        if response.status_code != 200:
            raise Exception(f"Failed to fetch playlist. Status code: {response.status_code}")

        content = response.text
        if "#EXTM3U" not in content:
            raise Exception("Invalid playlist. No M3U content found.")

        result_box.delete("1.0", END)
        media_urls.clear()

        for line in content.splitlines():
            if line.startswith("http"):
                media_urls.append(line)
                result_box.insert(END, line + "\n")

        if media_urls:
            messagebox.showinfo("Success", f"Scraped {len(media_urls)} stream URLs from Xtream playlist.")
        else:
            messagebox.showwarning("No URLs", "Playlist loaded, but no stream URLs found.")

    except Exception as e:
        messagebox.showerror("Error", str(e))

def search_urls():
    query = search_entry.get().strip().lower()
    if not query:
        return
    result_box.tag_remove("highlight", "1.0", END)

    lines = result_box.get("1.0", END).splitlines()
    for i, line in enumerate(lines, 1):
        if query in line.lower():
            result_box.tag_add("highlight", f"{i}.0", f"{i}.end")

    result_box.tag_config("highlight", background="yellow", foreground="black")

def clear_search():
    search_entry.delete(0, END)
    result_box.tag_remove("highlight", "1.0", END)

def scrape_directory_media(url):
    """
    Scrape media URLs from subdirectories of the given URL.
    :param url: The base URL to start scraping from.
    """
    global media_urls
    result_box.delete("1.0", END)
    media_urls.clear()

    def extract_directories(soup, base_url):
        """
        Extract directory links from the page.
        :param soup: BeautifulSoup object of the page.
        :param base_url: Base URL to resolve relative paths.
        :return: List of directory URLs.
        """
        directories = []
        for a_tag in soup.find_all('a', href=True):
            href = a_tag['href']
            if href.endswith("/") and not href.startswith("#"):  # Subdirectory link
                full_href = urljoin(base_url, href)
                if full_href != base_url:  # Avoid infinite loops
                    directories.append(full_href)
        return directories

    def extract_media_urls(soup, base_url):
        """
        Extract media URLs from the page.
        :param soup: BeautifulSoup object of the page.
        :param base_url: Base URL to resolve relative paths.
        :return: Set of media URLs.
        """
        media_links = set()
        for tag in soup.find_all(['img', 'video', 'source', 'a']):
            src = tag.get('src') or tag.get('href')
            if src:
                full_url = urljoin(base_url, src)
                parsed = urlparse(full_url)
                ext = os.path.splitext(parsed.path)[1].lower()
                if ext in image_exts + video_exts:
                    media_links.add(full_url)
        return media_links

    try:
        # Fetch the base URL content
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            messagebox.showerror("Error", f"Failed to fetch {url} (Status Code: {response.status_code})")
            return

        soup = BeautifulSoup(response.text, 'html.parser')

        # Step 1: Extract all subdirectories
        directories = extract_directories(soup, url)

        # Step 2: Scrape media URLs from each subdirectory
        found_media = False
        for directory in directories:
            try:
                dir_response = requests.get(directory, timeout=10)
                if dir_response.status_code == 200:
                    dir_soup = BeautifulSoup(dir_response.text, 'html.parser')
                    media_links = extract_media_urls(dir_soup, directory)
                    if media_links:
                        found_media = True
                        for media_url in media_links:
                            if media_url not in media_urls:
                                media_urls.append(media_url)
                                result_box.insert(END, media_url + "\n")
            except Exception as e:
                print(f"Error scraping directory {directory}: {e}")

        if not found_media:
            messagebox.showinfo("Info", "No media URLs found in subdirectories.")
        else:
            messagebox.showinfo("Success", f"{len(media_urls)} media URL(s) found!")

    except Exception as e:
        messagebox.showerror("Error", str(e))

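# Hedged note: scrape_directory_media descends exactly one level (the base
# page's immediate subdirectories). A deeper crawl would recurse on each
# directory with a depth limit and a visited set to avoid loops; that is left
# out here because this function runs on the Tk main thread and a long crawl
# would freeze the window.
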
# GUI Setup
root = Tk()
root.title("Najeeb Scrape Media Downloader + Batch Support")
root.geometry("965x700")
#root.configure(bg="#2c3e50")
root.iconbitmap(icon_path)

Label(root, text="Najeeb Downloader: enter a picture or video URL (any site or platform):").pack(pady=5)
search_frame = Frame(root)
search_frame.pack(pady=5)

search_entry = Entry(search_frame, width=40)
search_entry.pack(side=LEFT, padx=5)
Button(search_frame, text="Search", command=search_urls, bg="lightblue").pack(side=LEFT, padx=5)
url_entry = Entry(search_frame, width=100)
url_entry.pack(pady=5)

frame_buttons = Frame(root)
frame_buttons.pack(pady=5)

Button(frame_buttons, text="Scrape Media", command=process_url, bg="lightgreen", width=20).pack(side=LEFT, padx=5)
Button(frame_buttons, text="Browse URL File", command=browse_url_file, bg="lightyellow", width=20).pack(side=LEFT, padx=5)
Button(frame_buttons, text="Download All URLs", command=download_selected, bg="lightblue", width=20).pack(side=LEFT, padx=5)
Button(frame_buttons, text="Download Selected URL", command=download_selected_line, bg="orange", width=20).pack(side=LEFT, padx=5)
Button(frame_buttons, text="Save URLs to File", command=save_urls_to_file, bg="lightgray", width=20).pack(side=LEFT, padx=5)
Button(frame_buttons, text="Stop Download", command=stop_download, bg="red", width=20).pack(side=LEFT, padx=5)

frame_button = Frame(root)
frame_button.pack(pady=5)

Button(frame_button, text="Scrape All Links", command=scrape_all_button, bg="#e0c3fc", width=20).pack(side=LEFT, padx=5)
Button(frame_button, text="Open in VLC", command=open_in_vlc, bg="#c1f0c1", width=20).pack(side=LEFT, padx=5)
Button(frame_button, text="Preview Image", command=preview_image_popup, bg="#f0c1c1", width=20).pack(side=LEFT, padx=5)
Button(frame_button, text="Load Online M3U", command=load_online_m3u, bg="#c9f2ff", width=20).pack(side=LEFT, padx=5)
Button(frame_button, text="Scrape Xtream M3U", command=scrape_xtream_m3u_url, bg="#fff0b3", width=20).pack(side=LEFT, padx=5)
Button(frame_button, text="Load M3U File", command=load_m3u_file, bg="#d0f0fd", width=20).pack(side=LEFT, padx=5)

#Label(root, text="Editable Media URL List:").pack(pady=10)
result_frame = Frame(root)
result_frame.pack(pady=5)

scrollbar = Scrollbar(result_frame)
scrollbar.pack(side=RIGHT, fill=Y)

result_box = Text(result_frame, height=28, width=124, yscrollcommand=scrollbar.set)
result_box.pack(side=LEFT, fill=BOTH)

scrollbar.config(command=result_box.yview)

frame_clear = Frame(root)
frame_clear.pack(pady=5)

Button(frame_clear, text="Save Result", command=save_as_m3u, bg="#a7ffcc", width=20).pack(side=LEFT, padx=5)
Button(frame_clear, text="Clear Search", command=clear_search, bg="lightgray").pack(side=LEFT, padx=2)
Button(frame_clear, text="Clear URL Field", command=clear_url_field, bg="#ffd580", width=20).pack(side=LEFT, padx=5)
Button(frame_clear, text="Clear Result Field", command=clear_result_box, bg="#ffb3b3", width=20).pack(side=LEFT, padx=5)
# Add a button for scraping subdirectories
Button(frame_clear, text="Scrape Subdirectories", command=lambda: scrape_directory_media(url_entry.get().strip()), bg="#ffcccb", width=20).pack(side=LEFT, padx=5)

root.mainloop()

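# ---------------------------------------------------------------------------
# Assumed dependencies, inferred from the imports and hard-coded paths above:
#   pip install requests beautifulsoup4 yt-dlp Pillow
# External tools the script expects on this machine:
#   yt-dlp.exe  at C:\Windows\yt-dlp.exe                  (download helpers)
#   ffmpeg.exe  at C:\Program Files\ffmpeg\bin\ffmpeg.exe (fix_partial_video)
#   vlc.exe     at C:\Program Files\VideoLAN\VLC\vlc.exe  ("Open in VLC")
# The .pyw extension runs the script under pythonw, without a console window.
# ---------------------------------------------------------------------------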