Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
"""Scrape every anchor tag from an eLinux wiki page and export (Name, Link)
rows to a CSV file.
"""
import csv
import urllib
import urllib.request

from bs4 import BeautifulSoup

theurl = "https://elinux.org/Beagleboard:BeagleBoneBlack"
thepage = urllib.request.urlopen(theurl)
soup = BeautifulSoup(thepage, "html.parser")

print(soup.title.text)

# newline='' is required by the csv module to avoid blank rows on Windows;
# the with-block guarantees the file is flushed and closed.
with open('SecondWebParseII.csv', 'w', newline='') as csv_file:
    writer = csv.writer(csv_file)
    writer.writerow(['Name', 'Link'])
    # Bug fix: the original iterated the anchors in TWO separate loops, so
    # `links` held only the last anchor's href and every row repeated it.
    # A single loop pairs each anchor's own text with its own href, and the
    # "Name" column now actually contains the link text (the original wrote
    # the href into both columns, contradicting the header).
    for anchor in soup.find_all('a'):
        writer.writerow([anchor.get_text(strip=True), anchor.get('href')])
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement