Not a member of Pastebin yet?
Sign Up — it unlocks many cool features!
import time
import bs4
import re
from selenium import webdriver
from bs4 import BeautifulSoup as soup

# Scrape abstracts from Paperity search results.
# Prompts for a query, opens the search page in Firefox, then visits each
# paper page (href matching /p/...) and prints its <blockquote> abstract.

BASE_SITE_URL = "https://paperity.org"
SEARCH_URL = BASE_SITE_URL + "/search/?q="

query = input("enter text \n")
print("\n\n")

driver = webdriver.Firefox()
try:
    driver.get(SEARCH_URL + query.replace(" ", "+"))
    time.sleep(2)  # give the results page time to finish loading

    page = soup(driver.page_source, "html.parser")
    # Paper links on the results page have hrefs of the form /p/<id>/...
    links = page.findAll("a", {"href": re.compile(r"/p/.*")})

    matched = 0
    for link in links:
        # Skip short anchors (icons, buttons); real title links are longer.
        if len(link.text) <= 20:
            continue
        matched += 1
        # Each paper appears twice in the result list (title + snippet link),
        # so only follow every other qualifying anchor.
        if matched % 2 == 0:
            continue
        try:
            driver.get(BASE_SITE_URL + link["href"])
        except KeyError:
            # Anchor without an href attribute — nothing to follow.
            continue
        abstract = soup(driver.page_source, "html.parser").find("blockquote")
        # Some paper pages have no <blockquote> abstract; skip them instead
        # of crashing (the original hid this behind a bare `except`).
        if abstract is not None:
            print(abstract.text)
            print("\n")
finally:
    # quit() (not close()) also terminates the geckodriver process,
    # and the finally guarantees cleanup even if scraping raises.
    driver.quit()
- ===========================================================================================
import time
import bs4
import re
from selenium import webdriver
from bs4 import BeautifulSoup as soup

# Open a DBLP search for a user-supplied query in Firefox.
# NOTE(review): the browser is intentionally left open so the user can
# browse the results page — confirm this is the desired behavior.

SEARCH_URL = "https://dblp.org/search?q="

query = input("enter text \n")
print("\n\n")

driver = webdriver.Firefox()
driver.get(SEARCH_URL + query.replace(" ", "+"))

# Disabled ScienceDirect abstract scraping, kept for reference.
# (Previously this sat in a dangling triple-quoted string, which the
# interpreter evaluated as a useless expression; comments are inert.)
#
# time.sleep(2)
# refs = driver.page_source
# pagesoup = soup(refs, "html.parser")
# containers = pagesoup.findAll("a", {"href": re.compile('/pii/.*')})
# i = 0
# u = "https://www.sciencedirect.com"
# for container in containers:
#     if len(container.text) > 20:
#         i = i + 1
#         if i % 2 != 0:
#             driver.get(u + container['href'])
#             refs = driver.page_source
#             s = soup(refs, "html.parser")
#             c = s.find("div", {"id": "abstracts"})
#             print(c.text)
#             print("\n")
# driver.close()
- ================================================================
import time
import bs4
import re
from selenium import webdriver
from bs4 import BeautifulSoup as soup

# Scrape abstracts from an arXiv search: open the results page in Firefox
# and print every full abstract that is embedded inline on the page —
# unlike the Paperity/ScienceDirect scrapers, no per-paper navigation
# is needed.

SEARCH_URL = "https://arxiv.org/search/?query="
SEARCH_SUFFIX = "&searchtype=all&source=header"

query = input("enter text \n")
print("\n\n")

driver = webdriver.Firefox()
try:
    driver.get(SEARCH_URL + query.replace(" ", "+") + SEARCH_SUFFIX)
    time.sleep(2)  # give the results page time to render

    page = soup(driver.page_source, "html.parser")
    # arXiv puts the complete abstract text in this span on the results page.
    abstracts = page.findAll(
        "span", {"class": "abstract-full has-text-grey-dark mathjax"}
    )

    for abstract in abstracts:
        # Very short spans are truncation toggles, not real abstracts.
        # (The original wrapped this in a bare `except: continue`, which
        # could only hide real bugs — nothing here raises in normal use.)
        if len(abstract.text) > 200:
            print(abstract.text)
            print("\n")
finally:
    # quit() (not close()) also terminates the geckodriver process,
    # and the finally guarantees cleanup even if scraping raises.
    driver.quit()
Add Comment
Please, Sign In to add comment