joelnazarene

arxiv

Mar 31st, 2019
import time

from selenium import webdriver
from bs4 import BeautifulSoup

# Launch a Firefox session driven by Selenium.
driver = webdriver.Firefox()

# Build the arXiv search URL from the user's query.
query = input("enter text \n")
url = (
    "https://arxiv.org/search/?query="
    + query.replace(" ", "+")
    + "&searchtype=all&source=header"
)
print("\n\n")

# Load the results page and give it a moment to render.
driver.get(url)
time.sleep(2)

# Parse the rendered page source with BeautifulSoup.
page_soup = BeautifulSoup(driver.page_source, "html.parser")

# Each full abstract on the results page sits in a span with these classes.
containers = page_soup.find_all(
    "span", {"class": "abstract-full has-text-grey-dark mathjax"}
)

# Print every abstract longer than 200 characters.
for container in containers:
    try:
        if len(container.text) > 200:
            print(container.text)
            print("\n")
    except Exception:
        continue

driver.close()