NanoBarrios_9

Balandro

Nov 23rd, 2024
40
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 10.90 KB | None | 0 0
  1. # -*- coding: utf-8 -*-
  2.  
  3. from platformcode import config, logger, platformtools
  4. from core.item import Item
  5. from core import httptools, scrapertools, tmdb, servertools
  6.  
  7.  
# Base URL of the scraped site; may need a proxy in some countries (see item_configurar_proxies).
host = 'https://www.estrenoscinesaa.com/'
  10.  
  11. def item_configurar_proxies(item):
  12.     color_list_proxies = config.get_setting('channels_list_proxies_color', default='red')
  13.  
  14.     color_avis = config.get_setting('notification_avis_color', default='yellow')
  15.     color_exec = config.get_setting('notification_exec_color', default='cyan')
  16.  
  17.     context = []
  18.  
  19.     tit = '[COLOR %s]Información proxies[/COLOR]' % color_avis
  20.     context.append({'title': tit, 'channel': 'helper', 'action': 'show_help_proxies'})
  21.  
  22.     if config.get_setting('channel_estrenoscinesaa_proxies', default=''):
  23.         tit = '[COLOR %s][B]Quitar los proxies del canal[/B][/COLOR]' % color_list_proxies
  24.         context.append({'title': tit, 'channel': item.channel, 'action': 'quitar_proxies'})
  25.  
  26.     tit = '[COLOR %s]Ajustes categoría proxies[/COLOR]' % color_exec
  27.     context.append({'title': tit, 'channel': 'actions', 'action': 'open_settings'})
  28.  
  29.     plot = 'Es posible que para poder utilizar este canal necesites configurar algún proxy, ya que no es accesible desde algunos países/operadoras.'
  30.     plot += '[CR]Si desde un navegador web no te funciona el sitio ' + host + ' necesitarás un proxy.'
  31.     return item.clone( title = '[B]Configurar proxies a usar ...[/B]', action = 'configurar_proxies', folder=False, context=context, plot=plot, text_color='red' )
  32.  
  33. def quitar_proxies(item):
  34.     from modules import submnuctext
  35.     submnuctext._quitar_proxies(item)
  36.     return True
  37.  
  38. def configurar_proxies(item):
  39.     from core import proxytools
  40.     return proxytools.configurar_proxies_canal(item.channel, host)
  41.  
  42.  
  43. def do_downloadpage(url):
  44.     hay_proxies = False
  45.     if config.get_setting('channel_estrenoscinesaa_proxies', default=''): hay_proxies = True
  46.  
  47.     if not url.startswith(host):
  48.         data = httptools.downloadpage(url).data
  49.     else:
  50.         if hay_proxies:
  51.             data = httptools.downloadpage_proxy('estrenoscinesaa', url).data
  52.         else:
  53.             data = httptools.downloadpage(url).data
  54.  
  55.     return data
  56.  
  57.  
  58. def acciones(item):
  59.     logger.info()
  60.     itemlist = []
  61.  
  62.     itemlist.append(item.clone( channel='submnuctext', action='_test_webs', title='Test Web del canal [COLOR yellow][B] ' + host + '[/B][/COLOR]',
  63.                                 from_channel='estrenoscinesaa', folder=False, text_color='chartreuse' ))
  64.  
  65.     itemlist.append(item_configurar_proxies(item))
  66.  
  67.     platformtools.itemlist_refresh()
  68.  
  69.     return itemlist
  70.  
  71.  
  72. def mainlist(item):
  73.     # ~ descartadas series solo hay 22
  74.     return mainlist_pelis(item)
  75.  
  76. def mainlist_pelis(item):
  77.     logger.info()
  78.     itemlist = []
  79.  
  80.     itemlist.append(item.clone( action='acciones', title= '[B]Acciones[/B] [COLOR plum](si no hay resultados)[/COLOR]', text_color='goldenrod' ))
  81.  
  82.     itemlist.append(item.clone( title = 'Buscar película ...', action = 'search', search_type = 'movie', text_color = 'deepskyblue' ))
  83.  
  84.     itemlist.append(item.clone( title = 'Catálogo', action = 'list_all', url = host + 'movies/', search_type = 'movie' ))
  85.  
  86.     itemlist.append(item.clone( title = 'Netflix', action = 'list_all', url = host + 'genre/netflix/', search_type = 'movie', text_color='moccasin' ))
  87.     itemlist.append(item.clone( title = 'Marvel', action = 'list_all', url = host + 'genre/marvel/', search_type = 'movie', text_color='moccasin' ))
  88.     itemlist.append(item.clone( title = 'D.C.', action = 'list_all', url = host + 'genre/d-c/', search_type = 'movie', text_color='moccasin' ))
  89.     itemlist.append(item.clone( title = 'Star wars', action = 'list_all', url = host + 'genre/starwars/', search_type = 'movie', text_color='moccasin' ))
  90.  
  91.     itemlist.append(item.clone( title = 'Por género', action = 'generos', search_type = 'movie' ))
  92.  
  93.     return itemlist
  94.  
  95.  
  96. def generos(item):
  97.     logger.info()
  98.     itemlist = []
  99.  
  100.     data = do_downloadpage(host)
  101.  
  102.     bloque = scrapertools.find_single_match(data, '<ul class="genres(.*?)</ul>')
  103.  
  104.     matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)">(.*?)</a>\s*<i>(.*?)</i>')
  105.  
  106.     for url, title, count in matches:
  107.         title = title.replace('&amp;', '&')
  108.  
  109.         if '/genre/d-c/' in url: continue
  110.         elif '/genre/marvel/' in url: continue
  111.         elif '/genre/netflix/' in url: continue
  112.         elif '/genre/starwars/' in url: continue
  113.         elif '/sci-fi-fantasy/' in url: continue # son series
  114.  
  115.         if count: title = '[COLOR deepskyblue]' + title + '[/COLOR] (' + count + ')'
  116.         else: title = '[COLOR deepskyblue]' + title + '[/COLOR]'
  117.  
  118.         itemlist.append(item.clone( action = 'list_all', title = title, url = url ))
  119.  
  120.     return itemlist
  121.  
  122.  
  123. def list_all(item):
  124.     logger.info()
  125.     itemlist = []
  126.  
  127.     data = do_downloadpage(item.url)
  128.  
  129.     hasta_data = '<div class="pagination">' if '<div class="pagination">' in data else '<nav class="genres">'
  130.  
  131.     bloque = scrapertools.find_single_match(data, '<h2>Añadido recientemente(.*?)' + hasta_data)
  132.     if not bloque: bloque = scrapertools.find_single_match(data, '<h2>Añadido recientemente(.*?)$')
  133.  
  134.     matches = scrapertools.find_multiple_matches(bloque, '<article id="post-(.*?)</article>')
  135.  
  136.     for match in matches:
  137.         url = scrapertools.find_single_match(match, '<a href="([^"]+)')
  138.         title = scrapertools.find_single_match(match, ' alt="([^"]+)').strip()
  139.  
  140.         if not url or not title: continue
  141.  
  142.         title = title.replace('&#8211;', '').replace('&#8217;', '').replace('&#038;', '&')
  143.  
  144.         thumb = scrapertools.find_single_match(match, ' src="([^"]+)')
  145.  
  146.         year = scrapertools.find_single_match(match, '<span>(\d{4})</span>')
  147.         if not year: year = '-'
  148.  
  149.         plot = scrapertools.htmlclean(scrapertools.find_single_match(match, '<div class="texto">(.*?)</div>'))
  150.  
  151.         if '/tvshows/' in url:
  152.            if item.search_type == 'movie': continue
  153.  
  154.         itemlist.append(item.clone( action = 'findvideos', url = url, title = title, thumbnail = thumb,
  155.                                     contentType = 'movie', contentTitle = title, infoLabels = {'year': year, 'plot': plot} ))
  156.  
  157.     tmdb.set_infoLabels(itemlist)
  158.  
  159.     if itemlist:
  160.         if '<div class="pagination">' in data:
  161.             next_page = scrapertools.find_single_match(data, '<span class="current".*?' + "<a href='(.*?)'")
  162.  
  163.             if next_page:
  164.                 if '/page/' in next_page:
  165.                     itemlist.append(item.clone (url = next_page, title = 'Siguientes ...', action = 'list_all', text_color='coral' ))
  166.  
  167.     return itemlist
  168.  
  169.  
  170. def findvideos(item):
  171.     logger.info()
  172.     itemlist = []
  173.  
  174.     data = do_downloadpage(item.url)
  175.  
  176.     ses = 0
  177.  
  178.     matches = scrapertools.find_multiple_matches(data, "(?i)<div class='pframe'><iframe.*?src=(?:'|\")([^'\"]+)")
  179.  
  180.     for url in matches:
  181.         ses += 1
  182.  
  183.         if 'youtube.com' in url: continue
  184.  
  185.         servidor = servertools.get_server_from_url(url)
  186.  
  187.         if servidor and servidor != 'directo':
  188.             url = servertools.normalize_url(servidor, url)
  189.  
  190.             itemlist.append(Item( channel = item.channel, action = 'play', server = servidor, title = '', url = url, language = 'Esp' ))
  191.  
  192.     # Descarga
  193.     bloque = scrapertools.find_single_match(data, "<div id='download'(.*?)</table></div></div></div>")
  194.  
  195.     matches = scrapertools.find_multiple_matches(bloque, "<tr id='link-[^']+'>(.*?)</tr>")
  196.  
  197.     for enlace in matches:
  198.         ses += 1
  199.  
  200.         url = scrapertools.find_single_match(enlace, " href='([^']+)")
  201.  
  202.         servidor = scrapertools.find_single_match(enlace, "domain=(?:www.|dl.|)([^'.]+)")
  203.         servidor = servertools.corregir_servidor(servidor)
  204.  
  205.         if not url or not servidor: continue
  206.  
  207.         quality = 'HD'
  208.         lang = 'Esp'
  209.  
  210.         itemlist.append(Item( channel = item.channel, action = 'play', server = servidor, title = '', url = url, language = lang, quality = quality , other = 'd' ))
  211.  
  212.     if not itemlist:
  213.         if not ses == 0:
  214.             platformtools.dialog_notification(config.__addon_name, '[COLOR tan][B]Sin enlaces Soportados[/B][/COLOR]')
  215.             return
  216.  
  217.     return itemlist
  218.  
  219.  
  220. def play(item):
  221.     logger.info()
  222.     itemlist = []
  223.  
  224.     if host in item.url:
  225.         data = do_downloadpage(item.url)
  226.         url = scrapertools.find_single_match(data, '<a id="link".*?href="([^"]+)')
  227.  
  228.         if url:
  229.             servidor = servertools.get_server_from_url(url)
  230.             if servidor and servidor != 'directo':
  231.                 servidor = servertools.corregir_servidor(servidor)
  232.                 url = servertools.normalize_url(servidor, url)
  233.  
  234.                 itemlist.append(item.clone( url=url, server=servidor ))
  235.  
  236.     else:
  237.         servidor = servertools.get_server_from_url(item.url)
  238.         if servidor and servidor != 'directo':
  239.             servidor = servertools.corregir_servidor(servidor)
  240.             url = servertools.normalize_url(servidor, item.url)
  241.  
  242.             itemlist.append(item.clone( url=url, server=servidor ))
  243.  
  244.     return itemlist
  245.  
  246.  
  247. def list_search(item):
  248.     logger.info()
  249.     itemlist = []
  250.  
  251.     data = do_downloadpage(item.url)
  252.  
  253.     matches = scrapertools.find_multiple_matches(data, '<article>(.*?)</article>')
  254.  
  255.     for match in matches:
  256.         url = scrapertools.find_single_match(match, '<a href="([^"]+)')
  257.         title = scrapertools.find_single_match(match, ' alt="([^"]+)').strip()
  258.  
  259.         if not url or not title: continue
  260.  
  261.         title = title.replace('&#8211;', '').replace('&#8217;', '').replace('&#038;', '&')
  262.  
  263.         thumb = scrapertools.find_single_match(match, ' src="([^"]+)')
  264.  
  265.         year = scrapertools.find_single_match(match, '<span class="year">(\d{4})</span>')
  266.         if not year: year = '-'
  267.  
  268.         plot = scrapertools.htmlclean(scrapertools.find_single_match(match, '<div class="contenido"><p>(.*?)<p></div>'))
  269.  
  270.         if '/tvshows/' in url:
  271.            if item.search_type == 'movie': continue
  272.  
  273.         itemlist.append(item.clone( action = 'findvideos', url = url, title = title, thumbnail = thumb,
  274.                                     contentType = 'movie', contentTitle = title, infoLabels = {'year': year, 'plot': plot} ))
  275.  
  276.     tmdb.set_infoLabels(itemlist)
  277.  
  278.     if itemlist:
  279.         if '<div class="pagination">' in data:
  280.             next_page = scrapertools.find_single_match(data, '<span class="current".*?' + "<a href='(.*?)'")
  281.  
  282.             if next_page:
  283.                 if '/page/' in next_page:
  284.                     itemlist.append(item.clone (url = next_page, title = 'Siguientes ...', action = 'list_search', text_color='coral' ))
  285.  
  286.     return itemlist
  287.  
  288.  
  289. def search(item, texto):
  290.     logger.info()
  291.     try:
  292.         item.url = host + '?s=' + texto.replace(" ", "+")
  293.         return list_search(item)
  294.     except:
  295.         import sys
  296.         for line in sys.exc_info():
  297.             logger.error("%s" % line)
  298.         return []
  299.  
  300.  
Add Comment
Please, Sign In to add comment