Timestamp:
11/19/16 16:32:54
Author:
obi
Message:

fix vidzi

File:
1 edited

  • titan/mediathek/localhoster/lib/helpers.py

--- titan/mediathek/localhoster/lib/helpers.py (r39352)
+++ titan/mediathek/localhoster/lib/helpers.py (r39354)
@@ -24,4 +24,5 @@
 #from urlresolver.resolver import ResolverError
 import common
+from net import Net
 
 def get_hidden(html, form_id=None, index=None, include_submit=True):
     
@@ -149,8 +150,10 @@
         try: source_list.sort(key=lambda x: int(x[0]), reverse=True)
         except:
-            common.log_utils.log_debug('Scrape sources sort failed |int(x[0])|')
+            test = 1
+#            common.log_utils.log_debug('Scrape sources sort failed |int(x[0])|')
             try: source_list.sort(key=lambda x: int(x[0][:-1]), reverse=True)
             except:
-                common.log_utils.log_debug('Scrape sources sort failed |int(x[0][:-1])|')
+                test = 2
+#                common.log_utils.log_debug('Scrape sources sort failed |int(x[0][:-1])|')
 
     return source_list
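
This hunk replaces the two log_debug calls in the sort fallbacks with throwaway assignments and keeps the logging only as comments. For context, the surrounding code orders scraped sources by quality label, first as a plain integer and then, if that fails, with the trailing character stripped (labels such as '720p'). Below is a minimal standalone sketch of that fallback pattern, assuming source_list holds (quality_label, url) tuples as the code suggests; the sample labels and URLs are invented, and the bare except: from helpers.py is narrowed to ValueError only for the sketch.

def sort_by_quality(source_list):
    # First try labels that are plain numbers, e.g. '1080'.
    try:
        source_list.sort(key=lambda x: int(x[0]), reverse=True)
    except ValueError:
        # Fall back to labels with a trailing letter, e.g. '1080p'.
        try:
            source_list.sort(key=lambda x: int(x[0][:-1]), reverse=True)
        except ValueError:
            pass  # leave the list as scraped if neither form parses
    return source_list

print(sort_by_quality([('360p', 'http://example.com/a'),
                       ('1080p', 'http://example.com/b'),
                       ('720p', 'http://example.com/c')]))
# -> [('1080p', 'http://example.com/b'), ('720p', 'http://example.com/c'), ('360p', 'http://example.com/a')]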
     
@@ -164,5 +167,5 @@
 
     result_blacklist = list(set(result_blacklist + ['.smil']))  # smil(not playable) contains potential sources, only blacklist when called from here
-    net = common.Net()
+    net = Net()
     parsed_url = urlparse(url)
     headers = {'User-Agent': common.FF_USER_AGENT,
     
@@ -170,12 +173,14 @@
 
     response = net.http_GET(url, headers=headers)
-    response_headers = response.get_headers(as_dict=True)
+#    response_headers = response.get_headers(as_dict=True)
+    response_headers = response.get_headers()
+
     headers.update({'Referer': url})
-    cookie = response_headers.get('Set-Cookie', None)
-    if cookie:
-        headers.update({'Cookie': cookie})
+#    cookie = response_headers.get('Set-Cookie', None)
+#    if cookie:
+#        headers.update({'Cookie': cookie})
     html = response.content
 
     source_list = scrape_sources(html, result_blacklist)
     source = pick_source(source_list)
-    return source + append_headers(headers)
+    return source# + append_headers(headers)
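
Taken together, the hunks in this function stop relying on common.Net: the client now comes from the local net module, get_headers() is called without as_dict=True, the Set-Cookie forwarding is commented out, and the picked URL is returned without the append_headers(...) suffix. Below is a rough, self-contained sketch of the resulting flow, using urllib from the standard library as a stand-in for the project's Net.http_GET wrapper; the function name fetch_and_pick and the user-agent string are assumptions for illustration, and scrape_sources / pick_source are passed in as callables only so the sketch stays runnable on its own.

import urllib.request

# Hypothetical stand-in for common.FF_USER_AGENT.
FF_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0'

def fetch_and_pick(url, scrape_sources, pick_source):
    # Build the request headers the same way the diff does, User-Agent first.
    headers = {'User-Agent': FF_USER_AGENT}
    request = urllib.request.Request(url, headers=headers)
    response = urllib.request.urlopen(request)

    # r39354 reads the headers with get_headers() and no longer forwards
    # Set-Cookie; they are fetched here only to mirror that call.
    response_headers = response.getheaders()

    headers.update({'Referer': url})
    html = response.read().decode('utf-8', 'replace')

    source_list = scrape_sources(html)   # stands in for scrape_sources(html, result_blacklist)
    source = pick_source(source_list)    # stands in for pick_source(source_list)
    # The fixed version returns the bare URL; append_headers(headers) is no longer added.
    return source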