Changeset 40086


Timestamp:  02/25/17 01:36:28 (6 years ago)
Author:     obi
Message:    fix openload
Location:   titan/mediathek
Files:      1 deleted, 4 edited

Legend:

    (no marker)  unmodified line, shown with its old and new line numbers
    -            removed line
    +            added line
  • titan/mediathek/localhoster/hoster.sh

    r40077 r40086  
       9     9    ARCH=`cat /etc/.arch`
      10    10    BOX=`cat /etc/model`
            11  + USERAGENT='Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.7.3000 Chrome/30.0.1599.101 Safari/537.36'
      11    12    debuglevel=`cat /mnt/config/titan.cfg | grep debuglevel | cut -d"=" -f2`
      12          - curlbin="curl -k -s -L --cookie /mnt/network/cookies --cookie-jar /mnt/network/cookies -A 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.7.3000 Chrome/30.0.1599.101 Safari/537.36'"
            13  + curlbin="curl -k -s -L --cookie /mnt/network/cookies --cookie-jar /mnt/network/cookies -A $USERAGENT"
      13    14    curlbin2='curl -k -s --cookie /mnt/network/cookies --cookie-jar /mnt/network/cookies'
      14    15    if [ "$debuglevel" == "99" ]; then curlbin="$curlbin -v"; fi
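
    The new USERAGENT variable and the rebuilt $curlbin wrapper are what the other hunks
    in this changeset rely on (the meinkino.sh hunk below expands both).  As a point of
    reference, the request the wrapper issues corresponds roughly to the Python sketch
    below; the cookie-jar path and User-Agent string are taken from the hunk above, while
    the use of the requests library (and Python 3 imports) is purely illustrative and not
    part of the changeset, which shells out to curl.

        # Rough Python equivalent of the updated $curlbin call, for reference only.
        import requests
        from http.cookiejar import MozillaCookieJar  # curl cookie jars use the Netscape format

        USERAGENT = ('Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 '
                     '(KHTML, like Gecko) Maxthon/4.4.7.3000 Chrome/30.0.1599.101 Safari/537.36')

        def fetch(url):
            jar = MozillaCookieJar('/mnt/network/cookies')
            try:
                jar.load(ignore_discard=True)              # --cookie /mnt/network/cookies
            except (IOError, OSError):
                pass                                       # first run: no cookie file yet
            session = requests.Session()
            session.cookies = jar
            resp = session.get(url,
                               headers={'User-Agent': USERAGENT},  # -A $USERAGENT
                               verify=False,                       # -k
                               allow_redirects=True)               # -L
            jar.save(ignore_discard=True)                  # --cookie-jar /mnt/network/cookies
            return resp.text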
  • titan/mediathek/localhoster/lib/ol_gmu.py

    r39352 r40086  
       1          - # -*- coding: utf-8 -*-
       2          - """
       3          - openload.io urlresolver plugin
       4          - Copyright (C) 2015 tknorris
       5          - 
       6          - This program is free software: you can redistribute it and/or modify
       7          - it under the terms of the GNU General Public License as published by
       8          - the Free Software Foundation, either version 3 of the License, or
       9          - (at your option) any later version.
      10          - 
      11          - This program is distributed in the hope that it will be useful,
      12          - but WITHOUT ANY WARRANTY; without even the implied warranty of
      13          - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
      14          - GNU General Public License for more details.
      15          - 
      16          - You should have received a copy of the GNU General Public License
      17          - along with this program. If not, see <http://www.gnu.org/licenses/>.
      18          - """
      19          - import urllib
      20          - import re
      21          - import urllib2
      22          - from aa_decoder import AADecoder
      23          - from jjdecode import JJDecoder
      24          - from HTMLParser import HTMLParser
      25          - from net import Net
      26          - 
      27          - net = Net()
      28          - MAX_SIZE = 33 * 1024 * 1024
      29          - MIN_SIZE = 30 * 1024 * 1024
      30          - 
      31          - def caesar_shift(s, shift=13):
      32          -     s2 = ''
      33          -     for c in s:
      34          -         if c.isalpha():
      35          -             limit = 90 if c <= 'Z' else 122
      36          -             new_code = ord(c) + shift
      37          -             if new_code > limit:
      38          -                 new_code -= 26
      39          -             s2 += chr(new_code)
      40          -         else:
      41          -             s2 += c
      42          -     return s2
      43          - 
      44          - def unpack(html):
      45          -     strings = re.findall('{\s*var\s+a\s*=\s*"([^"]+)', html)
      46          -     shifts = re.findall('\)\);}\((\d+)\)', html)
      47          -     for s, shift in zip(strings, shifts):
      48          -         s = caesar_shift(s, int(shift))
      49          -         s = urllib.unquote(s)
      50          -         for i, replace in enumerate(['j', '_', '__', '___']):
      51          -             s = s.replace(str(i), replace)
      52          -         html += '<script>%s</script>' % (s)
      53          -     return html
      54          - 
      55          - def get_media_url(url):
      56          -     try:
      57          -         HTTP_HEADER = {
      58          -             'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0',
      59          -             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
      60          -             'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
      61          -             'Accept-Encoding': 'none',
      62          -             'Accept-Language': 'en-US,en;q=0.8',
      63          -             'Referer': url}  # 'Connection': 'keep-alive'
      64          - 
      65          -         html = net.http_GET(url, headers=HTTP_HEADER).content
      66          -         try: html = html.encode('utf-8')
      67          -         except: pass
      68          -         html = unpack(html)
      69          - 
      70          -         decodes = []
      71          -         hidden_id = ''
      72          -         for match in re.finditer('<script[^>]*>(.*?)</script>', html, re.DOTALL):
      73          -             decode = ''
      74          -             encoded = match.group(1)
      75          -             match = re.search("(゚ω゚ノ.*?\('_'\);)", encoded, re.DOTALL)
      76          -             if match:
      77          -                 decode = AADecoder(match.group(1)).decode()
      78          -                 decodes.append(decode)
      79          - 
      80          -             match = re.search('(.=~\[\].*\(\);)', encoded, re.DOTALL)
      81          -             if match:
      82          -                 decode = JJDecoder(match.group(1)).decode()
      83          -                 decodes.append(decode)
      84          - 
      85          -             match = re.search(r'=\s*\$\("#([^"]+)"', decode, re.DOTALL | re.IGNORECASE)
      86          -             if match:
      87          -                 hidden_id = match.group(1)
      88          - 
      89          -         if not hidden_id:
      90          -             print 'Hidden ID Not Found. Deleted?'
      91          - 
      92          -         hidden_url = ''
      93          -         match = re.search(r'<span[^>]+id\s*="%s"[^>]*>([^<]+)' % (hidden_id), html, re.DOTALL | re.IGNORECASE)
      94          -         if match:
      95          -             hidden_url = match.group(1)
      96          -         else:
      97          -             print 'Stream Url Not Found. Deleted?'
      98          - 
      99          -         if not decodes:
     100          -             print 'No Encoded Section Found. Deleted?'
     101          - 
     102          -         hiddenurl = HTMLParser().unescape(hidden_url)
     103          -         magic_number = 0
     104          -         for decode in decodes:
     105          -             match = re.search('charCodeAt\(\d+\)\s*\+\s*(\d+)\)', decode, re.DOTALL | re.I)
     106          -             if match:
     107          -                 magic_number = match.group(1)
     108          -                 break
     109          - 
     110          -         s = []
     111          -         for idx, i in enumerate(hiddenurl):
     112          -             j = ord(i)
     113          -             if (j >= 33 & j <= 126):
     114          -                 j = 33 + ((j + 14) % 94)
     115          - 
     116          -             if idx == len(hiddenurl) - 1:
     117          -                 j += int(magic_number)
     118          -             s.append(chr(j))
     119          -         res = ''.join(s)
     120          - 
     121          -         videoUrl = 'https://openload.co/stream/{0}?mime=true'.format(res)
     122          -         dtext = videoUrl.replace('https', 'http')
     123          -         headers = {'User-Agent': HTTP_HEADER['User-Agent']}
     124          -         req = urllib2.Request(dtext, None, headers)
     125          -         res = urllib2.urlopen(req)
     126          -         videourl = res.geturl()
     127          -         if MIN_SIZE < int(res.headers['Content-Length']) < MAX_SIZE:
     128          -             print 'Openload.co resolve failed. Pigeons? (%s)' % (res.headers['Content-Length'])
     129          -         res.close()
     130          - 
     131          -         return videourl
     132          -     except Exception as e:
     133          - #        common.log_utils.log_debug('Exception during openload resolve parse: %s' % e)
     134          -         raise
     135          - 
     136          -     print 'Unable to resolve openload.io link. Filelink not found.'
               1  + #    urlresolver XBMC Addon
               2  + #    Copyright (C) 2011, 2016 t0mm0, tknorris
               3  + #
               4  + #    This program is free software: you can redistribute it and/or modify
               5  + #    it under the terms of the GNU General Public License as published by
               6  + #    the Free Software Foundation, either version 3 of the License, or
               7  + #    (at your option) any later version.
               8  + #
               9  + #    This program is distributed in the hope that it will be useful,
              10  + #    but WITHOUT ANY WARRANTY; without even the implied warranty of
              11  + #    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
              12  + #    GNU General Public License for more details.
              13  + #
              14  + #    You should have received a copy of the GNU General Public License
              15  + #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
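
    The heart of the removed resolver is the character remapping applied to the text of
    the hidden <span> (old lines 110-119 above): every character in the printable ASCII
    range is rotated via 33 + ((j + 14) % 94), which is exactly ROT-47, and the final
    character additionally gets the "magic number" scraped out of the AADecoder/JJDecoder
    output.  A minimal standalone sketch of that step, with a made-up sample input and
    the range check written as an ordinary and-comparison:

        # Decode step from the removed ol_gmu.py (r39352), isolated for illustration.
        def decode_hidden(hidden, magic_number=0):
            out = []
            for idx, ch in enumerate(hidden):
                j = ord(ch)
                if 33 <= j <= 126:               # printable ASCII, space excluded
                    j = 33 + ((j + 14) % 94)     # equivalent to ROT-47
                if idx == len(hidden) - 1:
                    j += int(magic_number)       # extra offset on the last character
                out.append(chr(j))
            return ''.join(out)

        sample = 'abcDEF123'                                      # made-up input
        assert decode_hidden(decode_hidden(sample)) == sample     # ROT-47 is its own inverse
        print(decode_hidden(sample))

    The decoded string is then substituted into https://openload.co/stream/{0}?mime=true
    exactly as old line 121 shows; the rest of the removed file fetches the page, locates
    the span and the magic number in the obfuscated JavaScript, and follows the resulting
    redirect.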
  • titan/mediathek/localhoster/openload.py

    r39608 r40086  
       7     7    import lib.ol_gmu as ol_gmu
       8     8    import lib.common as common
             9  + import json
       9    10    
      10    11    #OL_SOURCE = 'https://offshoregit.com/tvaresolvers/ol_gmu.py'

      53    54                        f.write(new_py)
      54    55            except Exception as e:
      55          -             print 'Exception during openload code retrieve:'
            56  +             print 'errormsg=Exception during openload code retrieve:'
      56    57    #            common.log_utils.log_warning('Exception during openload code retrieve: %s' % e)
      57    58    
      58    59        def get_media_url(self, host, media_id):
      59    60            video_url = ""
            61  + #        js_data = self.__get_json(GET_URL.format(media_id=media_id))
            62  + #        print "js_data: %s" % (js_data)
      60    63            try:
      61          -             self._auto_update(self.get_setting('url'), OL_PATH, self.get_setting('key'))
            64  + #           self._auto_update(self.get_setting('url'), OL_PATH, self.get_setting('key'))
      62    65                reload(ol_gmu)
      63    66                return ol_gmu.get_media_url(self.get_url(host, media_id))  # @UndefinedVariable
      64    67            except Exception as e:
            68  + #            print "Exception during openload resolve parse: %s" % (e)
      65    69    #            common.log_utils.log_debug('Exception during openload resolve parse: %s' % (e))
      66    70                try:

      70    74                #except ResolverError:
      71    75                except Exception as e:
      72          -                 print "raise"
            76  +                 print "errormsg=streamlink not found"
      73    77                   # raise
      74    78    

      98   102                status, msg = e
      99   103                if status == 403:
           104  +                 print "errormsg=%s" % (e)
     100   105                    return
     101          - #            else:
           106  +             else:
           107  +                 print "errormsg=%s" % (msg)
     102   108    #                raise ResolverError(msg)
     103   109    
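
    The recurring change in this file is that failures are now reported as errormsg=...
    lines on stdout instead of bare prints or a re-raise, and the _auto_update call is
    commented out so the local ol_gmu.py is used as shipped.  The sketch below only
    illustrates the errormsg= convention as it appears here; how titan actually invokes
    the resolver and whether the stream URL is echoed on stdout as well are assumptions,
    and the command line passed in is hypothetical.

        # Hypothetical consumer of the "errormsg=" stdout convention used above.
        import subprocess

        def run_resolver(cmd):
            out = subprocess.check_output(cmd, universal_newlines=True)
            stream_url, errors = None, []
            for line in out.splitlines():
                if line.startswith('errormsg='):
                    errors.append(line[len('errormsg='):])   # diagnostic for the UI/log
                elif line.strip():
                    stream_url = line.strip()                # assume anything else is the URL
            return stream_url, errors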
  • titan/mediathek/localparser_secret/meinkino.sh

    r40069 r40086  
     182   182    
     183   183    #       $curlbin2 -H "X-Requested-With: XMLHttpRequest" -X POST  --referer http://meinkino.to/film/the-zero-theorem-stream-id8795 http://meinkino.to/geturl/8795
     184          -         $curlbin2 -H "X-Requested-With: XMLHttpRequest" -X POST  --referer $URL/$PAGE $TMPURL -o $TMP/cache.$FILENAME.2
           184  +         $curlbin -H "X-Requested-With: XMLHttpRequest" -X POST  --referer $URL/$PAGE $TMPURL -o $TMP/cache.$FILENAME.2
     185   185    
     186   186            cat $TMP/cache.$FILENAME.2 | sed 's/{"url":"/\nlink_url":"/g' | sed 's/link_/\nlink_/g' | grep ^link_ | sed 's/"alternative":{"/\nlink_/g' >$TMP/cache.$FILENAME.3

     214   214                                    fi
     215   215                            fi
     216          -                         LINE="$TITLE#$SRC $SRC play '$NEWPAGE'#http://atemio.dyndns.tv/mediathek/menu/$PIC.jpg#$PIC.jpg#$NAME#111"
           216  +                         LINE="$TITLE#$SRC $SRC play '$NEWPAGE|User-Agent=$USERAGENT&Referer=$URL/$PAGE'#http://atemio.dyndns.tv/mediathek/menu/$PIC.jpg#$PIC.jpg#$NAME#111"
     217   217                            echo "$LINE" >> $TMP/$FILENAME.list
     218   218                    fi
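
    The rewritten LINE entry no longer hands the player the bare $NEWPAGE URL; it appends
    |User-Agent=$USERAGENT&Referer=$URL/$PAGE so the stream can be requested with the same
    headers the parser used.  Splitting such a value back apart looks roughly like the
    sketch below; the pipe-and-ampersand form matches the familiar Kodi-style header
    suffix, and treating it exactly this way on the player side is an assumption, as is
    the example URL.

        # Split a "url|Header=value&Header=value" play URL into URL plus headers.
        def split_play_url(play_url):
            url, _, suffix = play_url.partition('|')
            headers = {}
            for pair in filter(None, suffix.split('&')):
                name, _, value = pair.partition('=')
                headers[name] = value
            return url, headers

        url, headers = split_play_url(
            'http://example.com/video.mp4|User-Agent=Mozilla/5.0&Referer=http://meinkino.to/film/example')
        print(url)       # http://example.com/video.mp4
        print(headers)   # {'User-Agent': 'Mozilla/5.0', 'Referer': 'http://meinkino.to/film/example'}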