PEP8 style fixes and other small style fixes

I used autopep8 on some files and carefully checked the changes.
I ignored E501, E302, E231, E225, E222, E221, E241 and E203 in my search,
and at least E501 on any autopep8 run.

Other style fixes not related to PEP8:
* Only use new-style classes. I don't think the use of old-style
  classes was intentional. Old-style classes are removed in Python 3.
* Convert an if/else to a one-liner in mediawiki; also change to a
  comment a docstring whose content wasn't really appropriate for a
  docstring.
* Remove an unneeded first if condition in meteofrance
This commit is contained in:
Laurent Bachelier 2012-03-14 03:24:02 +01:00
commit 006e97a8be
99 changed files with 441 additions and 350 deletions

View file

@ -298,7 +298,7 @@ class AuMBackend(BaseBackend, ICapMessages, ICapMessagesPost, ICapDating, ICapCh
yield thread.root
except BrowserUnavailable, e:
self.logger.debug('No messages, browser is unavailable: %s' % e)
pass # don't care about waiting
pass # don't care about waiting
def set_message_read(self, message):
if message.id == self.MAGIC_ID_BASKET:

View file

@ -24,7 +24,7 @@ import Image
class CaptchaError(Exception): pass
class Tile:
class Tile(object):
hash = {
'bc8d52d96058478a6def26226145d53b': 'A',
'c62ecdfddb72b2feaed96cd9fe7c2802': 'A',
@ -111,7 +111,7 @@ class Tile:
print 'hash: %s' % checksum
raise CaptchaError()
class Captcha:
class Captcha(object):
def __init__(self, f):
self.img = Image.open(f)
self.w, self.h = self.img.size
@ -152,7 +152,7 @@ class Captcha:
s += tile.letter
return s
class Decoder:
class Decoder(object):
def __init__(self):
self.hash = {}

View file

@ -26,7 +26,8 @@ from weboob.tools.ordereddict import OrderedDict
from weboob.capabilities.contact import Contact as _Contact, ProfileNode
from weboob.tools.misc import html2text
class FieldBase:
class FieldBase(object):
def __init__(self, key, key2=None):
self.key = key
self.key2 = key2
@ -34,18 +35,22 @@ class FieldBase:
def get_value(self, value, consts):
raise NotImplementedError()
class FieldStr(FieldBase):
def get_value(self, profile, consts):
return html2text(unicode(profile[self.key])).strip()
class FieldBool(FieldBase):
def get_value(self, profile, consts):
return bool(int(profile[self.key]))
class FieldDist(FieldBase):
def get_value(self, profile, consts):
return '%.2f km' % float(profile[self.key])
class FieldIP(FieldBase):
def get_hostname(self, s):
try:
@ -59,6 +64,7 @@ class FieldIP(FieldBase):
s += ' (first %s)' % self.get_hostname(profile[self.key2])
return s
class FieldProfileURL(FieldBase):
def get_value(self, profile, consts):
id = int(profile[self.key])
@ -67,10 +73,12 @@ class FieldProfileURL(FieldBase):
else:
return ''
class FieldPopu(FieldBase):
def get_value(self, profile, consts):
return unicode(profile['popu'][self.key])
class FieldPopuRatio(FieldBase):
def get_value(self, profile, consts):
v1 = float(profile['popu'][self.key])
@ -80,15 +88,18 @@ class FieldPopuRatio(FieldBase):
else:
return '%.2f' % (v1 / v2)
class FieldOld(FieldBase):
def get_value(self, profile, consts):
birthday = parse_dt(profile[self.key])
return int((datetime.now() - birthday).days / 365.25)
class FieldSplit(FieldBase):
def get_value(self, profile, consts):
return [html2text(s).strip() for s in profile[self.key].split(self.key2) if len(s.strip()) > 0]
class FieldBMI(FieldBase):
def __init__(self, key, key2, fat=False):
FieldBase.__init__(self, key, key2)
@ -100,7 +111,7 @@ class FieldBMI(FieldBase):
if height == 0 or weight == 0:
return ''
bmi = (weight/float(pow(height/100.0, 2)))
bmi = (weight / float(pow(height / 100.0, 2)))
if not self.fat:
return bmi
elif bmi < 15.5:
@ -114,6 +125,7 @@ class FieldBMI(FieldBase):
else:
return 'obese'
class FieldFlags(FieldBase):
def get_value(self, profile, consts):
i = int(profile[self.key])
@ -123,6 +135,7 @@ class FieldFlags(FieldBase):
labels.append(html2text(d['label']).strip())
return labels
class FieldList(FieldBase):
def get_value(self, profile, consts):
i = int(profile[self.key])
@ -131,6 +144,7 @@ class FieldList(FieldBase):
return html2text(d['label']).strip()
return ''
class Contact(_Contact):
TABLE = OrderedDict((
('_info', OrderedDict((
@ -247,9 +261,9 @@ class Contact(_Contact):
if node.flags & node.SECTION:
result += u'\t' * level + node.label + '\n'
for sub in node.value.itervalues():
result += print_node(sub, level+1)
result += print_node(sub, level + 1)
else:
if isinstance(node.value, (tuple,list)):
if isinstance(node.value, (tuple, list)):
value = ', '.join(unicode(v) for v in node.value)
elif isinstance(node.value, float):
value = '%.2f' % node.value

View file

@ -124,9 +124,9 @@ class PriorityConnection(Optimization):
browser = AuMBrowser('%s@%s' % (name, self.config['domain']), proxy=self.browser.proxy)
try:
browser.register(password= password,
sex= 1, #slut
birthday_d= random.randint(1,28),
birthday_m= random.randint(1,12),
sex= 1, # slut
birthday_d= random.randint(1, 28),
birthday_m= random.randint(1, 12),
birthday_y= random.randint(1975, 1990),
zipcode= 75001,
country= 'fr',

View file

@ -49,7 +49,7 @@ class ProfilesWalker(Optimization):
def start(self):
self.walk_cron = self.sched.repeat(60, self.enqueue_profiles)
self.view_cron = self.sched.schedule(randint(10,40), self.view_profile)
self.view_cron = self.sched.schedule(randint(10, 40), self.view_profile)
return True
def stop(self):
@ -77,7 +77,7 @@ class ProfilesWalker(Optimization):
try:
id = self.profiles_queue.pop()
except KeyError:
return # empty queue
return # empty queue
try:
with self.browser:
@ -101,4 +101,4 @@ class ProfilesWalker(Optimization):
print e
finally:
if self.view_cron is not None:
self.view_cron = self.sched.schedule(randint(10,40), self.view_profile)
self.view_cron = self.sched.schedule(randint(10, 40), self.view_profile)

View file

@ -22,7 +22,6 @@ from weboob.tools.test import BackendTest
from weboob.tools.browser import BrowserUnavailable
__all__ = ['AuMTest']

View file

@ -21,6 +21,7 @@ from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicRea
__all__ = ['BatotoBackend']
class BatotoBackend(GenericComicReaderBackend):
NAME = 'batoto'
DESCRIPTION = 'Batoto manga reading website'
@ -31,4 +32,4 @@ class BatotoBackend(GenericComicReaderBackend):
ID_REGEXP = r'[^/]+/[^/]+'
URL_REGEXP = r'.+batoto.(?:com|net)/read/_/(%s).+' % ID_REGEXP
ID_TO_URL = 'http://www.batoto.net/read/_/%s'
PAGES = { URL_REGEXP: DisplayPage }
PAGES = {URL_REGEXP: DisplayPage}

View file

@ -19,8 +19,9 @@
from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicReaderTest
class BatotoTest(GenericComicReaderTest):
BACKEND = 'batoto'
def test_download(self):
return self._test_download('26287/yurumates_ch4_by_primitive-scans')

View file

@ -70,4 +70,3 @@ class BoursoramaBackend(BaseBackend, ICapBank):
with self.browser:
for coming in self.browser.get_coming_operations(account):
yield coming

View file

@ -32,7 +32,7 @@ __all__ = ['Boursorama']
class Boursorama(BaseBrowser):
DOMAIN = 'www.boursorama.com'
PROTOCOL = 'https'
ENCODING = None # refer to the HTML encoding
ENCODING = None # refer to the HTML encoding
PAGES = {
'.*connexion.phtml.*': LoginPage,
'.*/comptes/synthese.phtml': AccountsList,
@ -84,11 +84,11 @@ class Boursorama(BaseBrowser):
self.location(account._link_id)
operations = self.page.get_operations()
# load last month as well
target = date.today() - relativedelta( months = 1 )
target = date.today() - relativedelta(months=1)
self.location(account._link_id + ("&month=%d&year=%d" % (target.month, target.year)))
operations += self.page.get_operations()
# and the month before, just in case you're greedy
target = date.today() - relativedelta( months = 2 )
target = date.today() - relativedelta(months=2)
self.location(account._link_id + ("&month=%d&year=%d" % (target.month, target.year)))
operations += self.page.get_operations()
for index, op in enumerate(operations):

View file

@ -23,7 +23,8 @@ from .account_history import AccountHistory
from .accounts_list import AccountsList
from .login import LoginPage
class AccountPrelevement(AccountsList): pass
class AccountPrelevement(AccountsList):
pass
__all__ = ['LoginPage',
'AccountsList',

View file

@ -22,8 +22,8 @@
from weboob.capabilities.bank import Account
from weboob.tools.browser import BasePage
class AccountsList(BasePage):
class AccountsList(BasePage):
def on_loaded(self):
pass
@ -34,34 +34,34 @@ class AccountsList(BasePage):
for tr in div.getiterator('tr'):
account = Account()
for td in tr.getiterator('td'):
if td.attrib.get('class', '') == 'account-cb':
break
if td.attrib.get('class', '') == 'account-cb':
break
elif td.attrib.get('class', '') == 'account-name':
a = td.find('a')
account.label = a.text
account._link_id = a.get('href', '')
elif td.attrib.get('class', '') == 'account-name':
a = td.find('a')
account.label = a.text
account._link_id = a.get('href', '')
elif td.attrib.get('class', '') == 'account-number':
id = td.text
id = id.strip(u' \n\t')
account.id = id
elif td.attrib.get('class', '') == 'account-number':
id = td.text
id = id.strip(u' \n\t')
account.id = id
elif td.attrib.get('class', '') == 'account-total':
span = td.find('span')
if span == None:
balance = td.text
else:
balance = span.text
balance = balance.strip(u' \n\t€+').replace(',','.').replace(' ','')
if balance != "":
account.balance = float(balance)
else:
account.balance = 0.0
elif td.attrib.get('class', '') == 'account-total':
span = td.find('span')
if span == None:
balance = td.text
else:
balance = span.text
balance = balance.strip(u' \n\t€+').replace(',', '.').replace(' ', '')
if balance != "":
account.balance = float(balance)
else:
account.balance = 0.0
else:
# because of some weird useless <tr>
if account.id != 0:
l.append(account)
# because of some weird useless <tr>
if account.id != 0:
l.append(account)
return l

View file

@ -26,7 +26,7 @@ __all__ = ['LoginPage']
class LoginPage(BasePage):
def on_loaded(self):
pass
pass
# for td in self.document.getroot().cssselect('td.LibelleErreur'):
# if td.text is None:
# continue

View file

@ -35,7 +35,7 @@ __all__ = ['BPBrowser']
class BPBrowser(BaseBrowser):
DOMAIN = 'voscomptesenligne.labanquepostale.fr'
PROTOCOL = 'https'
ENCODING = None # refer to the HTML encoding
ENCODING = None # refer to the HTML encoding
PAGES = {r'.*wsost/OstBrokerWeb/loginform.*' : LoginPage,
r'.*authentification/repositionnerCheminCourant-identif.ea' : repositionnerCheminCourant,
r'.*authentification/initialiser-identif.ea' : Initident,

View file

@ -54,7 +54,7 @@ class AccountHistory(BasePage):
for t in tmp:
if r.search(t.text):
amount = t.text
amount = ''.join( amount.replace('.', '').replace(',', '.').split() )
amount = ''.join(amount.replace('.', '').replace(',', '.').split())
if amount[0] == "-":
operation.amount = -float(amount[1:])
else:

View file

@ -26,6 +26,7 @@ __all__ = ['CanalplusVideo']
class CanalplusVideo(BaseVideo):
swf_player = False
@classmethod
def id2url(cls, _id):
return 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s' % _id

View file

@ -33,8 +33,10 @@ from lxml import etree
from datetime import date
from StringIO import StringIO
__all__ = ['CmbBackend']
class CmbBackend(BaseBackend, ICapBank):
NAME = 'cmb'
MAINTAINER = 'Johann Broudin'
@ -83,7 +85,6 @@ class CmbBackend(BaseBackend, ICapBank):
)
]
cookie = None
headers = {
'User-Agent':
@ -167,13 +168,13 @@ class CmbBackend(BaseBackend, ICapBank):
balance = td[1].text
balance = balance.replace(',','.').replace(u"\xa0",'')
balance = balance.replace(',', '.').replace(u"\xa0", '')
account.balance = float(balance)
span = td[3].xpath('a/span')
if len(span):
coming = span[0].text.replace(' ','').replace(',','.')
coming = coming.replace(u"\xa0",'')
coming = span[0].text.replace(' ', '').replace(',', '.')
coming = coming.replace(u"\xa0", '')
account.coming = float(coming)
else:
account.coming = NotAvailable
@ -248,7 +249,7 @@ class CmbBackend(BaseBackend, ICapBank):
operation.date = date(*reversed([int(x) for x in d]))
div = td[2].xpath('div')
label = div[0].xpath('a')[0].text.replace('\n','')
label = div[0].xpath('a')[0].text.replace('\n', '')
operation.raw = unicode(' '.join(label.split()))
for pattern, _type, _label in self.LABEL_PATTERNS:
mm = pattern.match(operation.raw)
@ -260,12 +261,11 @@ class CmbBackend(BaseBackend, ICapBank):
amount = td[3].text
if amount.count(',') != 1:
amount = td[4].text
amount = amount.replace(',','.').replace(u'\xa0','')
amount = amount.replace(',', '.').replace(u'\xa0', '')
operation.amount = float(amount)
else:
amount = amount.replace(',','.').replace(u'\xa0','')
amount = amount.replace(',', '.').replace(u'\xa0', '')
operation.amount = - float(amount)
i += 1
yield operation

View file

@ -24,6 +24,7 @@ from weboob.capabilities.bank import Account
from .base import CragrBasePage
from weboob.capabilities.bank import Transaction
def clean_amount(amount):
"""
Removes weird characters and converts to a float
@ -34,6 +35,7 @@ def clean_amount(amount):
matches = re.findall('^(-?[0-9]+\.[0-9]{2}).*$', data)
return float(matches[0]) if (matches) else 0.0
class AccountsList(CragrBasePage):
def get_list(self):
@ -109,7 +111,7 @@ class AccountsList(CragrBasePage):
select_name is the name of the select field to analyze
"""
if not self.is_transfer_page():
return False
return False
source_accounts = {}
source_account_options = self.document.xpath('/html/body//form//select[@name="%s"]/option' % select_name)
for option in source_account_options:
@ -212,7 +214,7 @@ class AccountsList(CragrBasePage):
year = today.year
return date(year, month, day)
def get_history(self, start_index = 0, start_offset = 0):
def get_history(self, start_index=0, start_offset=0):
"""
Returns the history of a specific account. Note that this function
expects the current page to be the one dedicated to this history.

View file

@ -26,6 +26,7 @@ from .pages import LoginPage, LoginErrorPage, AccountsPage, UserSpacePage, Opera
__all__ = ['CreditMutuelBrowser']
# Browser
class CreditMutuelBrowser(BaseBrowser):
PROTOCOL = 'https'
@ -36,11 +37,11 @@ class CreditMutuelBrowser(BaseBrowser):
'https://www.creditmutuel.fr/groupe/fr/identification/default.cgi': LoginErrorPage,
'https://www.creditmutuel.fr/.*/fr/banque/situation_financiere.cgi': AccountsPage,
'https://www.creditmutuel.fr/.*/fr/banque/espace_personnel.aspx': UserSpacePage,
'https://www.creditmutuel.fr/.*/fr/banque/mouvements.cgi.*' : OperationsPage,
'https://www.creditmutuel.fr/.*/fr/banque/nr/nr_devbooster.aspx.*' : OperationsPage,
'https://www.creditmutuel.fr/.*/fr/banque/operations_carte\.cgi.*' : OperationsPage,
'https://www.creditmutuel.fr/.*/fr/banque/BAD.*' : InfoPage,
'https://www.creditmutuel.fr/.*/fr/banque/.*Vir.*' : TransfertPage
'https://www.creditmutuel.fr/.*/fr/banque/mouvements.cgi.*': OperationsPage,
'https://www.creditmutuel.fr/.*/fr/banque/nr/nr_devbooster.aspx.*': OperationsPage,
'https://www.creditmutuel.fr/.*/fr/banque/operations_carte\.cgi.*': OperationsPage,
'https://www.creditmutuel.fr/.*/fr/banque/BAD.*': InfoPage,
'https://www.creditmutuel.fr/.*/fr/banque/.*Vir.*': TransfertPage
}
def __init__(self, *args, **kwargs):
@ -61,17 +62,17 @@ class CreditMutuelBrowser(BaseBrowser):
if not self.is_on_page(LoginPage):
self.location('https://www.creditmutuel.fr/', no_login=True)
self.page.login( self.username, self.password)
self.page.login(self.username, self.password)
if not self.is_logged() or self.is_on_page(LoginErrorPage):
raise BrowserIncorrectPassword()
self.SUB_BANKS = ['cmdv','cmcee','cmse', 'cmidf', 'cmsmb', 'cmma', 'cmmabn', 'cmc', 'cmlaco', 'cmnormandie', 'cmm']
self.SUB_BANKS = ['cmdv', 'cmcee', 'cmse', 'cmidf', 'cmsmb', 'cmma', 'cmmabn', 'cmc', 'cmlaco', 'cmnormandie', 'cmm']
self.getCurrentSubBank()
def get_accounts_list(self):
if not self.is_on_page(AccountsPage):
self.location('https://www.creditmutuel.fr/%s/fr/banque/situation_financiere.cgi'%self.currentSubBank)
self.location('https://www.creditmutuel.fr/%s/fr/banque/situation_financiere.cgi' % self.currentSubBank)
return self.page.get_list()
def get_account(self, id):
@ -121,13 +122,13 @@ class CreditMutuelBrowser(BaseBrowser):
def transfer(self, account, to, amount, reason=None):
# access the transfer page
transfert_url = 'WI_VPLV_VirUniSaiCpt.asp?RAZ=ALL&Cat=6&PERM=N&CHX=A'
self.location('https://%s/%s/fr/banque/%s'%(self.DOMAIN, self.currentSubBank, transfert_url))
self.location('https://%s/%s/fr/banque/%s' % (self.DOMAIN, self.currentSubBank, transfert_url))
# fill the form
self.select_form(name='FormVirUniSaiCpt')
self['IDB'] = [account[-1]]
self['IDB'] = [account[-1]]
self['ICR'] = [to[-1]]
self['MTTVIR'] = '%s' % str(amount).replace('.',',')
self['MTTVIR'] = '%s' % str(amount).replace('.', ',')
if reason != None:
self['LIBDBT'] = reason
self['LIBCRT'] = reason

View file

@ -27,7 +27,7 @@ from ..tools import url2id
from .index import DLFPPage
class RSSComment(DLFPPage):
def on_loaded(self):
def on_loaded(self):
pass
class Content(object):

View file

@ -21,6 +21,7 @@ from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicRea
__all__ = ['EatmangaBackend']
class EatmangaBackend(GenericComicReaderBackend):
NAME = 'eatmanga'
DESCRIPTION = 'EatManga manga reading website'
@ -31,4 +32,4 @@ class EatmangaBackend(GenericComicReaderBackend):
ID_REGEXP = r'[^/]+/[^/]+'
URL_REGEXP = r'.+eatmanga.com/(?:index.php/)?Manga-Scan/(%s).+' % ID_REGEXP
ID_TO_URL = 'http://www.eatmanga.com/index.php/Manga-Scan/%s'
PAGES = { URL_REGEXP: DisplayPage }
PAGES = {URL_REGEXP: DisplayPage}

View file

@ -19,8 +19,9 @@
from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicReaderTest
class EatmangaTest(GenericComicReaderTest):
BACKEND = 'eatmanga'
def test_download(self):
return self._test_download('Glass-Mask/Glass-Mask-Vol-031')

View file

@ -96,4 +96,4 @@ class EHentaiBackend(BaseBackend, ICapGallery):
OBJECTS = {
EHentaiGallery: fill_gallery,
EHentaiImage: fill_image }
EHentaiImage: fill_image}

View file

@ -61,7 +61,7 @@ class EHentaiBrowser(BaseBrowser):
assert self.is_on_page(GalleryPage)
i = 0
while True:
n = self.page._next_page_link();
n = self.page._next_page_link()
for img in self.page.image_pages():
yield EHentaiImage(img)
@ -104,4 +104,3 @@ class EHentaiBrowser(BaseBrowser):
# necessary in order to reach the fjords
self.home()

View file

@ -27,6 +27,7 @@ from .gallery import EHentaiGallery
__all__ = ['GalleryPage', 'ImagePage', 'IndexPage', 'HomePage', 'LoginPage']
class LoginPage(BasePage):
def is_logged(self):
success_p = self.document.xpath(
@ -38,9 +39,11 @@ class LoginPage(BasePage):
print 'not logged on'
return False
class HomePage(BasePage):
pass
class IndexPage(BasePage):
def iter_galleries(self):
lines = self.document.xpath('//table[@class="itg"]//tr[@class="gtr0" or @class="gtr1"]')
@ -50,6 +53,7 @@ class IndexPage(BasePage):
title = a.text.strip()
yield EHentaiGallery(re.search('(?<=/g/)\d+/[\dabcdef]+', url).group(0), title=title)
class GalleryPage(BasePage):
def image_pages(self):
return self.document.xpath('//div[@class="gdtm"]//a/attribute::href')
@ -102,7 +106,7 @@ class GalleryPage(BasePage):
except IndexError:
return None
class ImagePage(BasePage):
def get_url(self):
return self.document.xpath('//div[@class="sni"]/a/img/attribute::src')[0]

View file

@ -20,6 +20,7 @@
from weboob.tools.test import BackendTest
class EHentaiTest(BackendTest):
BACKEND = 'ehentai'
@ -35,4 +36,3 @@ class EHentaiTest(BackendTest):
self.backend.fillobj(img, ('url',))
self.assertTrue(v.url and v.url.startswith('http://'), 'URL for first image in gallery "%s" not found: %s' % (v.id, img.url))
self.backend.browser.openurl(img.url)

View file

@ -68,7 +68,7 @@ class FourChanBackend(BaseBackend, ICapMessages):
thread = Thread(id)
thread.title = _thread.filename
thread.root = Message(thread=thread,
id=0, # root message
id=0, # root message
title=_thread.filename,
sender=_thread.author,
receivers=None,

View file

@ -30,9 +30,11 @@ __all__ = ['ValidationPage', 'HomePage', 'HistoryPage', 'StoryPage']
class ValidationPage(BasePage):
pass
class HomePage(BasePage):
pass
class Author(object):
(UNKNOWN,
MALE,
@ -45,6 +47,7 @@ class Author(object):
self.email = None
self.description = None
class Story(object):
def __init__(self, id):
self.id = id
@ -54,6 +57,7 @@ class Story(object):
self.author = None
self.body = None
class HistoryPage(BasePage):
def get_numerous(self):
td = self.parser.select(self.document.getroot(), 'td.t0', 1)
@ -89,6 +93,7 @@ class HistoryPage(BasePage):
yield story
story = None
class StoryPage(BasePage):
def get_story(self):
p_tags = self.document.getroot().xpath('//body/p')
@ -179,4 +184,3 @@ class AuthorPage(BasePage):
if author.description.startswith(u'0 récit '):
self.logger.warning('This author does not have published any story.')
return author

View file

@ -18,22 +18,24 @@
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
def id2url(_id):
"return an url from an id"
regexp2 = re.compile("(\w+).([0-9]+).(.*$)")
match = regexp2.match(_id)
if match:
return 'http://www.20minutes.fr/%s/%s/%s' % ( match.group(1),
match.group(2),
match.group(3))
return 'http://www.20minutes.fr/%s/%s/%s' % (match.group(1),
match.group(2),
match.group(3))
else:
raise ValueError("id doesn't match")
def url2id(url):
"return an id from an url"
return url
def rssid(entry):
return url2id(entry.id)

View file

@ -32,6 +32,7 @@ from weboob.tools.browser import BaseBrowser, BasePage
__all__ = ['IzneoBackend']
class ReaderV2(BasePage):
def get_ean(self):
return self.document.xpath("//div[@id='viewer']/attribute::rel")[0]
@ -43,14 +44,15 @@ class ReaderV2(BasePage):
% ean))
for page in pages:
width = 1200 # maximum width
width = 1200 # maximum width
yield BaseImage(page['page'],
gallery=gallery,
url=("http://www.izneo.com/playerv2/%s/%s/%s/%d/%s" %
(page['expires'], page['token'], ean, width, page['page'])))
class IzneoBrowser(BaseBrowser):
PAGES = { r'http://.+\.izneo.\w+/readv2-.+': ReaderV2 }
PAGES = {r'http://.+\.izneo.\w+/readv2-.+': ReaderV2}
def iter_gallery_images(self, gallery):
self.location(gallery.url)
@ -88,7 +90,7 @@ class IzneoBackend(BaseBackend, ICapGallery):
return gallery
def fill_gallery(self, gallery, fields):
gallery.title = gallery.id
gallery.title = gallery.id
def fill_image(self, image, fields):
with self.browser:
@ -96,4 +98,4 @@ class IzneoBackend(BaseBackend, ICapGallery):
OBJECTS = {
BaseGallery: fill_gallery,
BaseImage: fill_image }
BaseImage: fill_image}

View file

@ -67,4 +67,3 @@ class LCLBackend(BaseBackend, ICapBank):
with self.browser:
for history in self.browser.get_history(account):
yield history

View file

@ -28,17 +28,18 @@ import tempfile
import math
import random
class LCLVirtKeyboard(MappedVirtKeyboard):
symbols={'0':'9da2724133f2221482013151735f033c',
'1':'873ab0087447610841ae1332221be37b',
'2':'93ce6c330393ff5980949d7b6c800f77',
'3':'b2d70c69693784e1bf1f0973d81223c0',
'4':'498c8f5d885611938f94f1c746c32978',
'5':'359bcd60a9b8565917a7bf34522052c3',
'6':'aba912172f21f78cd6da437cfc4cdbd0',
'7':'f710190d6b947869879ec02d8e851dfa',
'8':'b42cc25e1539a15f767aa7a641f3bfec',
'9':'cc60e5894a9d8e12ee0c2c104c1d5490'
symbols={'0': '9da2724133f2221482013151735f033c',
'1': '873ab0087447610841ae1332221be37b',
'2': '93ce6c330393ff5980949d7b6c800f77',
'3': 'b2d70c69693784e1bf1f0973d81223c0',
'4': '498c8f5d885611938f94f1c746c32978',
'5': '359bcd60a9b8565917a7bf34522052c3',
'6': 'aba912172f21f78cd6da437cfc4cdbd0',
'7': 'f710190d6b947869879ec02d8e851dfa',
'8': 'b42cc25e1539a15f767aa7a641f3bfec',
'9': 'cc60e5894a9d8e12ee0c2c104c1d5490'
}
url="/outil/UAUT/Clavier/creationClavier?random="
@ -66,9 +67,11 @@ class LCLVirtKeyboard(MappedVirtKeyboard):
code+=self.get_symbol_code(self.symbols[c])
return code
class SkipPage(BasePage):
pass
class LoginPage(BasePage):
def myXOR(self,value,seed):
s=''
@ -120,6 +123,7 @@ class LoginPage(BasePage):
return True
return False
class AccountsPage(BasePage):
def get_list(self):
l = []
@ -145,6 +149,7 @@ class AccountsPage(BasePage):
l.append(account)
return l
class AccountHistoryPage(BasePage):
def get_operations(self,account):
operations = []
@ -194,5 +199,3 @@ class AccountHistoryPage(BasePage):
operation.amount=amount
operations.append(operation)
return operations

View file

@ -20,6 +20,7 @@
from weboob.tools.capabilities.messages.genericArticle import GenericNewsPage, remove_from_selector_list, drop_comments, try_drop_tree, try_remove_from_selector_list
class ArticlePage(GenericNewsPage):
"ArticlePage object for inrocks"
def on_loaded(self):
@ -51,7 +52,6 @@ class ArticlePage(GenericNewsPage):
a.drop_tree()
div.drop_tree()
# This part of the article seems manually generated.
for crappy_title in self.parser.select(element_body, 'p strong'):
if crappy_title.text == 'LIRE AUSSI :' or crappy_title.text == 'LIRE AUSSI:':

View file

@ -20,11 +20,12 @@
from weboob.tools.capabilities.messages.genericArticle import GenericNewsPage
class FlashActuPage(GenericNewsPage):
"ArticlePage object for inrocks"
def on_loaded(self):
self.main_div = self.document.getroot()
self.element_title_selector = "h1"
self.element_title_selector = "h1"
self.element_author_selector = "div.name>span"
self.element_body_selector = "h2"
@ -32,4 +33,3 @@ class FlashActuPage(GenericNewsPage):
element_body = self.get_element_body()
element_body.tag = "div"
return self.parser.tostring(element_body)

View file

@ -20,10 +20,11 @@
from weboob.tools.capabilities.messages.genericArticle import GenericNewsPage
class SimplePage(GenericNewsPage):
"ArticlePage object for minutes20"
def on_loaded(self):
self.main_div = self.document.getroot()
self.element_author_selector = "div.mna-signature"
self.element_body_selector = "#article"

View file

@ -19,11 +19,12 @@
from weboob.tools.capabilities.messages.genericArticle import GenericNewsPage, try_remove_from_selector_list
class SpecialPage(GenericNewsPage):
"ArticlePage object for inrocks"
def on_loaded(self):
self.main_div = self.document.getroot()
self.element_title_selector = "h2"
self.element_title_selector = "h2"
self.element_author_selector = "div.name>span"
self.element_body_selector = ".block-text"
@ -32,4 +33,3 @@ class SpecialPage(GenericNewsPage):
try_remove_from_selector_list(self.parser, element_body, ['div'])
element_body.tag = "div"
return self.parser.tostring(element_body)

View file

@ -18,22 +18,24 @@
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
def id2url(_id):
"return an url from an id"
regexp2 = re.compile("(\w+).([0-9]+).(.*$)")
match = regexp2.match(_id)
if match:
return 'http://www.20minutes.fr/%s/%s/%s' % ( match.group(1),
match.group(2),
match.group(3))
return 'http://www.20minutes.fr/%s/%s/%s' % (match.group(1),
match.group(2),
match.group(3))
else:
raise ValueError("id doesn't match")
def url2id(url):
"return an id from an url"
return url
def rssid(entry):
return url2id(entry.id)

View file

@ -21,6 +21,7 @@ from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicRea
__all__ = ['MangafoxBackend']
class MangafoxBackend(GenericComicReaderBackend):
NAME = 'mangafox'
DESCRIPTION = 'Manga Fox manga reading website'
@ -31,4 +32,4 @@ class MangafoxBackend(GenericComicReaderBackend):
ID_REGEXP = r'[^/]+/[^/]+(?:/[^/]+)?'
URL_REGEXP = r'.+mangafox.com/manga/(%s).*' % ID_REGEXP
ID_TO_URL = 'http://www.mangafox.com/manga/%s'
PAGES = { r'http://.+\.mangafox.\w+/manga/[^/]+/[^/]+/([^/]+/)?(.+\.html)?': DisplayPage }
PAGES = {r'http://.+\.mangafox.\w+/manga/[^/]+/[^/]+/([^/]+/)?(.+\.html)?': DisplayPage}

View file

@ -20,7 +20,9 @@
from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicReaderTest
class MangafoxTest(GenericComicReaderTest):
BACKEND = 'mangafox'
def test_download(self):
return self._test_download('glass_no_kamen/v02/c000')

View file

@ -21,6 +21,7 @@ from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicRea
__all__ = ['MangahereBackend']
class MangahereBackend(GenericComicReaderBackend):
NAME = 'mangahere'
DESCRIPTION = 'Manga Here manga reading website'
@ -31,4 +32,4 @@ class MangahereBackend(GenericComicReaderBackend):
ID_REGEXP = r'[^/]+/[^/]+/[^/]+'
URL_REGEXP = r'.+mangahere.com/manga/(%s).+' % ID_REGEXP
ID_TO_URL = 'http://www.mangahere.com/manga/%s'
PAGES = { URL_REGEXP: DisplayPage }
PAGES = {URL_REGEXP: DisplayPage}

View file

@ -19,8 +19,9 @@
from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicReaderTest
class MangahereTest(GenericComicReaderTest):
BACKEND = 'mangahere'
def test_download(self):
return self._test_download('glass_no_kamen/v02/c000')

View file

@ -19,8 +19,10 @@
from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicReaderBackend, DisplayPage
__all__ = ['MangareaderBackend']
class MangareaderBackend(GenericComicReaderBackend):
NAME = 'mangareader'
DESCRIPTION = 'MangaReader manga reading website'
@ -31,4 +33,4 @@ class MangareaderBackend(GenericComicReaderBackend):
ID_REGEXP = r'[^/]+/[^/]+'
URL_REGEXP = r'.+mangareader.net/(%s).+' % ID_REGEXP
ID_TO_URL = 'http://www.mangareader.net/%s'
PAGES = { r'http://.+\.mangareader.net/.+': DisplayPage } # oh well
PAGES = {r'http://.+\.mangareader.net/.+': DisplayPage} # oh well

View file

@ -19,8 +19,9 @@
from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicReaderTest
class MangareaderTest(GenericComicReaderTest):
BACKEND = 'mangareader'
def test_download(self):
return self._test_download('glass-mask/3')

View file

@ -29,6 +29,7 @@ from .browser import MediawikiBrowser
__all__ = ['MediawikiBackend']
class MediawikiBackend(BaseBackend, ICapContent):
NAME = 'mediawiki'
MAINTAINER = u'Clément Schreiner'
@ -42,6 +43,7 @@ class MediawikiBackend(BaseBackend, ICapContent):
ValueBackendPassword('password', label='Password', default=''))
BROWSER = MediawikiBrowser
def create_default_browser(self):
username = self.config['username'].get()
if len(username) > 0:

View file

@ -21,7 +21,6 @@ from urlparse import urlsplit
import urllib
import datetime
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword
from weboob.capabilities.content import Revision
@ -37,6 +36,7 @@ __all__ = ['MediawikiBrowser']
class APIError(Exception):
pass
# Browser
class MediawikiBrowser(BaseBrowser):
ENCODING = 'utf-8'
@ -63,8 +63,6 @@ class MediawikiBrowser(BaseBrowser):
'intoken': 'edit',
}
result = self.API_get(data)
pageid = result['query']['pages'].keys()[0]
if pageid == "-1": # Page does not exist
@ -83,8 +81,7 @@ class MediawikiBrowser(BaseBrowser):
}
result = self.API_get(data)
pageid = result['query']['pages'].keys()[0]
return result['query']['pages'][str(pageid)][_type+'token']
return result['query']['pages'][str(pageid)][_type + 'token']
def set_wiki_source(self, content, message=None, minor=False):
if len(self.username) > 0 and not self.is_logged():
@ -138,7 +135,9 @@ class MediawikiBrowser(BaseBrowser):
self.API_post(data)
def iter_wiki_revisions(self, page, nb_entries):
'''Yield 'Revision' objects for the last <nb_entries> revisions of the specified page.'''
"""
Yield 'Revision' objects for the last <nb_entries> revisions of the specified page.
"""
if len(self.username) > 0 and not self.is_logged():
self.login()
data = {'action': 'query',
@ -158,14 +157,11 @@ class MediawikiBrowser(BaseBrowser):
rev_content.revision = str(rev['revid'])
rev_content.author = rev['user']
rev_content.timestamp = datetime.datetime.strptime(rev['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
if rev.has_key('minor'):
rev_content.minor = True
else:
rev_content.minor = False
rev_content.minor = 'minor' in rev
yield rev_content
def home(self):
'''We don't need to change location, we're using the JSON API here.'''
# We don't need to change location, we're using the JSON API here.
pass
def check_result(self, result):
@ -173,17 +169,20 @@ class MediawikiBrowser(BaseBrowser):
raise APIError('%s' % result['error']['info'])
def API_get(self, data):
'''Submit a GET request to the website
The JSON data is parsed and returned as a dictionary'''
"""
Submit a GET request to the website
The JSON data is parsed and returned as a dictionary
"""
data['format'] = 'json'
result = simplejson.loads(self.readurl(self.buildurl(self.apiurl, **data)), 'utf-8')
self.check_result(result)
return result
def API_post(self, data):
'''Submit a POST request to the website
The JSON data is parsed and returned as a dictionary'''
"""
Submit a POST request to the website
The JSON data is parsed and returned as a dictionary
"""
data['format'] = 'json'
result = simplejson.loads(self.readurl(self.apiurl, urllib.urlencode(data)), 'utf-8')
self.check_result(result)

View file

@ -18,7 +18,6 @@
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BasePage
from weboob.capabilities.weather import Forecast, Current, City
@ -69,10 +68,10 @@ class WeatherPage(BasePage):
Return the city from the forecastpage.
"""
for div in self.document.getiterator('div'):
if div.attrib.has_key("class") and div.attrib.get("class") == "choix":
if div.attrib.get("class", "") == "choix":
for strong in div.getiterator("strong"):
city_name=strong.text +" "+ strong.tail.replace("(","").replace(")","")
city_id=self.url.split("/")[-1]
city_name = strong.text + " " + strong.tail.replace("(", "").replace(")", "")
city_id = self.url.split("/")[-1]
return City(city_id, city_name)
@ -84,5 +83,5 @@ class CityPage(BasePage):
city_name = li.text_content()
for children in li.getchildren():
city_id = children.attrib.get("href").split("/")[-1]
mcity = City( city_id, city_name)
mcity = City(city_id, city_name)
yield mcity

View file

@ -23,6 +23,7 @@ from weboob.tools.capabilities.messages.GenericBackend import GenericNewspaperBa
from .browser import Newspaper20minutesBrowser
from .tools import rssid
class Newspaper20minutesBackend(GenericNewspaperBackend, ICapMessages):
MAINTAINER = 'Julien Hebert'
EMAIL = 'juke@free.fr'
@ -34,4 +35,3 @@ class Newspaper20minutesBackend(GenericNewspaperBackend, ICapMessages):
BROWSER = Newspaper20minutesBrowser
RSS_FEED = 'http://www.20minutes.fr/rss/20minutes.xml'
RSSID = rssid

View file

@ -21,6 +21,7 @@
from weboob.tools.capabilities.messages.genericArticle import NoAuthorElement, try_remove, NoneMainDiv
from .simple import SimplePage
class ArticlePage(SimplePage):
"ArticlePage object for minutes20"
def on_loaded(self):
@ -37,9 +38,8 @@ class ArticlePage(SimplePage):
else:
try_remove(self.parser, element_body, "div.mna-tools")
try_remove(self.parser, element_body, "div.mna-comment-call")
try :
try:
element_body.remove(self.get_element_author())
except NoAuthorElement:
pass
return self.parser.tostring(element_body)

View file

@ -20,6 +20,7 @@
from weboob.tools.capabilities.messages.genericArticle import GenericNewsPage
class SimplePage(GenericNewsPage):
"ArticlePage object for minutes20"
def on_loaded(self):
@ -27,4 +28,3 @@ class SimplePage(GenericNewsPage):
self.element_title_selector = "h1"
self.element_author_selector = "div.mna-signature"
self.element_body_selector = "div.mna-body"

View file

@ -18,24 +18,27 @@
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
def id2url(_id):
"return an url from an id"
regexp2 = re.compile("(\w+).([0-9]+).(.*$)")
match = regexp2.match(_id)
if match:
return 'http://www.20minutes.fr/%s/%s/%s' % ( match.group(1),
match.group(2),
match.group(3))
return 'http://www.20minutes.fr/%s/%s/%s' % (match.group(1),
match.group(2),
match.group(3))
else:
raise ValueError("id doesn't match")
def url2id(url):
"return an id from an url"
regexp = re.compile("http://www.20minutes.fr/(\w+)/([0-9]+)/(.*$)")
match = regexp.match(url)
return '%s.%d.%s' % (match.group(1), int(match.group(2)), match.group(3))
def rssid(entry):
return url2id(entry.id)

View file

@ -20,10 +20,10 @@
from weboob.tools.test import BackendTest
class NewsfeedTest(BackendTest):
BACKEND = 'newsfeed'
def test_newsfeed(self):
for message in self.backend.iter_unread_messages():
pass

View file

@ -35,8 +35,8 @@ __all__ = ['VideoPage']
class ForbiddenVideo(Exception):
pass
class VideoPage(BasePage):
class VideoPage(BasePage):
def get_video(self, video=None):
_id = to_unicode(self.group_dict['id'])
if video is None:
@ -84,4 +84,3 @@ class VideoPage(BasePage):
video.url = values['url']
return video

View file

@ -67,4 +67,3 @@ class AloesBackend(BaseBackend, ICapBook):
def search_books(self, _string):
raise NotImplementedError()

View file

@ -42,7 +42,7 @@ class AloesBrowser(BaseBrowser):
}
def __init__(self, baseurl, *args, **kwargs):
self.BASEURL=baseurl
self.BASEURL = baseurl
BaseBrowser.__init__(self, *args, **kwargs)
def is_logged(self):
@ -60,10 +60,9 @@ class AloesBrowser(BaseBrowser):
no_login=True)
if not self.page.login(self.username, self.password) or \
not self.is_logged() or \
(self.is_on_page(LoginPage) and self.page.is_error()) :
(self.is_on_page(LoginPage) and self.page.is_error()):
raise BrowserIncorrectPassword()
def get_rented_books_list(self):
if not self.is_on_page(RentedPage):
self.location('%s://%s/index.aspx?IdPage=45' \
@ -73,6 +72,6 @@ class AloesBrowser(BaseBrowser):
def get_booked_books_list(self):
if not self.is_on_page(BookedPage):
self.location('%s://%s/index.aspx?IdPage=44' \
self.location('%s://%s/index.aspx?IdPage=44' \
% (self.PROTOCOL, self.BASEURL))
return self.page.get_list()

View file

@ -24,6 +24,7 @@ from weboob.tools.browser import BasePage
__all__ = ['ComposePage', 'ConfirmPage']
class ConfirmPage(BasePage):
def on_loaded(self):
pass
@ -33,9 +34,9 @@ class ComposePage(BasePage):
phone_regex = re.compile('^(\+33|0033|0)(6|7)(\d{8})$')
def on_loaded(self):
#Deal with bad encoding... for ie6 ...
# Deal with bad encoding... for ie6...
response = self.browser.response()
response.set_data(response.get_data().decode('utf-8', 'ignore') )
response.set_data(response.get_data().decode('utf-8', 'ignore'))
self.browser.set_response(response)
def get_nb_remaining_free_sms(self):
@ -46,12 +47,12 @@ class ComposePage(BasePage):
if self.phone_regex.match(receiver) is None:
raise CantSendMessage(u'Invalid receiver: %s' % receiver)
listetel = ",,"+ receiver
listetel = ",," + receiver
#Fill the form
self.browser.select_form(name="formulaire")
self.browser.new_control("hidden", "autorize",{'value':''})
self.browser.new_control("textarea", "msg", {'value':''})
self.browser.new_control("hidden", "autorize", {'value': ''})
self.browser.new_control("textarea", "msg", {'value': ''})
self.browser.set_all_readonly(False)

View file

@ -43,8 +43,8 @@ class PastealaconBackend(BaseBackend, BasePasteBackend):
BROWSER = PastealaconBrowser
EXPIRATIONS = {
24*3600: 'd',
24*3600*30: 'm',
24 * 3600: 'd',
24 * 3600 * 30: 'm',
False: 'f',
}
@ -83,7 +83,7 @@ class PastealaconBackend(BaseBackend, BasePasteBackend):
self.browser.fill_paste(paste)
return paste
def post_paste(self, paste, max_age = None):
def post_paste(self, paste, max_age=None):
if max_age is not None:
expiration = self.get_closest_expiration(max_age)
else:

View file

@ -21,6 +21,7 @@ from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicRea
__all__ = ['SimplyreaditBackend']
class SimplyreaditBackend(GenericComicReaderBackend):
NAME = 'simplyreadit'
DESCRIPTION = 'SimplyReadIt manga reading website'
@ -30,4 +31,4 @@ class SimplyreaditBackend(GenericComicReaderBackend):
ID_TO_URL = 'http://www.simplyread.it/reader/read/%s'
ID_REGEXP = r'[^/]+(?:/[^/]+)*'
URL_REGEXP = r'.+simplyread.it/reader/read/(%s)/page/.+' % ID_REGEXP
PAGES = { r'http://.+\.simplyread.it/reader/read/.+': DisplayPage }
PAGES = {r'http://.+\.simplyread.it/reader/read/.+': DisplayPage}

View file

@ -19,8 +19,9 @@
from weboob.tools.capabilities.gallery.genericcomicreader import GenericComicReaderTest
class SimplyreaditTest(GenericComicReaderTest):
BACKEND = 'simplyreadit'
def test_download(self):
return self._test_download('bonnouji/en/1/3')

View file

@ -21,18 +21,20 @@
import hashlib
import Image
class TileError(Exception):
def __init__(self, msg, tile = None):
def __init__(self, msg, tile=None):
Exception.__init__(self, msg)
self.tile = tile
class Captcha:
class Captcha(object):
def __init__(self, file, infos):
self.inim = Image.open(file)
self.infos = infos
self.nbr = int(infos["nblignes"])
self.nbc = int(infos["nbcolonnes"])
(self.nx,self.ny) = self.inim.size
(self.nx, self.ny) = self.inim.size
self.inmat = self.inim.load()
self.map = {}
@ -77,7 +79,7 @@ class Captcha:
self.map[num] = tile
class Tile:
class Tile(object):
hash = {'ff1441b2c5f90703ef04e688e399aca5': 1,
'53d7f3dfd64f54723b231fc398b6be57': 2,
'5bcba7fa2107ba9a606e8d0131c162eb': 3,
@ -116,4 +118,3 @@ class Tile:
def display(self):
print self.checksum()

View file

@ -21,7 +21,9 @@
from .accounts_list import AccountsList
from .login import LoginPage, BadLoginPage
class AccountPrelevement(AccountsList): pass
class AccountPrelevement(AccountsList):
pass
__all__ = ['LoginPage',
'BadLoginPage',

View file

@ -53,11 +53,11 @@ class LoginPage(BasePage):
infos_xml = etree.XML(infos_data)
infos = {}
for el in ("cryptogramme", "nblignes", "nbcolonnes"):
infos[el] = infos_xml.find(el).text
infos[el] = infos_xml.find(el).text
infos["grille"] = ""
for g in infos_xml.findall("grille"):
infos["grille"] += g.text + ","
infos["grille"] += g.text + ","
infos["keyCodes"] = infos["grille"].split(",")
url = base_url + '/cvcsgenimage?modeClavier=0&cryptogramme=' + infos["cryptogramme"]
@ -79,5 +79,6 @@ class LoginPage(BasePage):
self.browser['cryptocvcs'] = infos["cryptogramme"]
self.browser.submit()
class BadLoginPage(BasePage):
pass

View file

@ -16,6 +16,3 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.

View file

@ -63,4 +63,3 @@ class VideoPage(BasePage):
video.url = video_file_urls[0]
return video

View file

@ -44,6 +44,7 @@ class YoupornBackend(BaseBackend, ICapVideo):
return self.browser.get_video(_id)
SORTBY = ['relevance', 'rating', 'views', 'time']
def search_videos(self, pattern=None, sortby=ICapVideo.SEARCH_RELEVANCE, nsfw=False, max_results=None):
if not nsfw:
return set()

View file

@ -47,6 +47,7 @@ class LoginPage(BasePage):
self.browser['Passwd'] = password
self.browser.submit()
class LoginRedirectPage(BasePage):
pass
@ -64,6 +65,7 @@ class BaseYoutubePage(BasePage):
else:
return True
class ForbiddenVideoPage(BaseYoutubePage):
def on_loaded(self):
element = self.parser.select(self.document.getroot(), '.yt-alert-content', 1)
@ -78,11 +80,13 @@ class VerifyAgePage(BaseYoutubePage):
self.browser.select_form(predicate=lambda form: form.attrs.get('id', '') == 'confirm-age-form')
self.browser.submit()
class VerifyControversyPage(BaseYoutubePage):
def on_loaded(self):
self.browser.select_form(predicate=lambda form: 'verify_controversy' in form.attrs.get('action', ''))
self.browser.submit()
class VideoPage(BaseYoutubePage):
AVAILABLE_FORMATS = [38, 37, 45, 22, 43, 35, 34, 18, 6, 5, 17, 13]
FORMAT_EXTENSIONS = {
@ -91,7 +95,7 @@ class VideoPage(BaseYoutubePage):
18: 'mp4',
22: 'mp4',
37: 'mp4',
38: 'video', # You actually don't know if this will be MOV, AVI or whatever
38: 'video', # You actually don't know if this will be MOV, AVI or whatever
43: 'webm',
45: 'webm',
}