Use flake8 if available instead of pyflakes

With flake8, we can check for more issues and ignore those that are not
real issues.

This allowed me to find genuine errors in:
- modules/boursorama/pages/account_history.py
- modules/ing/pages/login.py
- weboob/tools/application/qt/qt.py
I left one in weboob/tools/browser/browser.py for the time being.

Some PEP8 fixes on other files.
This commit is contained in:
Laurent Bachelier 2012-11-24 19:46:34 +01:00
commit 541d080c9d
18 changed files with 54 additions and 45 deletions

View file

@@ -23,7 +23,6 @@ from datetime import date
import re
from weboob.tools.browser import BasePage
from weboob.capabilities.bank import Transaction
from weboob.tools.capabilities.bank.transactions import FrenchTransaction

View file

@@ -25,7 +25,7 @@ from logging import warning, debug
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
from cgi import parse_qs # NOQA
from weboob.tools.misc import html2text, get_bytes_size
from weboob.capabilities.torrent import Torrent
@@ -69,7 +69,7 @@ class TorrentsPage(BasePage):
current_group += ' - '
current_group += a.text
elif tr.attrib.get('class', '').startswith('group_torrent') or \
tr.attrib.get('class', '').startswith('torrent'):
tr.attrib.get('class', '').startswith('torrent'):
tds = tr.findall('td')
title = current_group
@@ -100,9 +100,9 @@ class TorrentsPage(BasePage):
continue
id = '%s.%s' % (params['id'][0], m.group(1))
try:
size, unit = tds[i+3].text.split()
size, unit = tds[i + 3].text.split()
except ValueError:
size, unit = tds[i+2].text.split()
size, unit = tds[i + 2].text.split()
size = get_bytes_size(float(size.replace(',', '')), unit)
seeders = int(tds[-2].text)
leechers = int(tds[-1].text)
@@ -159,8 +159,8 @@ class TorrentsPage(BasePage):
torrent.seeders = int(tds[3].text)
torrent.leechers = int(tds[4].text)
break
elif not is_table and tr.attrib.get('class', '').startswith('torrent_widget') and \
tr.attrib.get('class', '').endswith('pad'):
elif not is_table and tr.attrib.get('class', '').startswith('torrent_widget') \
and tr.attrib.get('class', '').endswith('pad'):
url = tr.cssselect('a[title=Download]')[0].attrib['href']
m = self.TORRENTID_REGEXP.match(url)
if not m:

View file

@@ -23,7 +23,7 @@ import re
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
from cgi import parse_qs # NOQA
from weboob.capabilities import NotAvailable
from weboob.tools.browser import BasePage, BrokenPageError
@@ -87,6 +87,7 @@ class BaseVideoPage(BasePage):
def get_description(self):
raise NotImplementedError()
class VideoPage(BaseVideoPage):
URL_REGEXP = re.compile('http://www.ina.fr/(.+)\.html')

View file

@@ -104,8 +104,9 @@ class LoginPage(BasePage):
self.browser.submit(nologin=True)
def error(self):
error = self.document.find('//span[@class="error"]')
return error is not None
err = self.document.find('//span[@class="error"]')
return err is not None
class LoginPage2(BasePage):
def on_loaded(self):

View file

@@ -21,7 +21,7 @@
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
from cgi import parse_qs # NOQA
from urlparse import urlsplit

View file

@@ -29,7 +29,7 @@ import re
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
from cgi import parse_qs # NOQA
__all__ = ['RadioFranceBrowser', 'RadioFranceVideo']
@@ -93,7 +93,6 @@ class ReplayPage(BasePage):
return (radio_domain, player_id)
class DataPage(BasePage):
def get_current(self):
document = self.document