Easy spacing fixes, trailing stuff

Remove useless trailing backslashes (\)
Remove trailing spaces
Add missing empty lines

autopep8 -ir -j2 --select=E301,E302,E502,W291,W293,W391 .

Diff quickly checked.
This commit is contained in:
Laurent Bachelier 2013-03-15 20:01:49 +01:00
commit 7094931c92
231 changed files with 474 additions and 67 deletions

View file

@@ -25,6 +25,7 @@ from .base import CragrBasePage
from .tokenextractor import TokenExtractor
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
class Transaction(FrenchTransaction):
PATTERNS = [
(re.compile('^(Vp|Vt|Vrt|Virt|Vir(ement)?)\s*(?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_TRANSFER),
@@ -44,6 +45,7 @@ class Transaction(FrenchTransaction):
(re.compile('^RET.CARTE (?P<text>.*)', re.IGNORECASE), FrenchTransaction.TYPE_WITHDRAWAL),
]
class AccountsList(CragrBasePage):
"""
Unlike most pages used with the Browser class, this class represents

View file

@@ -21,6 +21,7 @@
from weboob.tools.browser import BasePage
from weboob.tools.browser import BrowserUnavailable
class CragrBasePage(BasePage):
def on_loaded(self):
# Check for an error

View file

@@ -17,16 +17,19 @@
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
class TokenExtractor:
""" Extracts texts token from an HTML document """
def __init__(self):
self.iterated_elements = []
def clear(self):
"""
Reset any content stored within a TokenExtractor object. Useful to start
a new parsing without creating a new instance.
"""
self.iterated_elements = []
def element_iterated_already(self, html_element):
if html_element in self.iterated_elements:
return True
@@ -34,6 +37,7 @@ class TokenExtractor:
if ancestor in self.iterated_elements:
return True
return False
def extract_tokens(self, html_element):
if self.element_iterated_already(html_element):
return
@@ -44,10 +48,12 @@
for token in self.split_text_into_smaller_tokens(text):
if self.token_looks_relevant(token):
yield token.strip()
@staticmethod
def split_text_into_smaller_tokens(text):
for subtext1 in text.split('\t'):
yield subtext1
@staticmethod
def token_looks_relevant(token):
return len(token.strip()) > 1