support repositories to manage backends (closes #747)
This commit is contained in:
parent
ef16a5b726
commit
14a7a1d362
410 changed files with 1079 additions and 297 deletions
0
modules/dlfp/pages/__init__.py
Normal file
0
modules/dlfp/pages/__init__.py
Normal file
58
modules/dlfp/pages/board.py
Normal file
58
modules/dlfp/pages/board.py
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright(C) 2010-2011 Romain Bignon
|
||||
#
|
||||
# This file is part of weboob.
|
||||
#
|
||||
# weboob is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# weboob is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
import re
|
||||
from logging import warning
|
||||
|
||||
from weboob.tools.browser import BasePage
|
||||
|
||||
class Message(object):
    """A single board ("tribune") message parsed from the XML feed.

    :param id: numeric message identifier
    :param timestamp: raw timestamp string, expected as YYYYMMDDHHMMSS
    :param login: author's login
    :param message: message body
    :param is_me: True when the message was posted by the logged-in user
    """

    # Matches a YYYYMMDDHHMMSS timestamp; groups 4-6 are HH, MM, SS.
    TIMESTAMP_REGEXP = re.compile(r'(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})')

    def __init__(self, id, timestamp, login, message, is_me):
        self.id = id
        self.timestamp = timestamp
        self.login = login
        self.message = message
        self.is_me = is_me
        # "norloge" is the HH:MM:SS label used on the board to reference
        # messages; fall back to the raw timestamp when it cannot be parsed.
        self.norloge = timestamp
        m = self.TIMESTAMP_REGEXP.match(timestamp)
        if m:
            self.norloge = '%02d:%02d:%02d' % (int(m.group(4)),
                                               int(m.group(5)),
                                               int(m.group(6)))
        else:
            # Lazy %-args: the string is only built if the record is emitted.
            warning('Unable to parse timestamp "%s"', timestamp)
|
||||
|
||||
class BoardIndexPage(BasePage):
    """XML feed page listing the board ("tribune") messages."""

    def is_logged(self):
        # The board feed is reachable whether or not we are authenticated.
        return True

    def get_messages(self, last=None):
        """Build Message objects from the feed's <post> elements.

        :param last: id of the newest already-known message; iteration
                     stops (exclusive) when that id is reached.
        """
        my_login = self.browser.username.lower()
        messages = []
        for post in self.parser.select(self.document.getroot(), 'post'):
            login = post.find('login').text
            msg = Message(int(post.attrib['id']),
                          post.attrib['time'],
                          login,
                          post.find('message').text,
                          login.lower() == my_login)
            if last is not None and last == msg.id:
                break
            messages.append(msg)
        return messages
|
||||
39
modules/dlfp/pages/index.py
Normal file
39
modules/dlfp/pages/index.py
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright(C) 2010-2011 Romain Bignon
|
||||
#
|
||||
# This file is part of weboob.
|
||||
#
|
||||
# weboob is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# weboob is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
from weboob.tools.browser import BasePage
|
||||
|
||||
class DLFPPage(BasePage):
    """Base page class for linuxfr.org, providing shared login detection."""

    def is_logged(self):
        # The signup sidebar form is only rendered for anonymous visitors,
        # so its presence means we are NOT authenticated.
        return not any(form.attrib.get('id', None) == 'new_account_sidebar'
                       for form in self.document.getiterator('form'))
|
||||
|
||||
class IndexPage(DLFPPage):
    """Home page, used to grab the CSRF token needed to log in."""

    def get_login_token(self):
        """Return the Rails authenticity token from the signup sidebar form.

        Returns None when no matching hidden input is found.
        """
        form = self.parser.select(self.document.getroot(), 'form#new_account_sidebar', 1)
        inputs = form.find('div').getiterator('input')
        return next((i.attrib['value'] for i in inputs
                     if i.attrib['name'] == 'authenticity_token'), None)
|
||||
|
||||
class LoginPage(DLFPPage):
    """Page returned after submitting the login form; no parsing needed,
    DLFPPage.is_logged() is enough to know whether the login worked."""
    pass
|
||||
211
modules/dlfp/pages/news.py
Normal file
211
modules/dlfp/pages/news.py
Normal file
|
|
@ -0,0 +1,211 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright(C) 2010-2011 Romain Bignon
|
||||
#
|
||||
# This file is part of weboob.
|
||||
#
|
||||
# weboob is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# weboob is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
from weboob.tools.browser import BrokenPageError
|
||||
from weboob.tools.misc import local2utc
|
||||
from dlfp.tools import url2id
|
||||
|
||||
from .index import DLFPPage
|
||||
|
||||
class Content(object):
    """Base class for linuxfr.org contents (articles, comments).

    Holds common metadata plus, when available, the URL/token pair used
    to vote on the content's relevance.
    """

    # Whether tags can be attached to this kind of content.
    TAGGABLE = False

    def __init__(self, browser):
        self.browser = browser
        # All textual fields start as empty unicode strings.
        for attr in ('url', 'id', 'title', 'author', 'username', 'body'):
            setattr(self, attr, u'')
        self.date = None
        self.score = 0
        self.comments = []
        # Set later (by subclasses) when a relevance-vote form exists.
        self.relevance_url = None
        self.relevance_token = None

    def is_taggable(self):
        return False
|
||||
|
||||
class Comment(Content):
    """A comment parsed from its <li> element in a comment thread.

    Child comments found under the element's <ul> are parsed recursively
    into self.comments.
    """

    def __init__(self, article, div, reply_id):
        # article: the Article this comment belongs to (supplies browser/url).
        # div: lxml element of the comment; its 'id' attribute is 'comment-<id>'.
        # reply_id: id of the parent comment, or 0 for a top-level comment.
        Content.__init__(self, article.browser)
        self.reply_id = reply_id
        self.signature = u''

        # Element id looks like "comment-1234"; keep the numeric part.
        self.id = div.attrib['id'].split('-')[1]
        self.url = '%s#%s' % (article.url, div.attrib['id'])
        self.title = unicode(self.browser.parser.select(div.find('h2'), 'a.title', 1).text)
        try:
            a = self.browser.parser.select(div.find('p'), 'a[rel=author]', 1)
        except BrokenPageError:
            # No author link: anonymous comment.
            self.author = 'Anonyme'
            self.username = None
        else:
            self.author = unicode(a.text)
            # href is of the form /users/<username>/... — take the 2nd segment.
            self.username = unicode(a.attrib['href'].split('/')[2])
        # Drop the "+HH:MM" timezone suffix, then convert local time to UTC.
        self.date = datetime.strptime(self.browser.parser.select(div.find('p'), 'time', 1).attrib['datetime'].split('+')[0],
                                      '%Y-%m-%dT%H:%M:%S')
        self.date = local2utc(self.date)

        content = div.find('div')
        try:
            signature = self.browser.parser.select(content, 'p.signature', 1)
        except BrokenPageError:
            # No signature.
            pass
        else:
            # Remove the signature node first so it is not duplicated in body.
            content.remove(signature)
            self.signature = self.browser.parser.tostring(signature)
        self.body = self.browser.parser.tostring(content)

        self.score = int(self.browser.parser.select(div.find('p'), 'span.score', 1).text)
        forms = self.browser.parser.select(div.find('footer'), 'form.button_to')
        if len(forms) > 0:
            # NOTE(review): rstrip() strips by character SET, not by suffix —
            # this could over-strip a URL ending in those letters; confirm.
            self.relevance_url = forms[0].attrib['action'].rstrip('for').rstrip('against')
            self.relevance_token = self.browser.parser.select(forms[0], 'input[name=authenticity_token]', 1).attrib['value']

        # Recursively parse child comments nested under this one.
        subs = div.find('ul')
        if subs is not None:
            for sub in subs.findall('li'):
                comment = Comment(article, sub, self.id)
                self.comments.append(comment)

    def iter_all_comments(self):
        """Yield every reply of this comment, depth-first, recursively."""
        for comment in self.comments:
            yield comment
            for c in comment.iter_all_comments():
                yield c

    def __repr__(self):
        return u"<Comment id=%r author=%r title=%r>" % (self.id, self.author, self.title)
|
||||
|
||||
class Article(Content):
    """An article (news entry, diary, ...) parsed from its page tree."""

    # Tags can be attached to articles (see ContentPage.is_taggable).
    TAGGABLE = True

    def __init__(self, browser, url, tree):
        # tree may be None to build a lightweight stub carrying only the
        # browser, url and derived id (used by CommentPage/ContentPage).
        Content.__init__(self, browser)
        self.url = url
        self.id = url2id(self.url)

        if tree is None:
            return

        header = tree.find('header')
        # Title is split across several <a> in the <h1>; join with an em-dash.
        self.title = u' — '.join([a.text for a in header.find('h1').findall('a')])
        try:
            a = self.browser.parser.select(header, 'a[rel=author]', 1)
        except BrokenPageError:
            # No author link: anonymous article.
            self.author = 'Anonyme'
            self.username = None
        else:
            self.author = unicode(a.text)
            # href is of the form /users/<username>/... — take the 2nd segment.
            self.username = unicode(a.attrib['href'].split('/')[2])
        self.body = self.browser.parser.tostring(self.browser.parser.select(tree, 'div.content', 1))
        try:
            # Drop the "+HH:MM" timezone suffix, then convert local time to UTC.
            self.date = datetime.strptime(self.browser.parser.select(header, 'time', 1).attrib['datetime'].split('+')[0],
                                          '%Y-%m-%dT%H:%M:%S')
            self.date = local2utc(self.date)
        except BrokenPageError:
            pass
        for form in self.browser.parser.select(tree.find('footer'), 'form.button_to'):
            if form.attrib['action'].endswith('/for'):
                # NOTE(review): rstrip() strips by character SET, not by suffix —
                # this could over-strip a URL ending in those letters; confirm.
                self.relevance_url = form.attrib['action'].rstrip('for').rstrip('against')
                self.relevance_token = self.browser.parser.select(form, 'input[name=authenticity_token]', 1).attrib['value']

        self.score = int(self.browser.parser.select(tree, 'div.figures figure.score', 1).text)

    def append_comment(self, comment):
        """Attach a top-level comment to this article."""
        self.comments.append(comment)

    def iter_all_comments(self):
        """Yield every comment of the article, depth-first, recursively."""
        for comment in self.comments:
            yield comment
            for c in comment.iter_all_comments():
                yield c
|
||||
|
||||
class CommentPage(DLFPPage):
    """Page displaying a single comment."""

    def get_comment(self):
        """Parse and return the lone Comment of this page."""
        # A stub Article (tree=None) is enough here: Comment only needs its
        # browser and url to build the comment permalink.
        stub = Article(self.browser, self.url, None)
        node = self.parser.select(self.document.getroot(), 'li.comment', 1)
        return Comment(stub, node, 0)
|
||||
|
||||
class ContentPage(DLFPPage):
    """Page displaying a content (news, diary, ...) and its comment threads."""

    def on_loaded(self):
        # Parsed Article cache; built lazily by get_article().
        self.article = None

    def is_taggable(self):
        return True

    def get_comment(self, id):
        """Return the Comment with the given id, or None if not on this page."""
        article = Article(self.browser, self.url, None)
        try:
            li = self.parser.select(self.document.getroot(), 'li#comment-%s' % id, 1)
        except BrokenPageError:
            return None
        else:
            return Comment(article, li, 0)

    def get_article(self):
        """Parse (once) and return the page's Article with its comments.

        Comment threads are parsed inside the cache guard so that calling
        this method twice does not append duplicate comments (previously
        the threads were re-appended on every call).
        """
        if not self.article:
            self.article = Article(self.browser,
                                   self.url,
                                   self.parser.select(self.document.getroot(), 'div#contents article', 1))
            try:
                threads = self.parser.select(self.document.getroot(), 'ul.threads', 1)
            except BrokenPageError:
                pass  # no comments
            else:
                for comment in threads.findall('li'):
                    self.article.append_comment(Comment(self.article, comment, 0))

        return self.article

    def get_post_comment_url(self):
        """Return the URL of the "post a comment" form."""
        return self.parser.select(self.document.getroot(), 'p#send-comment', 1).find('a').attrib['href']

    def get_tag_url(self):
        """Return the URL of the in-place tagging form."""
        return self.parser.select(self.document.getroot(), 'div.tag_in_place', 1).find('a').attrib['href']
|
||||
|
||||
class NewCommentPage(DLFPPage):
    """Page shown after posting a comment; nothing specific to parse."""
    pass
|
||||
|
||||
class NewTagPage(DLFPPage):
    """Page embedding the form used to attach tags to a content."""

    def _is_tag_form(self, form):
        # The tagging form is the one whose action targets .../tags.
        return form.action.endswith('/tags')

    def tag(self, tag):
        """Submit the tagging form with the given tag value."""
        browser = self.browser
        browser.select_form(predicate=self._is_tag_form)
        browser['tags'] = tag
        browser.submit()
|
||||
|
||||
class NodePage(DLFPPage):
    """Page answering a node action (e.g. a comment submission), possibly
    carrying a list of validation errors."""

    def get_errors(self):
        """Return the error messages displayed on the page (may be empty)."""
        try:
            div = self.parser.select(self.document.getroot(), 'div.errors', 1)
        except BrokenPageError:
            # No error block at all: the action succeeded.
            return []

        return [li.text for li in div.find('ul').findall('li')]
|
||||
56
modules/dlfp/pages/wiki.py
Normal file
56
modules/dlfp/pages/wiki.py
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright(C) 2010-2011 Romain Bignon
|
||||
#
|
||||
# This file is part of weboob.
|
||||
#
|
||||
# weboob is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# weboob is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from weboob.tools.browser import BrokenPageError
|
||||
|
||||
from .index import DLFPPage
|
||||
|
||||
class WikiEditPage(DLFPPage):
    """Wiki page creation/edition form."""

    def get_body(self):
        """Return the current wiki source, or '' when the textarea is missing."""
        try:
            area = self.parser.select(self.document.getroot(), 'textarea#wiki_page_wiki_body', 1)
        except BrokenPageError:
            return ''
        return area.text

    def _is_wiki_form(self, form):
        # Matches both the creation form and the edition form.
        return form.attrs.get('class', '') in ('new_wiki_page', 'edit_wiki_page')

    def post_content(self, title, body, message):
        """Create (title given) or update (title is None) the wiki page.

        :param title: new page title, or None to update an existing page
        :param body: wiki source to save
        :param message: optional edit message
        """
        browser = self.browser
        browser.select_form(predicate=self._is_wiki_form)
        browser.set_all_readonly(False)

        if title is None:
            browser['commit'] = 'Mettre à jour'
        else:
            browser['wiki_page[title]'] = title.encode('utf-8')
            browser['commit'] = 'Créer'
        browser['wiki_page[wiki_body]'] = body.encode('utf-8')
        if message is not None:
            browser['wiki_page[message]'] = message.encode('utf-8')

        browser.submit()

    def post_preview(self, body):
        """Submit the form to obtain a rendering preview of *body*."""
        browser = self.browser
        browser.select_form(predicate=self._is_wiki_form)
        browser['wiki_page[wiki_body]'] = body
        browser.submit()

    def get_preview_html(self):
        """Return the rendered HTML of the previewed page body."""
        body = self.parser.select(self.document.getroot(), 'article.wikipage div.content', 1)
        return self.parser.tostring(body)
|
||||
Loading…
Add table
Add a link
Reference in a new issue