[cuisineaz] site changed / rewritten using browser 2

This commit is contained in:
Bezleputh 2015-08-06 19:34:07 +02:00
commit ab533ad1b7
4 changed files with 110 additions and 154 deletions

View file

@@ -18,7 +18,7 @@
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.deprecated.browser import Browser, BrowserHTTPNotFound
from weboob.browser import PagesBrowser, URL
from .pages import RecipePage, ResultsPage
@@ -26,26 +26,18 @@ from .pages import RecipePage, ResultsPage
__all__ = ['CuisineazBrowser']
class CuisineazBrowser(PagesBrowser):
    """Browser for www.cuisineaz.com, using the weboob browser 2 API.

    Declares the two page types of the site (search results and recipe
    pages) as URL patterns; navigation is done through those URL objects.
    """
    BASEURL = 'http://www.cuisineaz.com'

    # Search result listing; `pattern` is the urlencoded search string.
    search = URL('recettes/recherche_v2.aspx\?recherche=(?P<pattern>.*)', ResultsPage)
    # A single recipe page, identified by its URL slug.
    recipe = URL('recettes/(?P<_id>.*).aspx', RecipePage)

    def iter_recipes(self, pattern):
        """Yield Recipe objects matching `pattern`.

        The site expects words joined by '-' in the query string.
        """
        return self.search.go(pattern=pattern.replace(' ', '-')).iter_recipes()

    def get_recipe(self, _id, obj=None):
        """Return the full Recipe for `_id`.

        :param obj: optional partially-filled Recipe to complete in place
                    (passed through to the page's ItemElement).
        """
        return self.recipe.go(_id=_id).get_recipe(obj=obj)

    def get_comments(self, _id):
        """Yield Comment objects for the recipe `_id`."""
        return self.recipe.go(_id=_id).get_comments()

View file

@@ -50,15 +50,10 @@ class CuisineazModule(Module, CapRecipe):
def fill_recipe(self, recipe, fields):
    """Complete the requested `fields` of a partially-loaded Recipe.

    Called by weboob's fillobj machinery.  The recipe page's ItemElement
    fills the object in place when it is passed as `obj`, so no manual
    field-by-field copy is needed.

    :param recipe: Recipe to complete (must have a valid `id`)
    :param fields: iterable of field names the caller wants filled
    :return: the completed Recipe
    """
    if 'nb_person' in fields or 'instructions' in fields:
        recipe = self.browser.get_recipe(recipe.id, recipe)

    if 'comments' in fields:
        # get_comments returns a generator; materialize it for the capability.
        recipe.comments = list(self.browser.get_comments(recipe.id))

    return recipe

View file

@@ -19,133 +19,100 @@
from weboob.capabilities.recipe import Recipe, Comment
from weboob.capabilities.base import NotAvailable, NotLoaded
from weboob.deprecated.browser import Page
from weboob.capabilities.base import NotAvailable
from weboob.browser.pages import HTMLPage, pagination
from weboob.browser.elements import ItemElement, method, ListElement
from weboob.browser.filters.standard import CleanText, Regexp, Env, Time
from weboob.browser.filters.html import XPath, CleanHTML
import re
import datetime
class CuisineazDuration(Time):
    """Time filter variant that parses a duration (e.g. "1 h 20 min")
    into a datetime.timedelta instead of a time of day.
    """
    klass = datetime.timedelta
    # NOTE(review): every group is optional, so an empty string also matches;
    # the trailing bare-digit group is presumably seconds — TODO confirm
    # against actual site markup.
    _regexp = re.compile(r'((?P<hh>\d+) h)?((?P<mm>\d+) min)?(?P<ss>\d+)?')
    kwargs = {'hours': 'hh', 'minutes': 'mm', 'seconds': 'ss'}
class ResultsPage(HTMLPage):
    """ Page which contains results as a list of recipes
    """

    @pagination
    @method
    class iter_recipes(ListElement):
        item_xpath = '//div[@id="divRecette"]'

        def next_page(self):
            # Follow the "next" pagination link when one is present;
            # returning None (implicitly) stops pagination.
            next = CleanText('//li[@class="next"]/span/a/@href',
                             default=None)(self)
            if next:
                return next

        class item(ItemElement):
            klass = Recipe

            def condition(self):
                # Keep only entries whose title link is an actual recipe URL.
                return Regexp(CleanText('./div[has-class("searchTitle")]/h2/a/@href'),
                              'http://www.cuisineaz.com/recettes/(.*).aspx',
                              default=None)(self.el)

            # The recipe id is the URL slug between /recettes/ and .aspx.
            obj_id = Regexp(CleanText('./div[has-class("searchTitle")]/h2/a/@href'),
                            'http://www.cuisineaz.com/recettes/(.*).aspx')
            obj_title = CleanText('./div[has-class("searchTitle")]/h2/a')
            # Thumbnail may be lazy-loaded (data-src) or a plain src attribute.
            obj_thumbnail_url = CleanText('./div[has-class("searchImg")]/span/img[@data-src!=""]/@data-src|./div[has-class("searchImg")]/div/span/img[@src!=""]/@src',
                                          default=None)
            obj_short_description = CleanText('./div[has-class("searchIngredients")]')
class RecipePage(HTMLPage):
    """ Page which contains a recipe
    """

    @method
    class get_recipe(ItemElement):
        klass = Recipe

        # The id comes from the URL (see CuisineazBrowser.recipe), not the page.
        obj_id = Env('_id')
        obj_title = CleanText('//div[@id="ficheRecette"]/h1')
        obj_picture_url = CleanText('//img[@id="shareimg" and @src!=""]/@src', default=None)
        obj_thumbnail_url = CleanText('//img[@id="shareimg" and @src!=""]/@src', default=None)

        def obj_preparation_time(self):
            # Site shows e.g. "1 h 20 min"; convert to whole minutes.
            _prep = CuisineazDuration(CleanText('//span[@id="ctl00_ContentPlaceHolder_LblRecetteTempsPrepa"]'))(self)
            return int(_prep.total_seconds() / 60)

        def obj_cooking_time(self):
            _cook = CuisineazDuration(CleanText('//span[@id="ctl00_ContentPlaceHolder_LblRecetteTempsCuisson"]'))(self)
            return int(_cook.total_seconds() / 60)

        def obj_nb_person(self):
            # Kept as a one-element list of the raw text, matching the
            # capability's list-valued nb_person field.
            nb_pers = CleanText('//span[@id="ctl00_ContentPlaceHolder_LblRecetteNombre"]')(self)
            return [nb_pers] if nb_pers else NotAvailable

        def obj_ingredients(self):
            ingredients = []
            for el in XPath('//div[@id="ingredients"]/ul/li')(self):
                ingredients.append(CleanText('.')(el))
            return ingredients

        obj_instructions = CleanHTML('//div[@id="preparation"]/span[@class="instructions"]')

    @method
    class get_comments(ListElement):
        item_xpath = '//div[@class="comment pb15 row"]'

        class item(ItemElement):
            klass = Comment

            obj_author = CleanText('./div[has-class("comment-left")]/div/div/div[@class="fs18 txtcaz mb5 first-letter"]')
            obj_text = CleanText('./div[has-class("comment-right")]/div/p')
            obj_id = CleanText('./@id')

            def obj_rate(self):
                # Rating is rendered as one star icon per point; count them.
                return len(XPath('./div[has-class("comment-right")]/div/div/div/span/span[@class="icon icon-star"]')(self))

View file

@@ -18,13 +18,15 @@
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
import itertools
class CuisineazTest(BackendTest):
    MODULE = 'cuisineaz'

    def test_recipe(self):
        """Search for recipes and check each can be fully loaded."""
        # Cap the search to the first 20 results to keep the test bounded.
        recipes = list(itertools.islice(self.backend.iter_recipes(u'purée'), 0, 20))
        assert len(recipes)
        for recipe in recipes:
            full_recipe = self.backend.get_recipe(recipe.id)
            assert full_recipe.instructions