import urllib2, re
from BeautifulSoup import *

class lolioppai:
	"""Scraper for the gallery listing pages of http://lolioppai.com.

	On construction it fetches listing page 1 and caches the HTML on
	``self.page`` so ``get_names()`` / ``get_links()`` work immediately.
	"""

	def __init__(self):
		# Fetch and record the first listing page up front.
		self.open_page(1)

	def open_page(self, page, record=1):
		"""Fetch listing page *page*.

		When *record* is truthy (the default), store the open response on
		``self.data`` and its body on ``self.page`` and return None;
		otherwise return the open response object for the caller to
		consume (and close).
		"""
		# Browser-like headers; the site blocks the default urllib2 UA.
		hdr = {
			'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
			'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
			'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
			'Accept-Encoding': 'none',
			'Accept-Language': 'en-US,en;q=0.8',
			'Connection': 'keep-alive'
		}
		site = "http://lolioppai.com/index.php/?page=%d"
		req = urllib2.Request(site % page, headers=hdr)
		data = urllib2.urlopen(req)
		if record:
			self.data = data
			self.page = data.read()
		else:
			return data

	def get_pages(self):
		"""Return the total number of listing pages, per the paginator."""
		data = self.open_page(1, 0)
		try:
			html = data.read()
		finally:
			# The original leaked this response; always close it.
			data.close()
		soup = BeautifulSoup(html)
		paginator = soup.findAll("ul", {"class": "g-paginator ui-helper-clearfix"})[0]
		text = paginator.findAll("li", {"class": "g-info"})[0].contents[0]
		# Paginator text ends in "<perpage> of <total>"; raw string for the
		# regex escapes.
		res = re.search(r"(\d+) of (\d+)", text)
		perpage = int(res.group(1))
		maximum = int(res.group(2))
		# Ceiling division: a partial last page still counts as a page.
		# Equivalent to the original floor-divide-plus-remainder-check.
		return -(-maximum // perpage)

	def _gallery_items(self):
		"""Yield the link node of each gallery entry on ``self.page``.

		Gallery entries are the <li> elements with exactly 7 children;
		contents[3].contents[2] is the anchor-like node holding both the
		display name (contents[0]) and the href. Shared by get_names()
		and get_links(), which previously duplicated this walk.
		"""
		soup = BeautifulSoup(self.page)
		for item in soup.findAll('li'):
			if len(item.contents) == 7:
				yield item.contents[3].contents[2]

	def get_names(self):
		"""Return the display names of the galleries on the current page."""
		return [node.contents[0] for node in self._gallery_items()]

	def get_links(self):
		"""Return absolute gallery URLs for the current page."""
		return ["http://lolioppai.com" + node.get("href") for node in self._gallery_items()]