diff --git a/.gitignore b/.gitignore index 15fafde..d94917d 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,6 @@ dist build eggs parts -bin var sdist develop-eggs diff --git a/README.md b/README.md index 67e4903..4db06a7 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,10 @@ -#Guide +# Why + +[Pythonic.info](http://pythonic.info) is a [HackerNews](http://news.ycombinator.com) clone with python/tornado + +It's also a demo of [NoMagic](https://github.com/kernel1983/NoMagic) data framework, showing that [NoMagic](https://github.com/kernel1983/NoMagic) is actually working in production enviorment. + +# Guide ## How to ### requirements diff --git a/bin/delete.py b/bin/delete.py new file mode 100644 index 0000000..95ae858 --- /dev/null +++ b/bin/delete.py @@ -0,0 +1,55 @@ +import sys +import datetime +import time +import pickle +import uuid +import binascii +import json +import zlib + +sys.path.append(".") +sys.path.append("..") + +from setting import settings +from setting import conn +#from setting import ring + +import nomagic +import nomagic.feeds + + +def get_comments(comments): + comment_ids = [] + for comment in comments: + assert comment.get("type") == "comment" + #comment["like_count"] = len(comment.get("likes", [])) + #comment["like"] = self.user_id in set(comment.get("likes", [])) if self.current_user else False + #comment["comment_count"] = 0 + #print comment["comments"] if comment.get("comments") else [] + comment_ids.append(comment["id"]) + comment_ids.extend(get_comments(comment["comments"]) if comment.get("comments") else []) + + return comment_ids + +if len(sys.argv) < 2: + sys.exit() + +print sys.argv[1] +activity_id = sys.argv[1] + +activity = nomagic._get_entity_by_id(activity_id) +print activity +assert activity.get("type") == "status" + +comments, user_ids = nomagic.feeds.get_comments(activity) +comment_ids = get_comments(comments) +print comment_ids + +# delete comment_ids +for comment_id in comment_ids: + 
nomagic._node(comment_id).execute_rowcount("DELETE FROM entities WHERE id = %s", comment_id) + +# delete activity_id +nomagic._node(activity_id).execute_rowcount("DELETE FROM entities WHERE id = %s", activity_id) + +conn.execute_rowcount("DELETE FROM index_posts WHERE entity_id = %s", activity_id) diff --git a/bin/email_daily.py b/bin/email_daily.py new file mode 100644 index 0000000..1d8014a --- /dev/null +++ b/bin/email_daily.py @@ -0,0 +1,81 @@ +import sys +import os +import datetime +import time +import pickle +import uuid +import binascii +import json +import zlib + +sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../') +sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../vendor') + +import tornado.template +import tornado.locale +import amazon_ses + +from setting import settings +from setting import conn + +import nomagic + + +def daily(hours): + now = time.time() + offset = 0 + post_ids_to_email = set() + while True: + index_posts = conn.query("SELECT * FROM index_posts ORDER BY rank DESC LIMIT %s, 100", offset) + + if len(index_posts) == 0: + break + + post_ids = [post["entity_id"] for post in index_posts] + for post_id, post in nomagic._get_entities_by_ids(post_ids): + period = (now - time.mktime(datetime.datetime.strptime(post["datetime"], "%Y-%m-%dT%H:%M:%S.%f").timetuple())) / 3600 + + if period <= hours: + post_ids_to_email.add(post_id) + + offset += 100 + + posts_to_email = nomagic._get_entities_by_ids(post_ids_to_email) + loader = tornado.template.Loader(os.path.dirname(os.path.abspath(__file__)) + "/../template/") + + locale = tornado.locale.get() + msg = amazon_ses.EmailMessage() + msg.subject = locale.translate('Pythonic Info Daily').encode("utf-8") + msg.bodyHtml = loader.load("email_daily.html").generate(posts=posts_to_email, _=locale.translate) + + users = [] + users_not_to_send = [] + users_exists = conn.query("SELECT * FROM index_login") + for user_id, user in nomagic._get_entities_by_ids([user_exists["entity_id"] 
for user_exists in users_exists]): + if user.get("receive_daily_email", True): + users.append(user) + else: + users_not_to_send.append(user) + + users_invited = conn.query("SELECT * FROM invite") + + sender = amazon_ses.AmazonSES(settings["AmazonAccessKeyID"], settings["AmazonSecretAccessKey"]) + emails = set([user["login"] for user in users_exists] + [user["email"] for user in users_invited]) - set([user["email"] for user in users_not_to_send]) + for email in emails: + if "@" in email: + print email + sender.sendEmail(settings["email_sender"], email, msg) + + +if __name__ == '__main__': + tornado.locale.load_translations(os.path.join(os.path.dirname(__file__) + "/../csv_translations/")) + tornado.locale.set_default_locale("zh_CN") + + if len(sys.argv) < 2: + sys.exit() + + hours = float(sys.argv[1]) + #print hours + + daily(hours) + diff --git a/bin/migration.py b/bin/migration.py new file mode 100644 index 0000000..54a554f --- /dev/null +++ b/bin/migration.py @@ -0,0 +1,44 @@ +import sys +import pickle +import uuid +import binascii +import json +import zlib + +sys.path.append(".") +sys.path.append("..") + +from setting import settings +from setting import conn +from setting import ring + +import nomagic +from nomagic import _RING + +_NUMBER = len(ring) + +def _number(key): return int(key, 16) % _NUMBER + +def _node(key): return ring[_RING.get_node(key)] + + +for r in ring: + offset = 0 + ids_to_delete = [] + while True: + users = r.query("SELECT * FROM entities ORDER BY auto_increment LIMIT %s, 100", offset) + + if len(users) == 0: + break + + for user in users: + user_id = str(user["id"]) + print user["id"], _number(user_id), nomagic._RING.get_node(user_id) + if r is not _node(user_id): + _node(user_id).execute_rowcount("INSERT INTO entities (id, body) VALUES (%s, %s)", user["id"], user["body"]) + ids_to_delete.append(user_id) + + offset += 100 + + for id_to_delete in ids_to_delete: + print r.execute_rowcount("DELETE FROM entities WHERE id = %s", id_to_delete) 
diff --git a/bin/rank.py b/bin/rank.py new file mode 100644 index 0000000..a90fa2c --- /dev/null +++ b/bin/rank.py @@ -0,0 +1,45 @@ +import sys +import os +import datetime +import time +import pickle +import uuid +import binascii +import json +import zlib + +sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../') +sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../vendor') + +from setting import settings +from setting import conn + +import nomagic + + +def rank(points, period): return int( (points + 1) / ( (period + 2) ** 1.8) * 1000000000 ) + +def ranking(): + now = time.time() + offset = 0 + while True: + index_posts = conn.query("SELECT * FROM index_posts ORDER BY rank DESC LIMIT %s, 100", offset) + + if len(index_posts) == 0: + break + + post_ids = [post["entity_id"] for post in index_posts] + for post_id, post in nomagic._get_entities_by_ids(post_ids): + period = (now - time.mktime(datetime.datetime.strptime(post["datetime"], "%Y-%m-%dT%H:%M:%S.%f").timetuple())) / 3600 + points = len(post["likes"]) + post_rank = rank(points, period) + + conn.execute("UPDATE index_posts SET rank = %s WHERE entity_id = %s", post_rank, post_id) + + offset += 100 + +if __name__ == '__main__': + #print rank(0, 0) + #print rank(3000, 0) + ranking() + diff --git a/controller/api.py b/controller/api.py index 8ba39d8..847576b 100644 --- a/controller/api.py +++ b/controller/api.py @@ -11,7 +11,10 @@ import tornado.web import tornado.httpclient as httpclient +import markdown2 from PIL import Image +from tornado_ses import EmailHandler +from amazon_ses import EmailMessage import nomagic import nomagic.feeds @@ -19,9 +22,6 @@ from setting import conn from controller.base import BaseHandler -from controller.base import EmailHandler - -from vendor.amazon_ses import EmailMessage ##### mock data API ##### @@ -53,9 +53,9 @@ def post(self): #send email msg = EmailMessage() - msg.subject = u"Confirm from Project" - msg.bodyText= 
u"http://www.com:8000/email_verify?token=%s" % token - self.send("no-reply@appkungfu.net", email, msg) + msg.subject = u"Confirm email from Pythonic Info" + msg.bodyText= u"http://pythonic.info/verify_email?token=%s" % token + self.send("info@pythonic.info", email, msg) self.finish({}) class UserInfoAPIHandler(BaseHandler): @@ -78,47 +78,53 @@ def post(self): self.finish({"result":"error"}) -class NewsFeedAPIHandler(BaseHandler): +class FeedAPIHandler(BaseHandler): def get(self): + self.set_header("Cache-Control", "max-age=0") if not self.current_user: - #raise tornado.web.HTTPError(401, "User not login") - news_feeds = nomagic.feeds.get_public_news_feed() - users = dict(nomagic._get_entities_by_ids(set([i["user_id"] for i in news_feeds]))) - - self.finish({"users": users, - "news_feeds": news_feeds}) - + raise tornado.web.HTTPError(401, "User not login") return - user_id = self.current_user["user_id"].encode("utf8") - news_feeds = nomagic.feeds.get_news_feed_by_user_id(user_id) - users = dict(nomagic._get_entities_by_ids(set([i["user_id"] for i in news_feeds]+[user_id]))) + from_id = self.get_argument("from", None) + feeds = nomagic.feeds.get_public_feed(item_start_id = from_id) + users = dict(nomagic._get_entities_by_ids(set([i["user_id"] for i in feeds]))) - self.finish({"user_id": user_id, - "users": users, - "news_feeds": news_feeds}) + self.finish({"users": users, "feeds": feeds}) -class NewsItemAPIHandler(BaseHandler): +class ItemAPIHandler(BaseHandler): + def get_comments(self, comments): + for comment in comments: + comment["comments"] = self.get_comments(comment["comments"]) if comment.get("comment_ids") else [] + comment["content"] = markdown2.markdown(comment["content"], safe_mode=True) + return comments + def get(self): - activity_id = self.get_argument("id") + self.set_header("Cache-Control", "max-age=0") + if not self.current_user: + raise tornado.web.HTTPError(401, "User not login") + return + + entity_id = self.get_argument("id") #user_id = 
self.current_user["user_id"].encode("utf8") - news_feed, user_ids = nomagic.feeds.get_news_by_id(activity_id) + item, user_ids = nomagic.feeds.get_item_by_id(entity_id) + item["comments"] = self.get_comments(item["comments"]) + item["content"] = markdown2.markdown(item["content"], safe_mode=True) users = dict(nomagic._get_entities_by_ids(user_ids)) - self.finish({"users": users, - "news_feeds": [news_feed]}) + self.finish({"users": users, "item": item}) class PostStatusAPIHandler(BaseHandler): def post(self): if not self.current_user: raise tornado.web.HTTPError(401, "User not login") + return user_id = self.current_user["user_id"].encode("utf8") content = self.get_argument("content").encode("utf8") data = {"content": content} - status_id, status = nomagic.new_status(user_id, data) + status_id, status = nomagic.feeds.new_status(user_id, data) self.finish(dict(status, id=status_id)) @@ -126,35 +132,38 @@ class LikeAPIHandler(BaseHandler): def post(self): if not self.current_user: raise tornado.web.HTTPError(401, "User not login") + return activity_id = self.get_argument("id").encode("utf8") user_id = self.current_user["user_id"].encode("utf8") - likes = nomagic.like(user_id, activity_id) + likes = nomagic.feeds.like(user_id, activity_id) self.finish({"likes": likes, "like_count":len(likes)}) class UnlikeAPIHandler(BaseHandler): def post(self): if not self.current_user: raise tornado.web.HTTPError(401, "User not login") + return activity_id = self.get_argument("id").encode("utf8") user_id = self.current_user["user_id"].encode("utf8") - likes = nomagic.unlike(user_id, activity_id) + likes = nomagic.feeds.unlike(user_id, activity_id) self.finish({"likes": likes, "like_count":len(likes)}) class PostCommentAPIHandler(BaseHandler): def post(self): if not self.current_user: raise tornado.web.HTTPError(401, "User not login") + return activity_id = self.get_argument("id").encode("utf8") user_id = self.current_user["user_id"].encode("utf8") content = 
self.get_argument("content").encode("utf8") data = {"content": content} - comment_ids, new_comment = nomagic.new_comment(user_id, activity_id, data) + comment_ids, new_comment = nomagic.feeds.new_comment(user_id, activity_id, data) self.finish({"new_comment":new_comment, "comment_ids":comment_ids, "comment_count":len(comment_ids)}) @@ -162,6 +171,7 @@ class ProfileImgAPIHandler(BaseHandler): def post(self): if not self.current_user: raise tornado.web.HTTPError(401, "User not login") + return self.set_header('Access-Control-Allow-Origin', '*') self.set_header('Access-Control-Allow-Methods', 'POST, DELETE') @@ -189,6 +199,7 @@ class FollowAPIHandler(BaseHandler): def post(self): if not self.current_user: raise tornado.web.HTTPError(401, "User not login") + return user_id = self.current_user["user_id"].encode("utf8") friend_ids = self.get_argument("friend_ids").encode("utf8").split(",") @@ -198,8 +209,12 @@ class UnfollowAPIHandler(BaseHandler): def post(self): if not self.current_user: raise tornado.web.HTTPError(401, "User not login") + return user_id = self.current_user["user_id"].encode("utf8") friend_ids = self.get_argument("friend_ids").encode("utf8").split(",") nomagic.unfollow_users(user_id, friend_ids) +class ResendVerifyEmailAPIHandler(BaseHandler): + def post(self): + self.verify_code = self.get_argument("verify_code") diff --git a/controller/base.py b/controller/base.py index 9126cc4..3e249b1 100644 --- a/controller/base.py +++ b/controller/base.py @@ -1,13 +1,11 @@ import urllib import tornado.web +import tornado.locale from tornado import httpclient -from vendor.amazon_ses import AmazonSES - -from vendor.amazon_ses import EmailMessage - -from vendor.BeautifulSoup import BeautifulSoup +from amazon_ses import AmazonSES +from amazon_ses import EmailMessage from setting import settings @@ -18,6 +16,13 @@ def get_current_user(self): if not user_json: return None return tornado.escape.json_decode(user_json) + def get_access_token(self): + return None + + #def 
get_user_locale(self): + # return tornado.locale.get("zh_CN") + +""" class EmailHandler(AmazonSES): def send(self, from_email, user_email, user_msg): AmazonSES.__init__(self, settings["AmazonAccessKeyID"], settings["AmazonSecretAccessKey"]) @@ -37,7 +42,4 @@ def _performAction(self, actionName, params=None): client = httpclient.AsyncHTTPClient() req = httpclient.HTTPRequest("https://email.us-east-1.amazonaws.com/", "POST", self._getHeaders(), params) client.fetch(req, self.async_callback(self.handle_email)) - - - - +""" diff --git a/controller/main.py b/controller/main.py index fb7ff64..f6823f3 100644 --- a/controller/main.py +++ b/controller/main.py @@ -3,6 +3,9 @@ import logging import cgi import json +import random +import string +import urllib import tornado.options import tornado.ioloop @@ -11,140 +14,413 @@ import tornado.template import tornado.database import tornado.auth +import tornado.locale + +import markdown2 +from tornado_ses import EmailHandler +from amazon_ses import EmailMessage from setting import settings from setting import conn import nomagic +import nomagic.auth +import nomagic.feeds from controller.base import * +loader = tornado.template.Loader(os.path.join(os.path.dirname(__file__), "../template/")) -class LoginHandler(BaseHandler): +class SettingHandler(BaseHandler): def get(self): - self.render('../templates/login.html') + self.set_header("Cache-Control", "max-age=0") + if not self.current_user: + self.redirect("/login") + return + + user_id = self.current_user["user_id"].encode("utf8") + self.user = nomagic._get_entity_by_id(user_id) + self.render('../template/setting.html') + + def post(self): + if self.current_user: + user_id = self.current_user["user_id"].encode("utf8") + self.user = nomagic._get_entity_by_id(user_id) + post_data = {} + + name = self.get_argument("name", None) + if name: + post_data["name"] = name -class SignupHandler(BaseHandler): + receive_daily_email = bool(self.get_argument("receive_daily_email", False)) + if 
self.user.get("receive_daily_email", True) != receive_daily_email: + print receive_daily_email + post_data["receive_daily_email"] = receive_daily_email + if post_data: + nomagic.auth.update_user(user_id, post_data) + + password0 = self.get_argument("password0", None) + password1 = self.get_argument("password1", None) + password2 = self.get_argument("password2", None) + if password0 is not None and password1 is not None and password1 == password2: + post_data = {} + post_data["password0"] = password0 + post_data["password1"] = password1 + nomagic.auth.update_user(user_id, post_data) + self.redirect("/setting?status=password_updated") + return + + self.redirect("/setting") + + +class LoginHandler(BaseHandler, EmailHandler): def get(self): - self.render('../templates/signup.html') + self.email = self.get_argument("email", u"") + self.invite_code = self.get_argument("invite_code", u"") + self.render('../template/login.html') + + def post(self): + login = self.get_argument("login", None) + password = self.get_argument("password", None) + + invite_code = self.get_argument("invite_code", None) + email = self.get_argument("email", None) + name = self.get_argument("name", None) + password1 = self.get_argument("password1", None) + password2 = self.get_argument("password2", None) + + if login and password: + user_id, user = nomagic.auth.check_user(login, password) + if user_id: + self.set_secure_cookie("user", tornado.escape.json_encode({"user_id": user_id})) + self.redirect("/?status=login") + return + + elif email and name and password1 and password2 and password1 == password2 and invite_code: + invited = conn.get("SELECT * FROM invite WHERE code = %s", invite_code) + if not invited: + self.redirect("/login?status=need_invite_code") + return + + data = {"email": email, "name": name, "password": password1} + try: + user_id, user = nomagic.auth.create_user(data) + + self.set_secure_cookie("user", tornado.escape.json_encode({"user_id": user_id})) + + email_verify_code = 
''.join(random.choice(string.digits+string.letters) for x in range(14)) + result = nomagic.auth.update_user(user_id, {"email_verified": False, "email_verify_code": email_verify_code}) + + #send verify email here + msg = EmailMessage() + msg.subject = "Confirm Email from Pythonic Info" + msg.bodyText = "http://pythonic.info/verify_email?user_id=%s&verify_code=%s" % (user_id, email_verify_code) + self.send("info@pythonic.info", str(email), msg) + print "url:", msg.bodyText + + self.redirect("/?status=created") + return + except: + pass + + self.redirect("/login?status=error") class LogoutHandler(BaseHandler): def get(self): self.clear_cookie("user") - self.redirect("/login") + self.render("../template/logout.html") class FeedHandler(BaseHandler): - topic_temp = tornado.template.Template(""" -
{{ content }}
- -{{ content }}
- - {% for comment in comments %} - {% raw comment %} - {% end for %} -| + {{_('Pythonic Info')}} | {{_('submit')}} + | +
| + {{ post['title'] }} + | +
{% raw content %}
+{% raw content %}
+{% raw content %}
+ tags.
+ """
+ yield 0, ""
+ for tup in inner:
+ yield tup
+ yield 0, ""
+
+ def wrap(self, source, outfile):
+ """Return the source with a code, pre, and div."""
+ return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
+
+ formatter_opts.setdefault("cssclass", "codehilite")
+ formatter = HtmlCodeFormatter(**formatter_opts)
+ return pygments.highlight(codeblock, lexer, formatter)
+
+ def _code_block_sub(self, match, is_fenced_code_block=False):
+ lexer_name = None
+ if is_fenced_code_block:
+ lexer_name = match.group(1)
+ if lexer_name:
+ formatter_opts = self.extras['fenced-code-blocks'] or {}
+ codeblock = match.group(2)
+ codeblock = codeblock[:-1] # drop one trailing newline
+ else:
+ codeblock = match.group(1)
+ codeblock = self._outdent(codeblock)
+ codeblock = self._detab(codeblock)
+ codeblock = codeblock.lstrip('\n') # trim leading newlines
+ codeblock = codeblock.rstrip() # trim trailing whitespace
+
+ # Note: "code-color" extra is DEPRECATED.
+ if "code-color" in self.extras and codeblock.startswith(":::"):
+ lexer_name, rest = codeblock.split('\n', 1)
+ lexer_name = lexer_name[3:].strip()
+ codeblock = rest.lstrip("\n") # Remove lexer declaration line.
+ formatter_opts = self.extras['code-color'] or {}
+
+ if lexer_name:
+ lexer = self._get_pygments_lexer(lexer_name)
+ if lexer:
+ colored = self._color_with_pygments(codeblock, lexer,
+ **formatter_opts)
+ return "\n\n%s\n\n" % colored
+
+ codeblock = self._encode_code(codeblock)
+ pre_class_str = self._html_class_str_from_tag("pre")
+ code_class_str = self._html_class_str_from_tag("code")
+ return "\n\n%s\n
\n\n" % (
+ pre_class_str, code_class_str, codeblock)
+
+ def _html_class_str_from_tag(self, tag):
+ """Get the appropriate ' class="..."' string (note the leading
+ space), if any, for the given tag.
+ """
+ if "html-classes" not in self.extras:
+ return ""
+ try:
+ html_classes_from_tag = self.extras["html-classes"]
+ except TypeError:
+ return ""
+ else:
+ if tag in html_classes_from_tag:
+ return ' class="%s"' % html_classes_from_tag[tag]
+ return ""
+
+ def _do_code_blocks(self, text):
+ """Process Markdown `` blocks."""
+ code_block_re = re.compile(r'''
+ (?:\n\n|\A\n?)
+ ( # $1 = the code block -- one or more lines, starting with a space/tab
+ (?:
+ (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
+ .*\n+
+ )+
+ )
+ ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
+ ''' % (self.tab_width, self.tab_width),
+ re.M | re.X)
+ return code_block_re.sub(self._code_block_sub, text)
+
+ _fenced_code_block_re = re.compile(r'''
+ (?:\n\n|\A\n?)
+ ^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang
+ (.*?) # $2 = code block content
+ ^```[ \t]*\n # closing fence
+ ''', re.M | re.X | re.S)
+
+ def _fenced_code_block_sub(self, match):
+ return self._code_block_sub(match, is_fenced_code_block=True);
+
+ def _do_fenced_code_blocks(self, text):
+ """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
+ return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
+
+ # Rules for a code span:
+ # - backslash escapes are not interpreted in a code span
+ # - to include one or or a run of more backticks the delimiters must
+ # be a longer run of backticks
+ # - cannot start or end a code span with a backtick; pad with a
+ # space and that space will be removed in the emitted HTML
+ # See `test/tm-cases/escapes.text` for a number of edge-case
+ # examples.
+ _code_span_re = re.compile(r'''
+ (?%s" % c
+
+ def _do_code_spans(self, text):
+ # * Backtick quotes are used for spans.
+ #
+ # * You can use multiple backticks as the delimiters if you want to
+ # include literal backticks in the code span. So, this input:
+ #
+ # Just type ``foo `bar` baz`` at the prompt.
+ #
+ # Will translate to:
+ #
+ # Just type foo `bar` baz at the prompt.
+ #
+ # There's no arbitrary limit to the number of backticks you
+ # can use as delimters. If you need three consecutive backticks
+ # in your code, use four for delimiters, etc.
+ #
+ # * You can use spaces to get literal backticks at the edges:
+ #
+ # ... type `` `bar` `` ...
+ #
+ # Turns to:
+ #
+ # ... type `bar` ...
+ return self._code_span_re.sub(self._code_span_sub, text)
+
+ def _encode_code(self, text):
+ """Encode/escape certain characters inside Markdown code runs.
+ The point is that in code, these characters are literals,
+ and lose their special Markdown meanings.
+ """
+ replacements = [
+ # Encode all ampersands; HTML entities are not
+ # entities within a Markdown code span.
+ ('&', '&'),
+ # Do the angle bracket song and dance:
+ ('<', '<'),
+ ('>', '>'),
+ ]
+ for before, after in replacements:
+ text = text.replace(before, after)
+ hashed = _hash_text(text)
+ self._escape_table[text] = hashed
+ return hashed
+
+ _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
+ _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
+ _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
+ _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
+ def _do_italics_and_bold(self, text):
+ # must go first:
+ if "code-friendly" in self.extras:
+ text = self._code_friendly_strong_re.sub(r"\1", text)
+ text = self._code_friendly_em_re.sub(r"\1", text)
+ else:
+ text = self._strong_re.sub(r"\2", text)
+ text = self._em_re.sub(r"\2", text)
+ return text
+
+ # "smarty-pants" extra: Very liberal in interpreting a single prime as an
+ # apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
+ # "twixt" can be written without an initial apostrophe. This is fine because
+ # using scare quotes (single quotation marks) is rare.
+ _apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
+ _contractions = ["tis", "twas", "twer", "neath", "o", "n",
+ "round", "bout", "twixt", "nuff", "fraid", "sup"]
+ def _do_smart_contractions(self, text):
+ text = self._apostrophe_year_re.sub(r"’\1", text)
+ for c in self._contractions:
+ text = text.replace("'%s" % c, "’%s" % c)
+ text = text.replace("'%s" % c.capitalize(),
+ "’%s" % c.capitalize())
+ return text
+
+ # Substitute double-quotes before single-quotes.
+ _opening_single_quote_re = re.compile(r"(?
+ See "test/tm-cases/smarty_pants.text" for a full discussion of the
+ support here and
+ for a
+ discussion of some diversion from the original SmartyPants.
+ """
+ if "'" in text: # guard for perf
+ text = self._do_smart_contractions(text)
+ text = self._opening_single_quote_re.sub("‘", text)
+ text = self._closing_single_quote_re.sub("’", text)
+
+ if '"' in text: # guard for perf
+ text = self._opening_double_quote_re.sub("“", text)
+ text = self._closing_double_quote_re.sub("”", text)
+
+ text = text.replace("---", "—")
+ text = text.replace("--", "–")
+ text = text.replace("...", "…")
+ text = text.replace(" . . . ", "…")
+ text = text.replace(". . .", "…")
+ return text
+
+ _block_quote_re = re.compile(r'''
+ ( # Wrap whole match in \1
+ (
+ ^[ \t]*>[ \t]? # '>' at the start of a line
+ .+\n # rest of the first line
+ (.+\n)* # subsequent consecutive lines
+ \n* # blanks
+ )+
+ )
+ ''', re.M | re.X)
+ _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M);
+
+ _html_pre_block_re = re.compile(r'(\s*.+?
)', re.S)
+ def _dedent_two_spaces_sub(self, match):
+ return re.sub(r'(?m)^ ', '', match.group(1))
+
+ def _block_quote_sub(self, match):
+ bq = match.group(1)
+ bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
+ bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
+ bq = self._run_block_gamut(bq) # recurse
+
+ bq = re.sub('(?m)^', ' ', bq)
+ # These leading spaces screw with content, so we need to fix that:
+ bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
+
+ return "\n%s\n
\n\n" % bq
+
+ def _do_block_quotes(self, text):
+ if '>' not in text:
+ return text
+ return self._block_quote_re.sub(self._block_quote_sub, text)
+
+ def _form_paragraphs(self, text):
+ # Strip leading and trailing lines:
+ text = text.strip('\n')
+
+ # Wrap tags.
+ grafs = []
+ for i, graf in enumerate(re.split(r"\n{2,}", text)):
+ if graf in self.html_blocks:
+ # Unhashify HTML blocks
+ grafs.append(self.html_blocks[graf])
+ else:
+ cuddled_list = None
+ if "cuddled-lists" in self.extras:
+ # Need to put back trailing '\n' for `_list_item_re`
+ # match at the end of the paragraph.
+ li = self._list_item_re.search(graf + '\n')
+ # Two of the same list marker in this paragraph: a likely
+ # candidate for a list cuddled to preceding paragraph
+ # text (issue 33). Note the `[-1]` is a quick way to
+ # consider numeric bullets (e.g. "1." and "2.") to be
+ # equal.
+ if (li and len(li.group(2)) <= 3 and li.group("next_marker")
+ and li.group("marker")[-1] == li.group("next_marker")[-1]):
+ start = li.start()
+ cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
+ assert cuddled_list.startswith("
") or cuddled_list.startswith("")
+ graf = graf[:start]
+
+ # Wrap tags.
+ graf = self._run_span_gamut(graf)
+ grafs.append("
" + graf.lstrip(" \t") + "
")
+
+ if cuddled_list:
+ grafs.append(cuddled_list)
+
+ return "\n\n".join(grafs)
+
+ def _add_footnotes(self, text):
+ if self.footnotes:
+ footer = [
+ '',
+ '
',
+ ]
+ for i, id in enumerate(self.footnote_ids):
+ if i != 0:
+ footer.append('')
+ footer.append('- ' % id)
+ footer.append(self._run_block_gamut(self.footnotes[id]))
+ backlink = (''
+ '↩' % (id, i+1))
+ if footer[-1].endswith(""):
+ footer[-1] = footer[-1][:-len("")] \
+ + ' ' + backlink + ""
+ else:
+ footer.append("\n
%s
" % backlink)
+ footer.append(' ')
+ footer.append('')
+ footer.append('')
+ return text + '\n\n' + '\n'.join(footer)
+ else:
+ return text
+
+ # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
+ # http://bumppo.net/projects/amputator/
+ _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
+ _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
+ _naked_gt_re = re.compile(r'''(?''', re.I)
+
+ def _encode_amps_and_angles(self, text):
+ # Smart processing for ampersands and angle brackets that need
+ # to be encoded.
+ text = self._ampersand_re.sub('&', text)
+
+ # Encode naked <'s
+ text = self._naked_lt_re.sub('<', text)
+
+ # Encode naked >'s
+ # Note: Other markdown implementations (e.g. Markdown.pl, PHP
+ # Markdown) don't do this.
+ text = self._naked_gt_re.sub('>', text)
+ return text
+
+ def _encode_backslash_escapes(self, text):
+ for ch, escape in list(self._escape_table.items()):
+ text = text.replace("\\"+ch, escape)
+ return text
+
+ _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
+ def _auto_link_sub(self, match):
+ g1 = match.group(1)
+ return '%s' % (g1, g1)
+
+ _auto_email_link_re = re.compile(r"""
+ <
+ (?:mailto:)?
+ (
+ [-.\w]+
+ \@
+ [-\w]+(\.[-\w]+)*\.[a-z]+
+ )
+ >
+ """, re.I | re.X | re.U)
+ def _auto_email_link_sub(self, match):
+ return self._encode_email_address(
+ self._unescape_special_chars(match.group(1)))
+
+ def _do_auto_links(self, text):
+ text = self._auto_link_re.sub(self._auto_link_sub, text)
+ text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
+ return text
+
+ def _encode_email_address(self, addr):
+ # Input: an email address, e.g. "foo@example.com"
+ #
+ # Output: the email address as a mailto link, with each character
+ # of the address encoded as either a decimal or hex entity, in
+ # the hopes of foiling most address harvesting spam bots. E.g.:
+ #
+ # foo
+ # @example.com
+ #
+ # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
+ # mailing list:
+ chars = [_xml_encode_email_char_at_random(ch)
+ for ch in "mailto:" + addr]
+ # Strip the mailto: from the visible part.
+ addr = '%s' \
+ % (''.join(chars), ''.join(chars[7:]))
+ return addr
+
+ def _do_link_patterns(self, text):
+ """Caveat emptor: there isn't much guarding against link
+ patterns being formed inside other standard Markdown links, e.g.
+ inside a [link def][like this].
+
+ Dev Notes: *Could* consider prefixing regexes with a negative
+ lookbehind assertion to attempt to guard against this.
+ """
+ link_from_hash = {}
+ for regex, repl in self.link_patterns:
+ replacements = []
+ for match in regex.finditer(text):
+ if hasattr(repl, "__call__"):
+ href = repl(match)
+ else:
+ href = match.expand(repl)
+ replacements.append((match.span(), href))
+ for (start, end), href in reversed(replacements):
+ escaped_href = (
+ href.replace('"', '"') # b/c of attr quote
+ # To avoid markdown and :
+ .replace('*', self._escape_table['*'])
+ .replace('_', self._escape_table['_']))
+ link = '%s' % (escaped_href, text[start:end])
+ hash = _hash_text(link)
+ link_from_hash[hash] = link
+ text = text[:start] + hash + text[end:]
+ for hash, link in list(link_from_hash.items()):
+ text = text.replace(hash, link)
+ return text
+
+ def _unescape_special_chars(self, text):
+ # Swap back in all the special characters we've hidden.
+ for ch, hash in list(self._escape_table.items()):
+ text = text.replace(hash, ch)
+ return text
+
+ def _outdent(self, text):
+ # Remove one level of line-leading tabs or spaces
+ return self._outdent_re.sub('', text)
+
+
class MarkdownWithExtras(Markdown):
    """A markdowner class that enables most extras:

    - footnotes
    - code-color (only has effect if 'pygments' Python module on path)

    These are not included:
    - pyshell (specific to Python-related documenting)
    - code-friendly (because it *disables* part of the syntax)
    - link-patterns (because you need to specify some actual
      link-patterns anyway)
    """
    # Class-level default; presumably merged with any per-instance extras
    # by the Markdown base class -- confirm against Markdown.__init__.
    extras = ["footnotes", "code-color"]
+
+
+#---- internal support functions
+
class UnicodeWithAttrs(unicode):
    """A subclass of unicode used for the return value of conversion to
    possibly attach some attributes. E.g. the "toc_html" attribute when
    the "toc" extra is used.
    """
    metadata = None
    _toc = None
    def toc_html(self):
        """Return the HTML for the current TOC.

        This expects the `_toc` attribute to have been set on this instance:
        a list of (level, id, name) tuples, one per header.
        """
        if self._toc is None:
            return None

        def indent():
            # Two spaces per open <ul> nesting level.
            return '  ' * (len(h_stack) - 1)
        lines = []
        h_stack = [0]  # stack of header-level numbers
        # The <ul>/<li> markup below was stripped from this source (leaving
        # broken format strings such as '%s- %s' with three arguments);
        # it is restored here.
        for level, id, name in self._toc:
            if level > h_stack[-1]:
                # Deeper header: open a nested list.
                lines.append("%s<ul>" % indent())
                h_stack.append(level)
            elif level == h_stack[-1]:
                # Sibling header: close the previous item.
                lines[-1] += "</li>"
            else:
                # Shallower header: close nested lists back up to its level.
                while level < h_stack[-1]:
                    h_stack.pop()
                    if not lines[-1].endswith("</li>"):
                        lines[-1] += "</li>"
                    lines.append("%s</ul></li>" % indent())
            lines.append('%s<li><a href="#%s">%s</a>' % (
                indent(), id, name))
        # Close any lists still open at the end of the TOC.
        while len(h_stack) > 1:
            h_stack.pop()
            if not lines[-1].endswith("</li>"):
                lines[-1] += "</li>"
            lines.append("%s</ul>" % indent())
        return '\n'.join(lines) + '\n'
    toc_html = property(toc_html)
+
+## {{{ http://code.activestate.com/recipes/577257/ (r1)
+_slugify_strip_re = re.compile(r'[^\w\s-]')
+_slugify_hyphenate_re = re.compile(r'[-\s]+')
+def _slugify(value):
+ """
+ Normalizes string, converts to lowercase, removes non-alpha characters,
+ and converts spaces to hyphens.
+
+ From Django's "django/template/defaultfilters.py".
+ """
+ import unicodedata
+ value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
+ value = _slugify_strip_re.sub('', value).strip().lower()
+ return _slugify_hyphenate_re.sub('-', value)
+## end of http://code.activestate.com/recipes/577257/ }}}
+
+
+# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
+def _curry(*args, **kwargs):
+ function, args = args[0], args[1:]
+ def result(*rest, **kwrest):
+ combined = kwargs.copy()
+ combined.update(kwrest)
+ return function(*args + rest, **combined)
+ return result
+
+# Recipe: regex_from_encoded_pattern (1.0)
+def _regex_from_encoded_pattern(s):
+ """'foo' -> re.compile(re.escape('foo'))
+ '/foo/' -> re.compile('foo')
+ '/foo/i' -> re.compile('foo', re.I)
+ """
+ if s.startswith('/') and s.rfind('/') != 0:
+ # Parse it: /PATTERN/FLAGS
+ idx = s.rfind('/')
+ pattern, flags_str = s[1:idx], s[idx+1:]
+ flag_from_char = {
+ "i": re.IGNORECASE,
+ "l": re.LOCALE,
+ "s": re.DOTALL,
+ "m": re.MULTILINE,
+ "u": re.UNICODE,
+ }
+ flags = 0
+ for char in flags_str:
+ try:
+ flags |= flag_from_char[char]
+ except KeyError:
+ raise ValueError("unsupported regex flag: '%s' in '%s' "
+ "(must be one of '%s')"
+ % (char, s, ''.join(list(flag_from_char.keys()))))
+ return re.compile(s[1:idx], flags)
+ else: # not an encoded regex
+ return re.compile(re.escape(s))
+
+# Recipe: dedent (0.1.2)
+def _dedentlines(lines, tabsize=8, skip_first_line=False):
+ """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
+
+ "lines" is a list of lines to dedent.
+ "tabsize" is the tab width to use for indent width calculations.
+ "skip_first_line" is a boolean indicating if the first line should
+ be skipped for calculating the indent width and for dedenting.
+ This is sometimes useful for docstrings and similar.
+
+ Same as dedent() except operates on a sequence of lines. Note: the
+ lines list is modified **in-place**.
+ """
+ DEBUG = False
+ if DEBUG:
+ print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
+ % (tabsize, skip_first_line))
+ indents = []
+ margin = None
+ for i, line in enumerate(lines):
+ if i == 0 and skip_first_line: continue
+ indent = 0
+ for ch in line:
+ if ch == ' ':
+ indent += 1
+ elif ch == '\t':
+ indent += tabsize - (indent % tabsize)
+ elif ch in '\r\n':
+ continue # skip all-whitespace lines
+ else:
+ break
+ else:
+ continue # skip all-whitespace lines
+ if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
+ if margin is None:
+ margin = indent
+ else:
+ margin = min(margin, indent)
+ if DEBUG: print("dedent: margin=%r" % margin)
+
+ if margin is not None and margin > 0:
+ for i, line in enumerate(lines):
+ if i == 0 and skip_first_line: continue
+ removed = 0
+ for j, ch in enumerate(line):
+ if ch == ' ':
+ removed += 1
+ elif ch == '\t':
+ removed += tabsize - (removed % tabsize)
+ elif ch in '\r\n':
+ if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
+ lines[i] = lines[i][j:]
+ break
+ else:
+ raise ValueError("unexpected non-whitespace char %r in "
+ "line %r while removing %d-space margin"
+ % (ch, line, margin))
+ if DEBUG:
+ print("dedent: %r: %r -> removed %d/%d"\
+ % (line, ch, removed, margin))
+ if removed == margin:
+ lines[i] = lines[i][j+1:]
+ break
+ elif removed > margin:
+ lines[i] = ' '*(removed-margin) + lines[i][j+1:]
+ break
+ else:
+ if removed:
+ lines[i] = lines[i][removed:]
+ return lines
+
def _dedent(text, tabsize=8, skip_first_line=False):
    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text

    "text" is the text to dedent.
    "tabsize" is the tab width to use for indent width calculations.
    "skip_first_line" is a boolean indicating if the first line should
    be skipped for calculating the indent width and for dedenting.
    This is sometimes useful for docstrings and similar.

    textwrap.dedent(s), but don't expand tabs to spaces
    """
    # Keep line endings so the dedented pieces can be rejoined verbatim.
    split_lines = text.splitlines(True)
    _dedentlines(split_lines, tabsize=tabsize, skip_first_line=skip_first_line)
    return ''.join(split_lines)
+
+
+class _memoized(object):
+ """Decorator that caches a function's return value each time it is called.
+ If called later with the same arguments, the cached value is returned, and
+ not re-evaluated.
+
+ http://wiki.python.org/moin/PythonDecoratorLibrary
+ """
+ def __init__(self, func):
+ self.func = func
+ self.cache = {}
+ def __call__(self, *args):
+ try:
+ return self.cache[args]
+ except KeyError:
+ self.cache[args] = value = self.func(*args)
+ return value
+ except TypeError:
+ # uncachable -- for instance, passing a list as an argument.
+ # Better to not cache than to blow up entirely.
+ return self.func(*args)
+ def __repr__(self):
+ """Return the function's docstring."""
+ return self.func.__doc__
+
+
+def _xml_oneliner_re_from_tab_width(tab_width):
+ """Standalone XML processing instruction regex."""
+ return re.compile(r"""
+ (?:
+ (?<=\n\n) # Starting after a blank line
+ | # or
+ \A\n? # the beginning of the doc
+ )
+ ( # save in $1
+ [ ]{0,%d}
+ (?:
+ <\?\w+\b\s+.*?\?> # XML processing instruction
+ |
+ <\w+:\w+\b\s+.*?/> # namespaced single tag
+ )
+ [ \t]*
+ (?=\n{2,}|\Z) # followed by a blank line or end of document
+ )
+ """ % (tab_width - 1), re.X)
# Memoize: the regex depends only on tab_width, so cache one compile per width.
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
+
+def _hr_tag_re_from_tab_width(tab_width):
+ return re.compile(r"""
+ (?:
+ (?<=\n\n) # Starting after a blank line
+ | # or
+ \A\n? # the beginning of the doc
+ )
+ ( # save in \1
+ [ ]{0,%d}
+ <(hr) # start tag = \2
+ \b # word break
+ ([^<>])*? #
+ /?> # the matching end tag
+ [ \t]*
+ (?=\n{2,}|\Z) # followed by a blank line or end of document
+ )
+ """ % (tab_width - 1), re.X)
# Memoize: the regex depends only on tab_width, so cache one compile per width.
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
+
+
+def _xml_escape_attr(attr, skip_single_quote=True):
+ """Escape the given string for use in an HTML/XML tag attribute.
+
+ By default this doesn't bother with escaping `'` to `'`, presuming that
+ the tag attribute is surrounded by double quotes.
+ """
+ escaped = (attr
+ .replace('&', '&')
+ .replace('"', '"')
+ .replace('<', '<')
+ .replace('>', '>'))
+ if not skip_single_quote:
+ escaped = escaped.replace("'", "'")
+ return escaped
+
+
+def _xml_encode_email_char_at_random(ch):
+ r = random()
+ # Roughly 10% raw, 45% hex, 45% dec.
+ # '@' *must* be encoded. I [John Gruber] insist.
+ # Issue 26: '_' must be encoded.
+ if r > 0.9 and ch not in "@_":
+ return ch
+ elif r < 0.45:
+ # The [1:] is to drop leading '0': 0x63 -> x63
+ return '%s;' % hex(ord(ch))[1:]
+ else:
+ return '%s;' % ord(ch)
+
+
+
+#---- mainline
+
+class _NoReflowFormatter(optparse.IndentedHelpFormatter):
+ """An optparse formatter that does NOT reflow the description."""
+ def format_description(self, description):
+ return description or ""
+
def _test():
    """Run this module's doctests (invoked by the --self-test option)."""
    import doctest
    doctest.testmod()
+
def main(argv=None):
    """Command-line mainline: render the given Markdown paths (or stdin,
    for '-' or no paths) to HTML on stdout. Returns an exit code."""
    if argv is None:
        argv = sys.argv
    if not logging.root.handlers:
        logging.basicConfig()

    # --- option parsing ---
    usage = "usage: %prog [PATHS...]"
    version = "%prog "+__version__
    parser = optparse.OptionParser(prog="markdown2", usage=usage,
        version=version, description=cmdln_desc,
        formatter=_NoReflowFormatter())
    parser.add_option("-v", "--verbose", dest="log_level",
                      action="store_const", const=logging.DEBUG,
                      help="more verbose output")
    parser.add_option("--encoding",
                      help="specify encoding of text content")
    parser.add_option("--html4tags", action="store_true", default=False,
                      help="use HTML 4 style for empty element tags")
    parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
                      help="sanitize literal HTML: 'escape' escapes "
                           "HTML meta chars, 'replace' replaces with an "
                           "[HTML_REMOVED] note")
    parser.add_option("-x", "--extras", action="append",
                      help="Turn on specific extra features (not part of "
                           "the core Markdown spec). See above.")
    # NOTE(review): this help text ends with an empty string literal -- the
    # referenced URL appears to have been lost from this copy; restore it
    # from upstream if known.
    parser.add_option("--use-file-vars",
                      help="Look for and use Emacs-style 'markdown-extras' "
                           "file var to turn on extras. See "
                           "")
    parser.add_option("--link-patterns-file",
                      help="path to a link pattern file")
    parser.add_option("--self-test", action="store_true",
                      help="run internal self-tests (some doctests)")
    parser.add_option("--compare", action="store_true",
                      help="run against Markdown.pl as well (for testing)")
    parser.set_defaults(log_level=logging.INFO, compare=False,
                        encoding="utf-8", safe_mode=None, use_file_vars=False)
    opts, paths = parser.parse_args()
    log.setLevel(opts.log_level)

    if opts.self_test:
        return _test()

    # --- parse "-x a,b=1"-style extras into {name: arg-or-None} ---
    if opts.extras:
        extras = {}
        for s in opts.extras:
            splitter = re.compile("[,;: ]+")
            for e in splitter.split(s):
                if '=' in e:
                    ename, earg = e.split('=', 1)
                    try:
                        earg = int(earg)
                    except ValueError:
                        pass
                else:
                    ename, earg = e, None
                extras[ename] = earg
    else:
        extras = None

    # --- load "PATTERN HREF" lines, skipping blanks and '#' comments ---
    if opts.link_patterns_file:
        link_patterns = []
        f = open(opts.link_patterns_file)
        try:
            for i, line in enumerate(f.readlines()):
                if not line.strip(): continue
                if line.lstrip().startswith("#"): continue
                try:
                    pat, href = line.rstrip().rsplit(None, 1)
                except ValueError:
                    raise MarkdownError("%s:%d: invalid link pattern line: %r"
                                        % (opts.link_patterns_file, i+1, line))
                link_patterns.append(
                    (_regex_from_encoded_pattern(pat), href))
        finally:
            f.close()
    else:
        link_patterns = None

    from os.path import join, dirname, abspath, exists
    # Reference Perl implementation used by --compare mode.
    markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
                       "Markdown.pl")
    if not paths:
        paths = ['-']  # default to stdin
    for path in paths:
        if path == '-':
            text = sys.stdin.read()
        else:
            fp = codecs.open(path, 'r', opts.encoding)
            text = fp.read()
            fp.close()
        if opts.compare:
            # Pipe the input through Markdown.pl and echo its output first.
            from subprocess import Popen, PIPE
            print("==== Markdown.pl ====")
            p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
            p.stdin.write(text.encode('utf-8'))
            p.stdin.close()
            perl_html = p.stdout.read().decode('utf-8')
            if py3:
                sys.stdout.write(perl_html)
            else:
                sys.stdout.write(perl_html.encode(
                    sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
            print("==== markdown2.py ====")
        html = markdown(text,
            html4tags=opts.html4tags,
            safe_mode=opts.safe_mode,
            extras=extras, link_patterns=link_patterns,
            use_file_vars=opts.use_file_vars)
        if py3:
            sys.stdout.write(html)
        else:
            sys.stdout.write(html.encode(
                sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
        if extras and "toc" in extras:
            log.debug("toc_html: " +
                html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
        if opts.compare:
            # Normalize both outputs (when the test helpers are available)
            # before reporting whether they match.
            test_dir = join(dirname(dirname(abspath(__file__))), "test")
            if exists(join(test_dir, "test_markdown2.py")):
                sys.path.insert(0, test_dir)
                from test_markdown2 import norm_html_from_html
                norm_html = norm_html_from_html(html)
                norm_perl_html = norm_html_from_html(perl_html)
            else:
                norm_html = html
                norm_perl_html = perl_html
            print("==== match? %r ====" % (norm_perl_html == norm_html))
+
+
# Script entry point.
if __name__ == "__main__":
    sys.exit( main(sys.argv) )
diff --git a/vendor/tornado_ses.py b/vendor/tornado_ses.py
new file mode 100644
index 0000000..7ab163b
--- /dev/null
+++ b/vendor/tornado_ses.py
@@ -0,0 +1,24 @@
+import urllib
+
+import tornado.httpclient as httpclient
+
+from amazon_ses import AmazonSES
+
+
class EmailHandler(AmazonSES):
    """Amazon SES email sender that performs the HTTP call through
    Tornado's AsyncHTTPClient (via the _performAction override below)."""
    def send(self, from_email, user_email, user_msg):
        # NOTE(review): re-running AmazonSES.__init__ on every send looks
        # suspicious -- presumably it (re)loads the credentials from
        # self.settings each time; confirm against the AmazonSES class.
        AmazonSES.__init__(self, self.settings["AmazonAccessKeyID"], self.settings["AmazonSecretAccessKey"])
        self.sendEmail(from_email, user_email, user_msg)

    def handle_email(self, response):
        # Fire-and-forget: the SES HTTP response is ignored.
        pass

    def _performAction(self, actionName, params=None):
        # Overrides the base class's action dispatch to POST the form-encoded
        # params to the SES endpoint without blocking the IOLoop.
        if not params:
            params = {}
        params['Action'] = actionName
        # NOTE(review): urllib.urlencode is the Python 2 API
        # (urllib.parse.urlencode in Python 3).
        params = urllib.urlencode(params)

        conn = httpclient.AsyncHTTPClient()
        req = httpclient.HTTPRequest("https://email.us-east-1.amazonaws.com/", "POST", self._getHeaders(), params)
        conn.fetch(req, self.handle_email)
diff --git a/web.py b/web.py
index 50a242a..c1983fb 100755
--- a/web.py
+++ b/web.py
@@ -3,6 +3,11 @@
import logging
import cgi
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/vendor')
+
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
#import wsgiref.simple_server
#import wsgiref.handlers
#import tornado.wsgi
@@ -12,6 +17,7 @@
import tornado.template
import tornado.database
import tornado.auth
+import tornado.locale
from setting import settings
from setting import conn
@@ -23,16 +29,22 @@
handlers = [
(r"/", main.FeedHandler),
(r"/item", main.ItemHandler),
+ (r"/edit_item", main.EditItemHandler),
+ (r"/submit", main.SubmitHandler),
+ (r"/comment", main.CommentHandler),
+ (r"/edit_comment", main.EditCommentHandler),
+ (r"/verify_email", main.VerifyEmailHandler),
+ (r"/invite", main.InviteHandler),
- (r"/signup", main.SignupHandler),
+ (r"/setting", main.SettingHandler),
(r"/login", main.LoginHandler),
(r"/logout", main.LogoutHandler),
(r"/api/login", api.LoginAPIHandler),
(r"/api/signup", api.SignupAPIHandler),
(r"/api/user_info", api.UserInfoAPIHandler),
- (r"/api/get_news_feed", api.NewsFeedAPIHandler),
- (r"/api/get_news_item", api.NewsItemAPIHandler),
+ (r"/api/get_feed", api.FeedAPIHandler),
+ (r"/api/get_item", api.ItemAPIHandler),
(r"/api/profile_img", api.ProfileImgAPIHandler),
(r"/api/like", api.LikeAPIHandler),
@@ -52,6 +64,8 @@
if __name__ == "__main__":
+ tornado.locale.load_translations(os.path.join(os.path.dirname(__file__), "csv_translations"))
+ tornado.locale.set_default_locale("zh_CN")
tornado.options.define("port", default=8000, help="Run server on a specific port", type=int)
tornado.options.parse_command_line()
application = tornado.web.Application(handlers, **settings)
{{ content }}
-