forked from realpython/python-scripts
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path
08_basic_email_web_crawler.py
More file actions
45 lines (31 loc) · 892 Bytes
/
08_basic_email_web_crawler.py
File metadata and controls
45 lines (31 loc) · 892 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import requests
import re
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
# Pre-compiled patterns, hoisted to module level so they compile only once.
# BUG FIX: the original e-mail pattern used [\w\.,]+ for both the local part
# and the domain, so commas were treated as valid address characters and
# strings like "a,b@x.com" (or comma-adjacent addresses in running text)
# were captured incorrectly. Commas are not valid in either part.
email_re = re.compile(r'([\w.+-]+@[\w-]+\.[\w.-]+)')
# Capture the target of every href="..." attribute (non-greedy up to the
# closing quote). Unchanged from the original.
link_re = re.compile(r'href="(.*?)"')
def crawl(url):
    """Fetch *url* and return the e-mail addresses found on that page.

    Parameters:
        url: the page to download (fetched with ``requests.get``).

    Returns:
        A set of e-mail address strings found in the page body, or an
        empty *list* when the response status is not 200 (kept as ``[]``
        to preserve the original function's failure contract).
    """
    result = set()
    req = requests.get(url)
    # Bail out early on any non-OK response.
    if req.status_code != 200:
        return []
    # Report the hrefs present on the page. NOTE(review): linked pages are
    # never fetched here — this count is informational only.
    links = link_re.findall(req.text)
    print("\nFound {} links".format(len(links)))
    # BUG FIX: the original looped over `links`, called urljoin (result
    # unused), and re-ran email_re.findall on the *same* req.text once per
    # link — len(links) redundant passes producing an identical set.
    # One scan of the page yields the same result.
    result.update(email_re.findall(req.text))
    return result
if __name__ == '__main__':
    # Crawl the target site and print every e-mail address discovered.
    emails = crawl('http://www.realpython.com')
    # Typo fix in the user-facing message: "Scrapped" -> "Scraped".
    print("\nScraped e-mail addresses:")
    for email in emails:
        print(email)
    print("\n")