2019. 9. 22.

[python] 현재 사이트에서 랜덤으로 외부 사이트 링크 타고 가기

현재 사이트에서 a href 목록을 수집하여 외부사이트를 선별하여 링크를 출력한다.

from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
from urllib.parse import urlparse
import random
import datetime

# Pages visited so far (kept for parity with the original crawler example).
pages = set()
# Seed from OS entropy. The original passed a datetime object to
# random.seed(), which is deprecated since Python 3.9 and raises
# TypeError on 3.11+ (seed accepts only None/int/float/str/bytes/bytearray).
random.seed()

def internalLinks(bsObj, host):
    """Collect the internal links of a page.

    Args:
        bsObj: BeautifulSoup document to scan for <a> tags.
        host: site root (e.g. "https://example.com") used both to recognize
            internal links and to absolutize root-relative ones.

    Returns:
        List of internal URLs in first-seen order, without duplicates.
        Links starting with "/" are prefixed with host.
    """
    internalLink = []
    # Track raw hrefs separately: the original compared the raw href against
    # the list of already-transformed (host-prefixed) URLs, so a repeated
    # "/path" link was appended twice.
    seen = set()
    # Internal = starts with "/" or contains the root domain somewhere.
    # re.escape keeps "." and "/" in host from being regex metacharacters.
    for link in bsObj.findAll("a", href=re.compile("(^/|.*" + re.escape(host) + ")")):
        href = link.attrs["href"]
        if href is None or href in seen:
            continue
        seen.add(href)
        if href.startswith("/"):
            internalLink.append(host + href)
        else:
            internalLink.append(href)
    return internalLink

def externalLinks(bsObj, host):
    """Collect the external links of a page.

    Args:
        bsObj: BeautifulSoup document to scan for <a> tags.
        host: current site's domain (netloc); links containing it are
            treated as internal and excluded.

    Returns:
        List of external URLs (starting with "http" or "www" and not
        containing host), in document order. Duplicates are kept, matching
        the original behavior.
    """
    externalLink = []
    # re.escape prevents "." in the domain from matching arbitrary chars.
    pattern = re.compile("^(http|www)((?!" + re.escape(host) + ").)*$")
    for link in bsObj.findAll("a", {"href": pattern}):
        if link.attrs["href"] is not None:
            externalLink.append(link.attrs["href"])
    return externalLink


def getRandomExternalLink(firstUrl):
    """Fetch firstUrl and return a random external link found on that page.

    If the page has no external links, pick a random *internal* link and
    recurse into it until an external link is found.

    Args:
        firstUrl: absolute URL of the page to fetch.

    Returns:
        One external URL chosen uniformly at random.

    Raises:
        ValueError: if the page has neither external nor internal links
            (the original crashed with randint(0, -1) in that case).
    """
    html = urlopen(firstUrl)
    bsObj = BeautifulSoup(html.read(), "html.parser")
    parts = urlparse(firstUrl)
    externalLink1 = externalLinks(bsObj, parts.netloc)
    if externalLink1:
        return random.choice(externalLink1)
    # No external links here: hop to a random internal page and keep looking.
    # BUG FIX: the original did `return externalLinks(random.randint(...))`,
    # i.e. called the wrong function with an int instead of recursing into
    # a randomly chosen internal link.
    domain = parts.scheme + "://" + parts.netloc
    internalLink1 = internalLinks(bsObj, domain)
    if not internalLink1:
        raise ValueError("No internal or external links found on " + firstUrl)
    return getRandomExternalLink(random.choice(internalLink1))

def followExternalOnly(firstUrl):
    """Starting at firstUrl, endlessly hop to random external links,
    printing each one.

    Iterative instead of recursive: the original recursion was unbounded
    and would hit Python's recursion limit (~1000 frames) after enough hops.
    """
    url = firstUrl
    while True:
        url = getRandomExternalLink(url)
        print("******** random externalLink is", url)

followExternalOnly("https://www.oreilly.com")


#test("www.naver.com")
#>>> a = urllib.request.urlopen(b'http://www.google.com'.decode('ASCII')).read()

Popular Posts

Recent Posts

Powered by Blogger.