Hello, I’m Yue Chuang. This time I’m going to show you roughly 50 lines of code that turn a photo into a pencil sketch, and make you a sketch master too. Without further ado, let’s look at the result first.
The right-hand side of the image above shows our result. So what are the steps?
1. Process analysis
The process above is very simple: read the image, convert it to grayscale, apply a Gaussian blur, divide the grayscale image by the blurred one, and save the result. Let's look at the implementation.
2. Concrete implementation
Libraries required for installation:
pip install opencv-python
Import the required libraries:
import cv2
The main body of the code is also very simple, as follows:
import cv2
SRC = 'images/image_1.jpg'
image_rgb = cv2.imread(SRC)
image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
image_blend = cv2.divide(image_gray, image_blur, scale=255)
cv2.imwrite('result.jpg', image_blend)
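A quick aside on why this works: cv2.divide(image_gray, image_blur, scale=255) computes saturate(255 * gray / blur) pixel by pixel, which is essentially the classic "color dodge" blend used for sketch effects. In smooth regions the grayscale value and its blurred value are nearly equal, so the ratio is close to 1 and the output saturates to white; around edges the grayscale value drops well below the local (blurred) average, so the ratio falls and dark lines appear. A rough NumPy equivalent, just for intuition (not part of the original code; the output file name result_numpy.jpg is my own placeholder):

import cv2
import numpy as np

gray = cv2.cvtColor(cv2.imread('images/image_1.jpg'), cv2.COLOR_BGR2GRAY).astype(np.float32)
blur = cv2.GaussianBlur(gray, (21, 21), 0)

# "Color dodge" style blend: 255 * gray / blur, clipped to [0, 255]
sketch = np.clip(gray * 255.0 / (blur + 1e-6), 0, 255).astype(np.uint8)
cv2.imwrite('result_numpy.jpg', sketch)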
The code above is really not difficult. To help you understand it better, I have written the more heavily commented version below:
""" project = 'Code', file_name = 'study.py', author = 'AI Yue Hua 'time = '2020/5/19 8:35', product_name = PyCharm, AI Yue Chuang code is far away from bugs with the God animal protecting I love animals. They taste delicious. """
import cv2

# Original image path
SRC = 'images/image_1.jpg'

# Read the image
image_rgb = cv2.imread(SRC)
# cv2.imshow('rgb', image_rgb)  # uncomment to preview the original image
# cv2.waitKey(0)
# exit()

# Convert to grayscale
image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
# cv2.imshow('gray', image_gray)  # uncomment to preview the grayscale image
# cv2.waitKey(0)
# exit()

# Gaussian blur
image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
cv2.imshow('image_blur', image_blur)  # preview the blurred image
cv2.waitKey(0)
# exit()  # keep this commented out to continue to the divide step below

# Divide: extract the lines and content from the two images
image_blend = cv2.divide(image_gray, image_blur, scale=255)
cv2.imshow('image_blend', image_blend)
cv2.waitKey(0)
# cv2.imwrite('result1.jpg', image_blend)
In the code above, we added a few lines on top of the original version to display the intermediate images, so that it is easier to follow each step.
At this point some of you may ask: can't I just use image-editing software to generate the sketch directly?
What is the advantage of writing a program?
The advantage is that when you have a large number of pictures, a program can generate the sketches in batches, which is far more convenient and efficient.
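For example, converting a whole folder of photos is just a loop around the same four lines. Here is a minimal sketch, assuming the photos sit in an images/ folder and the results go to a sketches/ folder (both folder names are placeholders, not from the original project):

import os
import glob
import cv2

os.makedirs('sketches', exist_ok=True)
for i, path in enumerate(glob.glob('images/*.jpg')):
    gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
    cv2.imwrite(f'sketches/result-{i}.jpg', cv2.divide(gray, blur, scale=255))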
And with that we are done: we have turned the young lady's photo into a sketch.
3. Baidu image crawler + sketch generation
However, a single photo is hardly "massive". To get to "massive", I wrote a Baidu image crawler. This article is not meant to teach you how to write crawler code, so I will simply give the crawler code here, organized along software-engineering lines. (Besides opencv-python, the code below also uses requests, tqdm and natsort, so install them with pip install requests tqdm natsort.)
# Crawler.Spider.py
import re
import os
import time
import collections
from collections import namedtuple
import requests
from concurrent import futures
from tqdm import tqdm
from enum import Enum
BASE_URL = ('https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result'
            '&queryWord={keyword}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=&latest=&copyright='
            '&word={keyword}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force='
            '&pn={page}&rn=30&gsm=&1568638554041=')
HEADERS = {
    'Referer': 'http://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1'
               '&fr=&sf=1&fmq=1567133149621_R&pv=&ic=0&nc=1&z=0&hd=0&latest=&copyright=&se=1&showtab=0'
               '&fb=0&width=&height=&face=0&istype=2&ie=utf-8&sid=&word=',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/75.0.3770.100 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}


class BaiDuSpider:
    def __init__(self, max_works, images_type=None):
        self.max_works = max_works
        self.HTTPStatus = Enum('Status', ['ok', 'not_found', 'error'])
        self.result = namedtuple('Result', 'status data')
        self.session = requests.session()
        self.img_type = images_type
        self.img_num = None
        self.headers = HEADERS
        self.index = 1

    def get_img(self, img_url):
        res = self.session.get(img_url)
        if res.status_code != 200:
            res.raise_for_status()
        return res.content
    def download_one(self, img_url, verbose):
        try:
            image = self.get_img(img_url)
        except requests.exceptions.HTTPError as e:
            res = e.response
            if res.status_code == 404:
                status = self.HTTPStatus.not_found
                msg = 'not_found'
            else:
                raise
        else:
            self.save_img(self.img_type, image)
            status = self.HTTPStatus.ok
            msg = 'ok'
        if verbose:
            print(img_url, msg)
        return self.result(status, msg)
    def get_img_url(self):
        urls = [BASE_URL.format(keyword=self.img_type, page=page) for page in self.img_num]
        for url in urls:
            res = self.session.get(url, headers=self.headers)
            if res.status_code == 200:
                img_list = re.findall(r'"thumbURL":"(.*?)"', res.text)
                # Yield the image addresses for the other functions to use
                yield {img_url for img_url in img_list}
            elif res.status_code == 404:
                print('----- Access failed: resource not found -----')
                yield None
            elif res.status_code == 403:
                print('***** Access failed: the server denied access *****')
                yield None
            else:
                print('>>> Network connection failed <<<')
                yield None
    def download_many(self, img_url_set, verbose=False):
        if img_url_set:
            counter = collections.Counter()
            with futures.ThreadPoolExecutor(self.max_works) as executor:
                to_do_map = {}
                for img in img_url_set:
                    future = executor.submit(self.download_one, img, verbose)
                    to_do_map[future] = img
                done_iter = futures.as_completed(to_do_map)
                if not verbose:
                    done_iter = tqdm(done_iter, total=len(img_url_set))
                for future in done_iter:
                    try:
                        res = future.result()
                    except requests.exceptions.HTTPError as e:
                        error_msg = 'HTTP error {res.status_code} - {res.reason}'
                        error_msg = error_msg.format(res=e.response)
                    except requests.exceptions.ConnectionError:
                        error_msg = 'ConnectionError error'
                    else:
                        error_msg = ''
                        status = res.status
                    if error_msg:
                        status = self.HTTPStatus.error
                    counter[status] += 1
                    if verbose and error_msg:
                        img = to_do_map[future]
                        print('*** Error for {}: {}'.format(img, error_msg))
            return counter
        else:
            pass
    def save_img(self, img_type, image):
        with open('{}/{}.jpg'.format(img_type, self.index), 'wb') as f:
            f.write(image)
        self.index += 1
    def what_want2download(self):
        # Prompt for the image type only if it was not passed to the constructor
        if self.img_type is None:
            self.img_type = input('Please enter the type of image you want to download, anything is ok ~ >>> ')
        try:
            os.mkdir(self.img_type)
        except FileExistsError:
            pass
        img_num = input('Please enter the number of pages to download (1 means 30 images, 2 means 60 images) >>> ')
        while True:
            if img_num.isdigit():
                img_num = int(img_num) * 30
                self.img_num = range(30, img_num + 1, 30)
                break
            else:
                img_num = input('Input error, please re-enter the number of downloads >>> ')
    def main(self):
        # Get the image type and the number of downloads
        total_counter = {}
        self.what_want2download()
        for img_url_set in self.get_img_url():
            if img_url_set:
                counter = self.download_many(img_url_set, False)
                for key in counter:
                    if key in total_counter:
                        total_counter[key] += counter[key]
                    else:
                        total_counter[key] = counter[key]
            else:
                # Error reporting could be added here
                pass
            time.sleep(0.5)
        return total_counter

if __name__ == '__main__':
    max_works = 20
    bd_spider = BaiDuSpider(max_works)
    print(bd_spider.main())
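A quick summary of what the crawler does: it requests Baidu's acjson search endpoint one page at a time (30 images per page), extracts the thumbURL fields from the response with a regular expression, and downloads the thumbnails concurrently with a ThreadPoolExecutor, using tqdm to display progress; each image is saved into a folder named after the keyword you enter. As said above, this is only a summary, not a crawler tutorial.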
# Sketch_the_generated_code.py
import cv2


def drawing(src, id=None):
    image_rgb = cv2.imread(src)
    image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
    image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
    image_blend = cv2.divide(image_gray, image_blur, scale=255)
    cv2.imwrite(f'Drawing_images/result-{id}.jpg', image_blend)
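One thing to keep in mind when using drawing on its own: cv2.imwrite does not create missing directories, so the Drawing_images folder has to exist beforehand or the result is silently not written. A minimal usage sketch (the sample path is the one used earlier in this article):

import os
from Sketch_the_generated_code import drawing

os.makedirs('Drawing_images', exist_ok=True)  # imwrite will not create this folder for us
drawing('images/image_1.jpg', id=0)           # writes Drawing_images/result-0.jpg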
# image_list.image_list_path.py
import os
from natsort import natsorted

IMAGES_LIST = []


def image_list(path):
    global IMAGES_LIST
    for root, dirs, files in os.walk(path):
        # Sort by file name
        # files.sort()
        files = natsorted(files)
        # Traverse all files
        for file in files:
            # Only keep files whose suffix is .jpg
            if os.path.splitext(file)[1] == '.jpg':
                # Join into a complete path
                # print(file)
                filePath = os.path.join(root, file)
                print(filePath)
                # Add to the list
                IMAGES_LIST.append(filePath)
    return IMAGES_LIST
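Why natsorted instead of the commented-out files.sort()? The crawler names files with an increasing numeric index, and plain lexicographic sorting would put 10.jpg before 2.jpg, while natural sorting keeps them in numeric order. A tiny illustration:

from natsort import natsorted

names = ['10.jpg', '2.jpg', '1.jpg']
print(sorted(names))     # ['1.jpg', '10.jpg', '2.jpg'] -- lexicographic order
print(natsorted(names))  # ['1.jpg', '2.jpg', '10.jpg'] -- natural order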
# main.py
import os
import time

from Crawler.Spider import BaiDuSpider
from Sketch_the_generated_code import drawing
from image_list.image_list_path import image_list

MAX_WORKS = 20

if __name__ == '__main__':
    # now_path = os.getcwd()
    # img_type = 'ai'
    img_type = input('Please enter the type of image you want to download, anything is ok ~ >>> ')
    bd_spider = BaiDuSpider(MAX_WORKS, img_type)
    print(bd_spider.main())
    # Give the downloads a moment to finish being written to disk;
    # reading them back too early (or sleeping too briefly) can raise an error
    time.sleep(10)
    for index, path in enumerate(image_list(img_type)):
        drawing(src=path, id=index)
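To run the whole pipeline, execute main.py, enter a keyword (anything you like) and the number of pages to download; the crawler saves the images into a folder named after the keyword, and the loop at the end converts every downloaded .jpg into a sketch in Drawing_images/ (which, as noted above, has to exist).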
So the final directory structure looks like this:
C:.
│  main.py
│  Sketch_the_generated_code.py
│
├─Crawler
│  │  Spider.py
│  │
│  └─__pycache__
│          Spider.cpython-37.pyc
│
├─drawing
│  │  result.jpg
│  │  result1.jpg
│  │  Sketch_the_generated_code.py
│  │  study.py
│  │
│  ├─images
│  │      image_1.jpg
│  │
│  └─__pycache__
│          Sketch_the_generated_code.cpython-37.pyc
│
├─Drawing_images
├─image_list
│  │  image_list_path.py
│  │
│  └─__pycache__
│          image_list_path.cpython-37.pyc
│
└─__pycache__
        Sketch_the_generated_code.cpython-37.pyc
At this point, all the code is complete.