I wanted to collect images for deep learning, so I came across this article. The original code no longer worked because the contents of the web page had changed, so I rewrote it.
image_download.py
import requests
import urllib.request
import time
import json
def scraping(url, max_page_num):
    """Collect image source URLs from up to max_page_num search result pages.

    Args:
        url: Base search URL ending with the pagination query parameter
            (e.g. "...&b="), so a page offset can be appended directly.
        max_page_num: Number of result pages to visit.

    Returns:
        A flat list of image source URLs from every page that could be
        fetched and parsed; pages that fail are skipped.
    """
    # Pagination implementation: build the list of per-page URLs up front.
    page_list = get_page_list(url, max_page_num)
    # Get image URL list
    all_img_src_list = []
    for page in page_list:
        # Best-effort: one broken page must not abort the whole run,
        # but report it instead of silently swallowing every exception.
        try:
            all_img_src_list.extend(get_img_src_list(page))
        except Exception as e:
            print(f'skipping {page}: {e}')
    return all_img_src_list
def get_img_src_list(url):
    """Fetch one search result page and extract its image source URLs.

    The page embeds its data as a JSON blob inside a <script> tag (the
    original article used BeautifulSoup, but the images are no longer in
    the rendered HTML), so the JSON is sliced out of the raw page text
    between two known marker strings.

    Args:
        url: Full URL of a single search result page.

    Returns:
        List of image URL strings found on the page.

    Raises:
        ValueError: If either marker is missing, i.e. the page layout
            changed again. Failing loudly beats slicing from find()'s -1.
        json.JSONDecodeError: If the extracted blob is not valid JSON.
    """
    # Access the search results page.
    response = requests.get(url)
    webtext = response.text
    start_word = '<script>__NEXT_DATA__ = '
    start_num = webtext.find(start_word)
    if start_num == -1:
        raise ValueError('start marker not found in page')
    webtext_start = webtext[start_num + len(start_word):]
    end_word = ';__NEXT_LOADED_PAGES__='
    end_num = webtext_start.find(end_word)
    if end_num == -1:
        raise ValueError('end marker not found in page')
    web_dic = json.loads(webtext_start[:end_num])
    # NOTE(review): assumes each entry under "algos" carries an 'imageSrc'
    # key — verify against the live page if extraction starts failing.
    img_src_list = [img['imageSrc'] for img in web_dic["props"]["initialProps"]["pageProps"]["algos"]]
    return img_src_list
def get_page_list(url, max_page_num):
    """Build the paginated search URLs, one per result page.

    Each result page shows 20 images; the pagination parameter already
    present at the end of *url* takes the 1-based index of the first
    image on the page (1, 21, 41, ...).

    Args:
        url: Base search URL ending with the pagination query parameter.
        max_page_num: How many page URLs to generate.

    Returns:
        List of full page URLs.
    """
    images_per_page = 20  # If you change this, the number of downloads will change.
    pages = []
    for page_index in range(max_page_num):
        first_image_index = page_index * images_per_page + 1
        pages.append(f'{url}{first_image_index}')
    return pages
def download_img(src, dist_path):
    """Download a single image from *src* and write it to *dist_path*.

    Sleeps one second before each request to avoid hammering the remote
    server. Failures are reported but not raised so that one bad URL
    does not stop the whole batch.

    Args:
        src: Image URL to download.
        dist_path: Destination file path for the binary image data.
    """
    time.sleep(1)
    try:
        with urllib.request.urlopen(src) as data:
            img = data.read()
        with open(dist_path, 'wb') as f:
            f.write(img)
    # urllib.error.URLError subclasses OSError, so this also covers
    # network failures, HTTP errors, and local file-write errors.
    except OSError as e:
        print(f'failed to download {src}: {e}')
def main():
    """Search for each configured word and download every found image.

    Images are saved as ./img/image_<word-index>_<image-index>.jpg; the
    destination folder is created automatically if it does not exist.
    """
    import os  # local import: only main needs the filesystem helper
    search_words = ["Kanna Hashimoto"]  # Pass the words you want to search for as a list.
    os.makedirs('./img', exist_ok=True)  # Change the save destination as appropriate.
    for num, search_word in enumerate(search_words):
        url = f"https://search.yahoo.co.jp/image/search?p={search_word}&ei=UTF-8&b="
        max_page_num = 20
        all_img_src_list = scraping(url, max_page_num)

        # Image download
        for i, src in enumerate(all_img_src_list):
            download_img(src, f'./img/image_{num}_{i}.jpg')
if __name__ == '__main__':
    main()
If you run the script above with Python, the images will be saved into the img folder.
This is the image.

Be careful — scraping puts load on the target server!
I tried to automatically collect images of Kanna Hashimoto with Python! !!
Recommended Posts