2022년 1월 19일 수요일

[Python] ToonKor Season 2 All

 ToonKor Season 2 All

import bs4, codecs
import requests
import base64
import os
import io

quit_flag = False  # set by the SIGINT handler to request a graceful stop of the download loops

import signal
import sys

def signal_handler(sig, frame):
    """SIGINT (Ctrl+C) handler: request a graceful shutdown.

    Bug fix: without the ``global`` declaration the assignment below
    created a *local* variable, so the module-level ``quit_flag`` never
    changed and pressing Ctrl+C had no effect on the download loops.
    """
    global quit_flag
    quit_flag = True
    print('You pressed Ctrl+C!', quit_flag)
    #sys.exit(0)

# Install the Ctrl+C handler so long download runs can be interrupted cleanly.
signal.signal(signal.SIGINT, signal_handler)
#print('Press Ctrl+C')
#signal.pause()


# Default output directory for downloaded webtoons (reassigned by get_week_webtoons).
target_folder = r"D:/Temp6"

# All requests below use verify=False, so silence the resulting
# InsecureRequestWarning spam from urllib3.
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
image_ext = None  # optional image-extension filter; None means accept every image URL
# Browser-like User-Agent — presumably needed so the site serves pages
# to this script rather than blocking the default agent; TODO confirm.
request_headers = {
    'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 '
                   '(KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'),
}

# Characters that are invalid or troublesome in file names, each mapped to '_'.
_UNSAFE_CHARS = str.maketrans({c: "_" for c in ':?/*<>\t'})

def safeFileName(filename):
    """Return *filename* with filesystem-unsafe characters replaced by '_'.

    Replaces : ? / * < > and TAB, then strips surrounding whitespace.
    str.translate performs all substitutions in a single C-level pass
    instead of seven chained .replace() calls.
    """
    return filename.translate(_UNSAFE_CHARS).strip()
    
def getFile(url):
    """Read a local HTML file (path given in *url*) and return a parsed soup."""
    with codecs.open(url, 'r', encoding='utf8') as handle:
        markup = handle.read()
    soup = bs4.BeautifulSoup(markup, 'html.parser')
    return soup
 
def getUrl(url, headers=None, params=()):
    """GET *url* and return the response body parsed as BeautifulSoup.

    Fix: the original used a mutable dict as the *headers* default — the
    classic shared-default pitfall; a None sentinel is used instead
    (backward compatible for all callers).  TLS verification is disabled
    because the site's certificate does not validate.
    """
    resp = requests.get(url, verify=False, headers=headers or {}, params=params)
    return bs4.BeautifulSoup(resp.text, 'html.parser')
 
def getUrlHtml(url, headers=None, params=()):
    """GET *url*; return (BeautifulSoup tree, raw body decoded as UTF-8).

    Fix: mutable-dict default replaced by the None sentinel.
    NOTE(review): the soup uses resp.text (requests' detected encoding)
    while the raw string forces UTF-8 from resp.content — kept as-is.
    """
    resp = requests.get(url, verify=False, headers=headers or {}, params=params)
    return bs4.BeautifulSoup(resp.text, 'html.parser'), resp.content.decode('utf8')
 
def urlToFile(url, file_name):
    """Download *url* with the module's browser headers and save the raw bytes."""
    response = requests.get(url, verify=False, headers=request_headers, params=())
    with open(file_name, "wb") as out:
        out.write(response.content)
 
def extractTag(bs, tag):
    """Remove every *tag* element from the soup *bs*, in place.

    Fix: the original abused a list comprehension purely for its side
    effect (building a throwaway list); a plain loop states the intent.
    """
    for element in bs(tag):
        element.extract()
 
def getToonKor(comicsUrl, baseUrl, baseDir):
    """Download every episode of the comic at *comicsUrl* as an HTML page.

    Each episode becomes <baseDir>/<comic title>/<episode title>.html
    containing <img> tags that hot-link the episode's images (with a
    no-referrer meta tag so the images load).  Returns early when the
    comic directory already exists, or when quit_flag is set by the
    SIGINT handler during a retry loop.

    Fixes: bare ``except`` narrowed to ``except Exception`` so
    KeyboardInterrupt/SystemExit are not swallowed; the output file is
    opened with ``with`` so it is closed even if a write fails.
    """
    # Fetch the comic index page, retrying on any network/parse failure.
    while True:
        try:
            doc = getUrl(comicsUrl)
            table = doc.select("table.bt_view2")[0]
            title = table.select("td.bt_title")[0].text
            break
        except Exception:
            print(comicsUrl, "-> retry")
            if quit_flag:
                return
            continue

    episodes = doc.select("table.web_list")[0].select("td.content__title")

    new_dir = os.path.join(baseDir, safeFileName(title))
    if os.path.isdir(new_dir):
        return  # comic already downloaded by an earlier run
    os.mkdir(new_dir)

    for count, e in enumerate(episodes, start=1):
        url = baseUrl + e['data-role']
        title = e['alt']
        # Retry until the episode page containing the embedded image list loads.
        while True:
            try:
                bs_img, html_img = getUrlHtml(url, request_headers)
                begin = html_img.index("var tnimg = '")
                break
            except Exception:
                print(url, "-> retry")
                if quit_flag:
                    return
                continue
        end = html_img.index("';", begin)
        # The image list is a base64-encoded HTML fragment of <img> tags.
        data = html_img[begin + 13: end]
        img_list = base64.b64decode(data.encode("UTF-8")).decode("UTF-8")
        imgs = bs4.BeautifulSoup(img_list, 'html.parser').select("img")

        html_file = os.path.join(new_dir, safeFileName(title) + ".html")
        if os.path.isfile(html_file):
            print(html_file, "-> exists")
            continue
        print(len(episodes), count, html_file)

        with open(html_file, "w") as f:
            f.write('<meta name="referrer" content="no-referrer" /><br>\n')
            for img in imgs:
                img_url = img.get('src')
                if not img_url:
                    continue
                if image_ext is None or img_url.endswith(image_ext):
                    if not img_url.startswith("http"):
                        img_url = baseUrl + img_url
                    f.write('<img src="' + img_url + '" /><br>\n')

def saveToonKorComics():
    """Download a fixed list of comics into D:/Temp2, honoring quit_flag."""
    base_url = "https://tkr035.com"
    out_dir = "D:/Temp2/"
    comic_urls = (
        "https://tkr035.com/webtoon/1061",
    )
    for comic in comic_urls:
        getToonKor(comic, base_url, out_dir)
        if quit_flag:
            break
    print("END")

def getToonKorList(list_url, start=0):
    """Walk one listing page and download every comic found on it.

    Entries with 1-based position below *start* are skipped (resume
    support); stops early when quit_flag is set.
    """
    doc = getUrl(list_url)
    entries = doc.select("div.section-item-inner")
    #print(entries)
    total = len(entries)
    for idx, entry in enumerate(entries, start=1):
        if idx < start:
            continue
        comics = entry.select("a")[0]
        print(idx, total, comics['alt'], comics['href'])
        getToonKor(comics['href'], "https://tkr035.com", target_folder)
        if quit_flag:
            break

def get_finished_webtoons():
    """Download the '완결' (completed) listing, pages 1 through 13.

    Per-page start offsets resume runs that were interrupted part-way.
    """
    base = "https://tkr035.com/wt/%EC%99%84%EA%B2%B0"
    resume_at = {1: 36, 4: 130, 6: 195}
    getToonKorList(base, resume_at.get(1, 0))
    for page in range(2, 14):
        getToonKorList(base + "?gbun=&wpage=&page=" + str(page), resume_at.get(page, 0))

def get_continue_webtoons():
    """Download the '최신' (latest, all) listing from its first entry."""
    listing = "https://tkr035.com/wt/%EC%B5%9C%EC%8B%A0/0/all/%EC%9D%B8%EA%B8%B0//%EC%A0%84%EC%B2%B4"
    getToonKorList(listing, 0)

def get_week_webtoons(week, page, start=0):
    """Download one page of the weekday listing for *week*.

    Redirects output to D:/Temp7/<week> by rebinding the module-level
    target_folder, then walks the listing starting at entry *start*.
    """
    global target_folder
    target_folder = r"D:/Temp7/" + str(week)
    listing = ("https://tkr035.com/wt/%EC%97%B0%EC%9E%AC%EC%A4%91/"
               + str(week)
               + "/all/%EC%9D%B8%EA%B8%B0/%EC%A0%84%EC%B2%B4?gbun=&wpage=&page="
               + str(page))
    getToonKorList(listing, start)
    
def get_week_webtoons_all():
    """Fetch three listing pages for weekdays 2-7 and one page for day 8.

    (Day 1 was already completed in earlier runs and is not re-fetched.)
    """
    pages_per_week = {2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 3, 8: 1}
    for week, pages in pages_per_week.items():
        for page in range(1, pages + 1):
            get_week_webtoons(week, page)

if __name__ == "__main__":
    # Entry point: bulk-download the weekday listings.
    #get_continue_webtoons()
    get_week_webtoons_all()
    
    

[Python] ToonKor Season 2

 ToonKor Season 2

import bs4, codecs
import requests
import base64
import os
import io
import sys
 
# All requests below use verify=False, so silence urllib3's InsecureRequestWarning.
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
image_ext = None  # optional image-extension filter; None accepts every image URL
# Browser-like User-Agent — presumably needed so the site serves pages; TODO confirm.
request_headers = {
    'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 '
                   '(KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'),
}

# All characters this downloader treats as unsafe in file names, mapped to '_'.
_UNSAFE_CHARS = str.maketrans({c: "_" for c in ':?/*<>\t'})

def safeFileName(filename):
    """Return *filename* with filesystem-unsafe characters replaced by '_'.

    Fix: this variant only handled : ? < > — a title containing '/' or
    '*' produced an invalid path and broke file creation.  Now also
    replaces / * and TAB, matching the sibling ToonKor scripts, in one
    str.translate pass.
    """
    return filename.translate(_UNSAFE_CHARS).strip()

def getFile(url):
    """Read a local HTML file (path given in *url*) and return a parsed soup."""
    with codecs.open(url, 'r', encoding='utf8') as handle:
        markup = handle.read()
    soup = bs4.BeautifulSoup(markup, 'html.parser')
    return soup
 
def getUrl(url, headers=None, params=()):
    """GET *url* and return the response body parsed as BeautifulSoup.

    Fix: mutable dict default for *headers* replaced by a None sentinel
    (backward compatible).  TLS verification stays disabled because the
    site's certificate does not validate.
    """
    resp = requests.get(url, verify=False, headers=headers or {}, params=params)
    return bs4.BeautifulSoup(resp.text, 'html.parser')
 
def getUrlHtml(url, headers=None, params=()):
    """GET *url*; return (BeautifulSoup tree, raw body decoded as UTF-8).

    Fix: mutable-dict default replaced by the None sentinel.
    """
    resp = requests.get(url, verify=False, headers=headers or {}, params=params)
    return bs4.BeautifulSoup(resp.text, 'html.parser'), resp.content.decode('utf8')
 
def urlToFile(url, file_name):
    """Download *url* with the module's browser headers and save the raw bytes."""
    response = requests.get(url, verify=False, headers=request_headers, params=())
    with open(file_name, "wb") as out:
        out.write(response.content)
 
def extractTag(bs, tag):
    """Remove every *tag* element from the soup *bs*, in place.

    Fix: a plain loop replaces the side-effect-only list comprehension.
    """
    for element in bs(tag):
        element.extract()
 
def getToonKor(comicsUrl, baseUrl, baseDir):
    """Download every episode of the comic at *comicsUrl* as an HTML page
    of hot-linked <img> tags under <baseDir>/<comic title>/.

    Fixes: bare ``except`` narrowed to ``except Exception`` so Ctrl+C can
    break the (otherwise unbounded) retry loops; the output file is
    opened with ``with`` so it is closed even if a write fails.
    """
    # Fetch the comic index page, retrying on any network/parse failure.
    while True:
        try:
            doc = getUrl(comicsUrl)
            table = doc.select("table.bt_view2")[0]
            title = table.select("td.bt_title")[0].text
            break
        except Exception:
            print(comicsUrl, "-> retry")
            continue

    episodes = doc.select("table.web_list")[0].select("td.content__title")

    new_dir = os.path.join(baseDir, safeFileName(title))
    if not os.path.isdir(new_dir):
        os.mkdir(new_dir)

    for e in episodes:
        url = baseUrl + e['data-role']
        title = e['alt']
        # Retry until the episode page containing the embedded image list loads.
        while True:
            try:
                bs_img, html_img = getUrlHtml(url, request_headers)
                begin = html_img.index("var tnimg = '")
                break
            except Exception:
                print(url, "-> retry")
                continue
        end = html_img.index("';", begin)
        # The image list is a base64-encoded HTML fragment of <img> tags.
        data = html_img[begin + 13: end]
        img_list = base64.b64decode(data.encode("UTF-8")).decode("UTF-8")
        imgs = bs4.BeautifulSoup(img_list, 'html.parser').select("img")

        html_file = os.path.join(new_dir, safeFileName(title) + ".html")
        print(html_file)
        with open(html_file, "w") as f:
            f.write('<meta name="referrer" content="no-referrer" /><br>\n')
            for img in imgs:
                img_url = img.get('src')
                if not img_url:
                    continue
                if image_ext is None or img_url.endswith(image_ext):
                    if not img_url.startswith("http"):
                        img_url = baseUrl + img_url
                    print(img_url)
                    f.write('<img src="' + img_url + '" /><br>\n')
        
if __name__ == "__main__":
    # Comic URLs come from the command line; fall back to a built-in default.
    # Previous hard-coded targets (kept for reference): webtoon/2939,
    # webtoon/826, webtoon/2794, webtoon/2647.
    default_url = "https://tkr035.com/webtoon/6117"
    urls = sys.argv[1:] if len(sys.argv) > 1 else [default_url]
    base_url = "https://tkr035.com"
    out_dir = "D:/Temp2/"
    for comic in urls:
        getToonKor(comic, base_url, out_dir)
    print("END")

[python] ToonKor Season 1

 ToonKor Season 1

import bs4, codecs
import requests
import base64
import os
import io
import sys
import re

# All requests below use verify=False, so silence urllib3's InsecureRequestWarning.
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
image_ext = None  # optional image-extension filter; None accepts every image URL
# Browser-like User-Agent — presumably needed so the site serves pages; TODO confirm.
request_headers = {
    'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 '
                   '(KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'),
}

# Any single character that is unsafe in a file name (plus '.').
_UNSAFE_RE = re.compile(r"[:?/*<>\t.]")

def safeFileName(filename):
    """Replace filesystem-unsafe characters (and '.') with '_', then strip."""
    return _UNSAFE_RE.sub("_", filename).strip()
    
def getFile(url):
    """Read a local HTML file (path given in *url*) and return a parsed soup."""
    with codecs.open(url, 'r', encoding='utf8') as handle:
        markup = handle.read()
    soup = bs4.BeautifulSoup(markup, 'html.parser')
    return soup

def getUrl(url, headers=None, params=()):
    """GET *url* and return the response body parsed as BeautifulSoup.

    Fix: mutable dict default for *headers* replaced by a None sentinel
    (backward compatible).
    """
    resp = requests.get(url, verify=False, headers=headers or {}, params=params)
    return bs4.BeautifulSoup(resp.text, 'html.parser')

def getUrlHtml(url, headers=None, params=()):
    """GET *url*; return (BeautifulSoup tree, raw body decoded as UTF-8).

    Fix: mutable-dict default replaced by the None sentinel.
    """
    resp = requests.get(url, verify=False, headers=headers or {}, params=params)
    return bs4.BeautifulSoup(resp.text, 'html.parser'), resp.content.decode('utf8')

def urlToFile(url, file_name):
    """Download *url* with the module's browser headers and save the raw bytes."""
    response = requests.get(url, verify=False, headers=request_headers, params=())
    with open(file_name, "wb") as out:
        out.write(response.content)

def extractTag(bs, tag):
    """Remove every *tag* element from the soup *bs*, in place.

    Fix: a plain loop replaces the side-effect-only list comprehension.
    """
    for element in bs(tag):
        element.extract()

def getToonKor(comicsUrl, baseUrl, baseDir):
    """Download every episode of *comicsUrl*, saving the actual image files
    under <baseDir>/<page title>/<episode title>/img_NNNN.<ext>.

    Episodes whose sub-directory already exists are skipped, so an
    interrupted run can be resumed.  Fixes: ``== None`` replaced by
    ``is None``, stray semicolons removed, skip branch made explicit.
    """
    doc = getUrl(comicsUrl)
    title = doc.find("title").text
    table = doc.select("table.web_list")[0]
    episodes = table.select("td.episode__index")

    new_dir = os.path.join(baseDir, safeFileName(title))
    if not os.path.isdir(new_dir):
        os.mkdir(new_dir)

    for e in episodes:
        url = baseUrl + e['data-role']
        bs_img, html_img = getUrlHtml(url, request_headers)
        title = bs_img.find("title").text
        # The image list is embedded as a base64-encoded HTML fragment.
        begin = html_img.index("var toon_img = '")
        end = html_img.index("';", begin)
        data = html_img[begin + 16: end]
        img_list = base64.b64decode(data.encode("UTF-8")).decode("UTF-8")
        imgs = bs4.BeautifulSoup(img_list, 'html.parser').select("img")

        sub_dir = os.path.join(new_dir, safeFileName(title))
        if os.path.isdir(sub_dir):
            print('skip -->', sub_dir)
            continue
        os.mkdir(sub_dir)
        print(sub_dir)

        k = 1
        for img in imgs:
            img_url = img.get('src')
            if not img_url:
                continue
            if image_ext is None or img_url.endswith(image_ext):
                if not img_url.startswith("http"):
                    img_url = baseUrl + img_url
                # Keep the source extension when one is present; default to .jpg.
                # NOTE(review): rfind('.') would also match a '.' inside a query
                # string — assumed the site's image URLs have none; confirm.
                ext = img_url.rfind(".")
                if ext >= 0:
                    file_name = ("img_%04d" % k) + img_url[ext:]
                else:
                    file_name = "img_%04d.jpg" % k
                urlToFile(img_url, os.path.join(sub_dir, file_name))
                print(img_url + " -> " + file_name)
                k += 1

if __name__ == "__main__":
    # Usage: script.py [comic_url [output_dir]]
    # The base URL is derived from the comic URL (scheme + host).
    #https://tkor.fish/%EC%9B%B9%ED%88%B0
    url = "https://tkor.fish/%EB%82%A8%EC%B9%9C-%EC%97%86%EC%9D%8C-%EB%82%A8%EC%B9%9C-%EC%9E%88%EC%9D%8C"
    baseUrl = "https://tkor.fish"
    outDir = "D:/Temp2/"
    if len(sys.argv) > 1:
        url = sys.argv[1]    
        # find('/', 8) locates the first '/' after "https://", i.e. end of host.
        baseUrl = url[:url.find('/',8)]
    if len(sys.argv) > 2:
        outDir = sys.argv[2]
    getToonKor(url, baseUrl, outDir)
    
        

2022년 1월 18일 화요일

[Java] SQLite Example

 Java SQLite Example


1. Download SQLite JDBC 

SQLite JDBC - javalibs


2. Example

import java.sql.*;

/**
 * Minimal SQLite helper built on JDBC: connect, create, insert, query.
 *
 * Fixes over the original: every Statement/ResultSet is closed via
 * try-with-resources (the originals leaked them), and insert() binds
 * values through a PreparedStatement instead of concatenating them into
 * the SQL text (which broke on values containing quotes and was an
 * SQL-injection hazard).
 */
public class SQ {

    /** Open (or create) the SQLite database file and return a connection. */
    public static Connection connect(String dbfile) throws SQLException {
        return DriverManager.getConnection("jdbc:sqlite:" + dbfile);
    }

    /** Run several DDL/DML statements in order on one Statement. */
    public static void execute( Connection conn, String[] statements ) throws SQLException {
        try (Statement state = conn.createStatement()) {
            for (String sql : statements) {
                state.executeUpdate(sql);
            }
        }
    }

    /** Run a single DDL/DML statement. */
    public static void execute( Connection conn, String statement ) throws SQLException {
        try (Statement state = conn.createStatement()) {
            state.executeUpdate(statement);
        }
    }

    /**
     * Drop and recreate {@code table}; each fields[i] is {name, sqlType}.
     * NOTE: table and column names cannot be bound as JDBC parameters —
     * callers must not pass untrusted identifiers.
     */
    public static void create( Connection conn, String table, String[][] fields ) throws SQLException {
        StringBuilder sb = new StringBuilder();
        sb.append("create table ").append(table).append(" (");
        for( int i = 0; i < fields.length; i++ ) {
            if( i != 0 ) sb.append(",");
            sb.append(fields[i][0]).append(" ").append(fields[i][1]);
        }
        sb.append(")");
        execute( conn, new String[] {
                "drop table if exists " + table ,
                sb.toString()
        });
    }

    /**
     * Insert one row; each fields[i] is {columnName, value}.
     * Values are bound as parameters, so any type setObject supports works
     * and quoting is handled by the driver.
     */
    public static void insert( Connection conn, String table, Object[][] fields ) throws SQLException {
        StringBuilder sb = new StringBuilder();
        sb.append("insert into ").append(table).append(" (");
        for( int i = 0; i < fields.length; i++ ) {
            if( i != 0 ) sb.append(",");
            sb.append(fields[i][0]);
        }
        sb.append(") values (");
        for( int i = 0; i < fields.length; i++ ) {
            if( i != 0 ) sb.append(",");
            sb.append("?");
        }
        sb.append(")");
        try (PreparedStatement ps = conn.prepareStatement(sb.toString())) {
            for( int i = 0; i < fields.length; i++ ) {
                ps.setObject(i + 1, fields[i][1]);
            }
            ps.executeUpdate();
        }
    }

    /** Select the given columns from {@code table}; returns one Object[] per row. */
    public static java.util.List<Object[]> query( Connection conn, String table, String[] columns ) throws SQLException {
        StringBuilder sb = new StringBuilder();
        sb.append("select ");
        for( int i = 0; i < columns.length; i++ ) {
            if( i != 0 ) sb.append(",");
            sb.append(columns[i]);
        }
        sb.append(" from ").append(table);

        java.util.List<Object[]> res = new java.util.ArrayList<>();
        try (Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery(sb.toString())) {
            while (rs.next()) {
                Object[] row = new Object[columns.length];
                for( int i = 0; i < columns.length; i++ ) {
                    row[i] = rs.getObject(columns[i]);
                }
                res.add(row);
            }
        }
        return res;
    }

    /** Close the connection (kept for API compatibility with existing callers). */
    public static void close(Connection conn) throws SQLException {
        conn.close();
    }

    /** Demo: create a table in a.db, insert one row, and print the contents. */
    public static void main(String[] args) {
        String table = "info";
        try (Connection conn = SQ.connect("a.db")) {
            SQ.create( conn, "info", new String[][] {
                    {"name","string"},
                    {"age","integer"},
                    {"num","string"}
            });

            SQ.insert( conn, table, new Object[][] {
                    {"name","Tommy"},
                    {"age",20},
                    {"num","011-1234-5678"}
            });

            java.util.List<Object[]> res = SQ.query( conn, "info", new String[] { "name", "age", "num"} );
            for( Object[] row : res ) {
                System.out.println( "" + row[0] + "," + row[1] + "," + row[2] );
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}