Fix LDOCE6 issue #2; Fix MdxService bugs; Fix some ui bugs; Fix longman service; Fix oxford_learning service;

This commit is contained in:
St.Huang 2018-07-06 12:29:50 +08:00
parent 2de1a96985
commit ac42eb0809
11 changed files with 1334 additions and 638 deletions

View File

@ -1,4 +1,8 @@
# -*- coding: utf-8 -*-
# version: python 3.5
from readmdict import MDX, MDD
from struct import pack, unpack
from io import BytesIO
import re
@ -6,8 +10,6 @@ import sys
import os
import sqlite3
import json
from aqt.utils import showInfo, showText, tooltip
from .readmdict import MDX, MDD
# zlib compression is used for engine version >=2.0
import zlib
@ -26,11 +28,10 @@ version = '1.1'
class IndexBuilder(object):
# todo: enable history
def __init__(self, fname, encoding="", passcode=None, force_rebuild=False,
enable_history=False, sql_index=True, check=False):
#todo: enable history
def __init__(self, fname, encoding = "", passcode = None, force_rebuild = False, enable_history = False, sql_index = True, check = False):
self._mdx_file = fname
self._mdd_file = ""
self._encoding = ''
self._stylesheet = {}
self._title = ''
@ -38,62 +39,75 @@ class IndexBuilder(object):
self._description = ''
self._sql_index = sql_index
self._check = check
self._force_rebuild = force_rebuild
_filename, _file_extension = os.path.splitext(fname)
# assert(_file_extension == '.mdx')
# assert(os.path.isfile(fname))
assert(_file_extension == '.mdx')
assert(os.path.isfile(fname))
self._mdx_db = _filename + ".mdx.db"
self._mdd_db = _filename + ".mdd.db"
self._mdd_file = _filename + ".mdd"
self.header_build_flag = False
def get_header(self):
def _():
self.header_build_flag = True
mdx = MDX(self._mdx_file, only_header=True)
self._encoding = mdx.meta['encoding']
self._stylesheet = json.loads(mdx.meta['stylesheet'])
self._title = mdx.meta['title']
self._description = mdx.meta['description']
# make index anyway
if force_rebuild:
self._make_mdx_index(self._mdx_db)
if os.path.isfile(_filename + '.mdd'):
self._mdd_file = _filename + ".mdd"
self._mdd_db = _filename + ".mdd.db"
self._make_mdd_index(self._mdd_db)
if os.path.isfile(self._mdx_db):
# read from META table
try:
conn = sqlite3.connect(self._mdx_db)
#cursor = conn.execute("SELECT * FROM META")
cursor = conn.execute(
'SELECT value FROM META WHERE key IN ("encoding","stylesheet","title","description","version")')
self._encoding, stylesheet,\
self._title, self._description, self._version = (
each[0] for each in cursor)
self._stylesheet = json.loads(stylesheet)
#read from META table
conn = sqlite3.connect(self._mdx_db)
#cursor = conn.execute("SELECT * FROM META")
cursor = conn.execute("SELECT * FROM META WHERE key = \"version\"")
#check whether a version number is present
for cc in cursor:
self._version = cc[1]
################# if no version info #############
if not self._version:
print("version info not found")
conn.close()
if not self._version:
_()
except:
_()
self._make_mdx_index(self._mdx_db)
print("mdx.db rebuilt!")
if os.path.isfile(_filename + '.mdd'):
self._mdd_file = _filename + ".mdd"
self._mdd_db = _filename + ".mdd.db"
self._make_mdd_index(self._mdd_db)
print("mdd.db rebuilt!")
return None
cursor = conn.execute("SELECT * FROM META WHERE key = \"encoding\"")
for cc in cursor:
self._encoding = cc[1]
cursor = conn.execute("SELECT * FROM META WHERE key = \"stylesheet\"")
for cc in cursor:
self._stylesheet = json.loads(cc[1])
cursor = conn.execute("SELECT * FROM META WHERE key = \"title\"")
for cc in cursor:
self._title = cc[1]
cursor = conn.execute("SELECT * FROM META WHERE key = \"description\"")
for cc in cursor:
self._description = cc[1]
#for cc in cursor:
# if cc[0] == 'encoding':
# self._encoding = cc[1]
# continue
# if cc[0] == 'stylesheet':
# self._stylesheet = json.loads(cc[1])
# continue
# if cc[0] == 'title':
# self._title = cc[1]
# continue
# if cc[0] == 'title':
# self._description = cc[1]
else:
_()
self._make_mdx_index(self._mdx_db)
def rebuild(self):
self._make_mdx_index()
if os.path.isfile(self._mdd_file):
self._make_mdd_index()
def check_build(self):
# check if the mdx.db and mdd.db files are available
if self.header_build_flag or not os.path.isfile(self._mdx_db):
self._make_mdx_index()
if os.path.isfile(self._mdd_file) and not os.path.isfile(self._mdd_db):
self._make_mdd_index()
self.header_build_flag = False
@property
def meta(self):
return {'title': self._title, 'description': self._description,
'encoding': self._encoding, 'version': self._version,
'stylesheet': self._stylesheet}
if os.path.isfile(_filename + ".mdd"):
self._mdd_file = _filename + ".mdd"
self._mdd_db = _filename + ".mdd.db"
if not os.path.isfile(self._mdd_db):
self._make_mdd_index(self._mdd_db)
pass
def _replace_stylesheet(self, txt):
# substitute stylesheet definition
@ -103,20 +117,19 @@ class IndexBuilder(object):
for j, p in enumerate(txt_list[1:]):
style = self._stylesheet[txt_tag[j][1:-1]]
if p and p[-1] == '\n':
txt_styled = txt_styled + \
style[0].encode('utf-8') + p.rstrip() + \
style[1].encode('utf-8') + '\r\n'
txt_styled = txt_styled + style[0] + p.rstrip() + style[1] + '\r\n'
else:
txt_styled = txt_styled + \
style[0].encode('utf-8') + p + style[1].encode('utf-8')
txt_styled = txt_styled + style[0] + p + style[1]
return txt_styled
def _make_mdx_index(self):
if os.path.exists(self._mdx_db):
os.remove(self._mdx_db)
mdx = MDX(self._mdx_file, only_header=False)
index_list = mdx.get_index(check_block=self._check)
conn = sqlite3.connect(self._mdx_db)
def _make_mdx_index(self, db_name):
if os.path.exists(db_name):
os.remove(db_name)
mdx = MDX(self._mdx_file)
self._mdx_db = db_name
returned_index = mdx.get_index(check_block = self._check)
index_list = returned_index['index_dict_list']
conn = sqlite3.connect(db_name)
c = conn.cursor()
c.execute(
''' CREATE TABLE MDX_INDEX
@ -133,50 +146,65 @@ class IndexBuilder(object):
tuple_list = [
(item['key_text'],
item['file_pos'],
item['compressed_size'],
item['decompressed_size'],
item['record_block_type'],
item['record_start'],
item['record_end'],
item['offset']
)
item['file_pos'],
item['compressed_size'],
item['decompressed_size'],
item['record_block_type'],
item['record_start'],
item['record_end'],
item['offset']
)
for item in index_list
]
]
c.executemany('INSERT INTO MDX_INDEX VALUES (?,?,?,?,?,?,?,?)',
tuple_list)
# build the metadata table
meta = returned_index['meta']
c.execute(
'''CREATE TABLE META
(key text,
value text
)''')
#for k,v in meta:
# c.execute(
# 'INSERT INTO META VALUES (?,?)',
# (k, v)
# )
c.executemany(
'INSERT INTO META VALUES (?,?)',
[('encoding', self.meta['encoding']),
('stylesheet', json.dumps(self.meta['stylesheet'])),
('title', self.meta['title']),
('description', self.meta['description']),
'INSERT INTO META VALUES (?,?)',
[('encoding', meta['encoding']),
('stylesheet', meta['stylesheet']),
('title', meta['title']),
('description', meta['description']),
('version', version)
]
)
)
if self._sql_index:
c.execute(
'''
CREATE INDEX key_index ON MDX_INDEX (key_text)
'''
)
)
conn.commit()
conn.close()
#set class member
self._encoding = meta['encoding']
self._stylesheet = json.loads(meta['stylesheet'])
self._title = meta['title']
self._description = meta['description']
def _make_mdd_index(self):
if os.path.exists(self._mdd_db):
os.remove(self._mdd_db)
def _make_mdd_index(self, db_name):
if os.path.exists(db_name):
os.remove(db_name)
mdd = MDD(self._mdd_file)
index_list = mdd.get_index(check_block=self._check)
conn = sqlite3.connect(self._mdd_db)
self._mdd_db = db_name
index_list = mdd.get_index(check_block = self._check)
conn = sqlite3.connect(db_name)
c = conn.cursor()
c.execute(
''' CREATE TABLE MDX_INDEX
@ -193,16 +221,16 @@ class IndexBuilder(object):
tuple_list = [
(item['key_text'],
item['file_pos'],
item['compressed_size'],
item['decompressed_size'],
item['record_block_type'],
item['record_start'],
item['record_end'],
item['offset']
)
item['file_pos'],
item['compressed_size'],
item['decompressed_size'],
item['record_block_type'],
item['record_start'],
item['record_end'],
item['offset']
)
for item in index_list
]
]
c.executemany('INSERT INTO MDX_INDEX VALUES (?,?,?,?,?,?,?,?)',
tuple_list)
if self._sql_index:
@ -210,13 +238,12 @@ class IndexBuilder(object):
'''
CREATE UNIQUE INDEX key_index ON MDX_INDEX (key_text)
'''
)
)
conn.commit()
conn.close()
@staticmethod
def get_data_by_index(fmdx, index):
def get_mdx_by_index(self, fmdx, index):
fmdx.seek(index['file_pos'])
record_block_compressed = fmdx.read(index['compressed_size'])
record_block_type = record_block_compressed[:4]
@ -231,97 +258,111 @@ class IndexBuilder(object):
print("LZO compression is not supported")
# decompress
header = b'\xf0' + pack('>I', index['decompressed_size'])
_record_block = lzo.decompress(record_block_compressed[
8:], initSize=decompressed_size, blockSize=1308672)
# zlib compression
_record_block = lzo.decompress(record_block_compressed[8:], initSize = decompressed_size, blockSize=1308672)
# zlib compression
elif record_block_type == 2:
# decompress
_record_block = zlib.decompress(record_block_compressed[8:])
data = _record_block[index['record_start'] -
index['offset']:index['record_end'] - index['offset']]
return data
def get_mdx_by_index(self, fmdx, index):
data = self.get_data_by_index(fmdx, index)
record = data.decode(self._encoding, errors='ignore').strip(
u'\x00').encode('utf-8')
record = _record_block[index['record_start'] - index['offset']:index['record_end'] - index['offset']]
record = record.decode(self._encoding, errors='ignore').strip(u'\x00').encode('utf-8')
if self._stylesheet:
record = self._replace_stylesheet(record)
record = record.decode('utf-8')
return record
def get_mdd_by_index(self, fmdx, index):
return self.get_data_by_index(fmdx, index)
fmdx.seek(index['file_pos'])
record_block_compressed = fmdx.read(index['compressed_size'])
record_block_type = record_block_compressed[:4]
record_block_type = index['record_block_type']
decompressed_size = index['decompressed_size']
#adler32 = unpack('>I', record_block_compressed[4:8])[0]
if record_block_type == 0:
_record_block = record_block_compressed[8:]
# lzo compression
elif record_block_type == 1:
if lzo is None:
print("LZO compression is not supported")
# decompress
header = b'\xf0' + pack('>I', index['decompressed_size'])
_record_block = lzo.decompress(record_block_compressed[8:], initSize = decompressed_size, blockSize=1308672)
# zlib compression
elif record_block_type == 2:
# decompress
_record_block = zlib.decompress(record_block_compressed[8:])
data = _record_block[index['record_start'] - index['offset']:index['record_end'] - index['offset']]
return data
@staticmethod
def lookup_indexes(db, keyword, ignorecase=None):
indexes = []
if ignorecase:
sql = u'SELECT * FROM MDX_INDEX WHERE lower(key_text) = lower("{}")'.format(
keyword)
else:
sql = u'SELECT * FROM MDX_INDEX WHERE key_text = "{}"'.format(
keyword)
with sqlite3.connect(db) as conn:
cursor = conn.execute(sql)
for result in cursor:
index = {}
index['file_pos'] = result[1]
index['compressed_size'] = result[2]
index['decompressed_size'] = result[3]
index['record_block_type'] = result[4]
index['record_start'] = result[5]
index['record_end'] = result[6]
index['offset'] = result[7]
indexes.append(index)
return indexes
def mdx_lookup(self, keyword, ignorecase=None):
def mdx_lookup(self, keyword):
conn = sqlite3.connect(self._mdx_db)
cursor = conn.execute("SELECT * FROM MDX_INDEX WHERE key_text = " + "\"" + keyword + "\"")
lookup_result_list = []
indexes = self.lookup_indexes(self._mdx_db, keyword, ignorecase)
with open(self._mdx_file, 'rb') as mdx_file:
for index in indexes:
lookup_result_list.append(
self.get_mdx_by_index(mdx_file, index))
mdx_file = open(self._mdx_file,'rb')
for result in cursor:
index = {}
index['file_pos'] = result[1]
index['compressed_size'] = result[2]
index['decompressed_size'] = result[3]
index['record_block_type'] = result[4]
index['record_start'] = result[5]
index['record_end'] = result[6]
index['offset'] = result[7]
lookup_result_list.append(self.get_mdx_by_index(mdx_file, index))
conn.close()
mdx_file.close()
return lookup_result_list
def mdd_lookup(self, keyword):
conn = sqlite3.connect(self._mdd_db)
cursor = conn.execute("SELECT * FROM MDX_INDEX WHERE key_text = " + "\"" + keyword + "\"")
lookup_result_list = []
mdd_file = open(self._mdd_file,'rb')
for result in cursor:
index = {}
index['file_pos'] = result[1]
index['compressed_size'] = result[2]
index['decompressed_size'] = result[3]
index['record_block_type'] = result[4]
index['record_start'] = result[5]
index['record_end'] = result[6]
index['offset'] = result[7]
lookup_result_list.append(self.get_mdd_by_index(mdd_file, index))
mdd_file.close()
conn.close()
return lookup_result_list
def mdd_lookup(self, keyword, ignorecase=None):
lookup_result_list = []
indexes = self.lookup_indexes(self._mdd_db, keyword, ignorecase)
with open(self._mdd_file, 'rb') as mdd_file:
for index in indexes:
lookup_result_list.append(
self.get_mdd_by_index(mdd_file, index))
return lookup_result_list
@staticmethod
def get_keys(db, query=''):
if not db:
def get_mdd_keys(self, query = ''):
if not self._mdd_db:
return []
conn = sqlite3.connect(self._mdd_db)
if query:
if '*' in query:
query = query.replace('*', '%')
query = query.replace('*','%')
else:
query = query + '%'
sql = 'SELECT key_text FROM MDX_INDEX WHERE key_text LIKE \"' + query + '\"'
else:
sql = 'SELECT key_text FROM MDX_INDEX'
with sqlite3.connect(db) as conn:
cursor = conn.execute(sql)
cursor = conn.execute('SELECT key_text FROM MDX_INDEX WHERE key_text LIKE \"' + query + '\"')
keys = [item[0] for item in cursor]
return keys
else:
cursor = conn.execute('SELECT key_text FROM MDX_INDEX')
keys = [item[0] for item in cursor]
conn.close()
return keys
def get_mdd_keys(self, query=''):
try:
return self.get_keys(self._mdd_db, query)
except:
return []
def get_mdx_keys(self, query = ''):
conn = sqlite3.connect(self._mdx_db)
if query:
if '*' in query:
query = query.replace('*','%')
else:
query = query + '%'
cursor = conn.execute('SELECT key_text FROM MDX_INDEX WHERE key_text LIKE \"' + query + '\"')
keys = [item[0] for item in cursor]
else:
cursor = conn.execute('SELECT key_text FROM MDX_INDEX')
keys = [item[0] for item in cursor]
conn.close()
return keys
def get_mdx_keys(self, query=''):
try:
return self.get_keys(self._mdx_db, query)
except:
return []
# mdx_builder = IndexBuilder("oald.mdx")
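# A minimal usage sketch of the API above ("oald.mdx" is a placeholder
# dictionary file; the .mdx.db / .mdd.db indexes are built on first use):
#   builder = IndexBuilder("oald.mdx")
#   records = builder.mdx_lookup("apple")           # list of HTML definitions
#   keys = builder.get_mdx_keys("app*")             # '*' is rewritten to SQL's '%'
#   data = builder.mdd_lookup("\\img\\spkr_r.png")  # raw bytes of an embedded media file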

View File

@ -23,9 +23,8 @@ import re
import sys
import json
from .ripemd128 import ripemd128
from .pureSalsa20 import Salsa20
from aqt.utils import showInfo, showText, tooltip
from ripemd128 import ripemd128
from pureSalsa20 import Salsa20
# zlib compression is used for engine version >=2.0
import zlib
@ -93,15 +92,12 @@ class MDict(object):
Base class which reads in header and key block.
It has no public methods and serves only as a code-sharing base class.
"""
def __init__(self, fname, encoding='', passcode=None, only_header=False):
def __init__(self, fname, encoding='', passcode=None):
self._fname = fname
self._encoding = encoding.upper()
self._passcode = passcode
self.header = self._read_header()
if only_header:
return
try:
self._key_list = self._read_keys()
except:
@ -139,8 +135,7 @@ class MDict(object):
assert(key_block_info_compressed[:4] == b'\x02\x00\x00\x00')
# decrypt if needed
if self._encrypt & 0x02:
key_block_info_compressed = _mdx_decrypt(
key_block_info_compressed)
key_block_info_compressed = _mdx_decrypt(key_block_info_compressed)
# decompress
key_block_info = zlib.decompress(key_block_info_compressed[8:])
# adler checksum
@ -164,12 +159,10 @@ class MDict(object):
while i < len(key_block_info):
# number of entries in current key block
num_entries += unpack(self._number_format,
key_block_info[i:i + self._number_width])[0]
num_entries += unpack(self._number_format, key_block_info[i:i + self._number_width])[0]
i += self._number_width
# text head size
text_head_size = unpack(byte_format, key_block_info[
i:i + byte_width])[0]
text_head_size = unpack(byte_format, key_block_info[i:i + byte_width])[0]
i += byte_width
# text head
if self._encoding != 'UTF-16':
@ -177,8 +170,7 @@ class MDict(object):
else:
i += (text_head_size + text_term) * 2
# text tail size
text_tail_size = unpack(byte_format, key_block_info[
i:i + byte_width])[0]
text_tail_size = unpack(byte_format, key_block_info[i:i + byte_width])[0]
i += byte_width
# text tail
if self._encoding != 'UTF-16':
@ -186,15 +178,12 @@ class MDict(object):
else:
i += (text_tail_size + text_term) * 2
# key block compressed size
key_block_compressed_size = unpack(self._number_format, key_block_info[
i:i + self._number_width])[0]
key_block_compressed_size = unpack(self._number_format, key_block_info[i:i + self._number_width])[0]
i += self._number_width
# key block decompressed size
key_block_decompressed_size = unpack(self._number_format, key_block_info[
i:i + self._number_width])[0]
key_block_decompressed_size = unpack(self._number_format, key_block_info[i:i + self._number_width])[0]
i += self._number_width
key_block_info_list += [(key_block_compressed_size,
key_block_decompressed_size)]
key_block_info_list += [(key_block_compressed_size, key_block_decompressed_size)]
assert(num_entries == self._num_entries)
@ -209,8 +198,7 @@ class MDict(object):
# 4 bytes : compression type
key_block_type = key_block_compressed[start:start + 4]
# 4 bytes : adler checksum of decompressed key block
adler32 = unpack('>I', key_block_compressed[
start + 4:start + 8])[0]
adler32 = unpack('>I', key_block_compressed[start + 4:start + 8])[0]
if key_block_type == b'\x00\x00\x00\x00':
key_block = key_block_compressed[start + 8:end]
elif key_block_type == b'\x01\x00\x00\x00':
@ -219,12 +207,10 @@ class MDict(object):
break
# decompress key block
header = b'\xf0' + pack('>I', decompressed_size)
key_block = lzo.decompress(key_block_compressed[
start + 8:end], initSize=decompressed_size, blockSize=1308672)
key_block = lzo.decompress(key_block_compressed[start + 8:end], initSize = decompressed_size, blockSize=1308672)
elif key_block_type == b'\x02\x00\x00\x00':
# decompress key block
key_block = zlib.decompress(
key_block_compressed[start + 8:end])
key_block = zlib.decompress(key_block_compressed[start + 8:end])
# extract one single key block into a key list
key_list += self._split_key_block(key_block)
# notice that adler32 returns signed value
@ -237,11 +223,9 @@ class MDict(object):
key_list = []
key_start_index = 0
while key_start_index < len(key_block):
temp = key_block[
key_start_index:key_start_index + self._number_width]
temp = key_block[key_start_index:key_start_index + self._number_width]
# the corresponding record's offset in record block
key_id = unpack(self._number_format, key_block[
key_start_index:key_start_index + self._number_width])[0]
key_id = unpack(self._number_format, key_block[key_start_index:key_start_index + self._number_width])[0]
# key text ends with '\x00'
if self._encoding == 'UTF-16':
delimiter = b'\x00\x00'
@ -261,12 +245,6 @@ class MDict(object):
key_list += [(key_id, key_text)]
return key_list
@property
def meta(self):
return {'title': self._title, 'description': self._description,
'encoding': self._encoding, 'version': self._version,
'stylesheet': json.dumps(self._stylesheet)}
def _read_header(self):
f = open(self._fname, 'rb')
# number of bytes of header text
@ -349,8 +327,7 @@ class MDict(object):
if self._encrypt & 1:
if self._passcode is None:
raise RuntimeError(
'user identification is needed to read encrypted file')
raise RuntimeError('user identification is needed to read encrypted file')
regcode, userid = self._passcode
if isinstance(userid, unicode):
userid = userid.encode('utf8')
@ -388,8 +365,7 @@ class MDict(object):
# read key block
key_block_compressed = f.read(key_block_size)
# extract key block
key_list = self._decode_key_block(
key_block_compressed, key_block_info_list)
key_list = self._decode_key_block(key_block_compressed, key_block_info_list)
self._record_block_offset = f.tell()
f.close()
@ -434,8 +410,7 @@ class MDict(object):
# read key block
key_block_compressed = f.read(key_block_size)
# extract key block
key_list = self._decode_key_block(
key_block_compressed, key_block_info_list)
key_list = self._decode_key_block(key_block_compressed, key_block_info_list)
self._record_block_offset = f.tell()
f.close()
@ -453,7 +428,6 @@ class MDD(MDict):
>>> for filename,content in mdd.items():
... print filename, content[:10]
"""
def __init__(self, fname, passcode=None):
MDict.__init__(self, fname, encoding='UTF-16', passcode=passcode)
@ -500,8 +474,7 @@ class MDD(MDict):
break
# decompress
header = b'\xf0' + pack('>I', decompressed_size)
record_block = lzo.decompress(record_block_compressed[
start + 8:end], initSize=decompressed_size, blockSize=1308672)
record_block = lzo.decompress(record_block_compressed[start + 8:end], initSize = decompressed_size, blockSize=1308672)
elif record_block_type == b'\x02\x00\x00\x00':
# decompress
record_block = zlib.decompress(record_block_compressed[8:])
@ -530,16 +503,16 @@ class MDD(MDict):
f.close()
# Get the index list of the mdx file; the format is
# key_text (the keyword; can be obtained from the keylist below)
# file_pos (position where the record_block starts)
# compressed_size (size of the record_block before decompression)
# decompressed_size (size after decompression)
# record_block_type (compression type of the record_block)
# record_start (the following three are the parameters needed to extract a single record from the record_block; they can be stored directly)
# record_end
# offset
def get_index(self, check_block=True):
### Get the index list of the mdx file; the format is
### key_text (the keyword; can be obtained from the keylist below)
### file_pos (position where the record_block starts)
### compressed_size (size of the record_block before decompression)
### decompressed_size (size after decompression)
### record_block_type (compression type of the record_block)
### record_start (the following three are the parameters needed to extract a single record from the record_block; they can be stored directly)
### record_end
### offset
def get_index(self, check_block = True):
f = open(self._fname, 'rb')
index_dict_list = []
f.seek(self._record_block_offset)
@ -584,8 +557,7 @@ class MDD(MDict):
# decompress
header = b'\xf0' + pack('>I', decompressed_size)
if check_block:
record_block = lzo.decompress(record_block_compressed[
start + 8:end], initSize=decompressed_size, blockSize=1308672)
record_block = lzo.decompress(record_block_compressed[start + 8:end], initSize = decompressed_size, blockSize=1308672)
elif record_block_type == b'\x02\x00\x00\x00':
# decompress
_type = 2
@ -598,7 +570,7 @@ class MDD(MDict):
assert(len(record_block) == decompressed_size)
# split record block according to the offset info from key block
while i < len(self._key_list):
# empty dict used to hold the index info
### empty dict used to hold the index info
index_dict = {}
index_dict['file_pos'] = current_pos
index_dict['compressed_size'] = compressed_size
@ -606,11 +578,10 @@ class MDD(MDict):
index_dict['record_block_type'] = _type
record_start, key_text = self._key_list[i]
index_dict['record_start'] = record_start
index_dict['key_text'] = key_text.decode(
"utf-8", errors='ignore')
index_dict['key_text'] = key_text.decode("utf-8")
index_dict['offset'] = offset
# reach the end of current record block
if record_start - offset >= decompressed_size:
if record_start - offset >= decompressed_size:
break
# record end index
if i < len(self._key_list) - 1:
@ -620,11 +591,10 @@ class MDD(MDict):
index_dict['record_end'] = record_end
i += 1
if check_block:
data = record_block[
record_start - offset:record_end - offset]
data = record_block[record_start - offset:record_end - offset]
index_dict_list.append(index_dict)
# yield key_text, data
offset += decompressed_size
#yield key_text, data
offset += decompressed_size
size_counter += compressed_size
assert(size_counter == record_block_size)
f.close()
@ -640,9 +610,8 @@ class MDX(MDict):
>>> for key,value in mdx.items():
... print key, value[:10]
"""
def __init__(self, fname, encoding='', substyle=False, passcode=None, only_header=False):
MDict.__init__(self, fname, encoding, passcode, only_header)
def __init__(self, fname, encoding='', substyle=False, passcode=None):
MDict.__init__(self, fname, encoding, passcode)
self._substyle = substyle
def items(self):
@ -658,8 +627,7 @@ class MDX(MDict):
for j, p in enumerate(txt_list[1:]):
style = self._stylesheet[txt_tag[j][1:-1]]
if p and p[-1] == '\n':
txt_styled = txt_styled + \
style[0] + p.rstrip() + style[1] + '\r\n'
txt_styled = txt_styled + style[0] + p.rstrip() + style[1] + '\r\n'
else:
txt_styled = txt_styled + style[0] + p + style[1]
return txt_styled
@ -688,20 +656,20 @@ class MDX(MDict):
offset = 0
i = 0
size_counter = 0
# The format of the final index table is
# key_text (the keyword; can be obtained from the keylist below)
# file_pos (position where the record_block starts)
# compressed_size (size of the record_block before decompression)
# decompressed_size (size after decompression)
# record_block_type (compression type of the record_block)
# record_start (the following three are the parameters needed to extract a single record from the record_block; they can be stored directly)
# record_end
# offset
### The format of the final index table is
### key_text (the keyword; can be obtained from the keylist below)
### file_pos (position where the record_block starts)
### compressed_size (size of the record_block before decompression)
### decompressed_size (size after decompression)
### record_block_type (compression type of the record_block)
### record_start (the following three are the parameters needed to extract a single record from the record_block; they can be stored directly)
### record_end
### offset
for compressed_size, decompressed_size in record_block_info_list:
record_block_compressed = f.read(compressed_size)
# To get record_block_compressed we need compressed_size (which can be recorded directly)
# we also need to record the current position of the file object f
# using f.tell(); f.seek() is needed when building the index
###### To get record_block_compressed we need compressed_size (which can be recorded directly)
###### we also need to record the current position of the file object f
###### using f.tell(); f.seek() is needed when building the index
# 4 bytes indicates block compression type
record_block_type = record_block_compressed[:4]
# 4 bytes adler checksum of uncompressed content
@ -716,16 +684,15 @@ class MDX(MDict):
break
# decompress
header = b'\xf0' + pack('>I', decompressed_size)
record_block = lzo.decompress(record_block_compressed[
8:], initSize=decompressed_size, blockSize=1308672)
record_block = lzo.decompress(record_block_compressed[8:], initSize = decompressed_size, blockSize=1308672)
# zlib compression
elif record_block_type == b'\x02\x00\x00\x00':
# decompress
record_block = zlib.decompress(record_block_compressed[8:])
# The important point here is to obtain record_block first; record_block is produced by decompression, and there are three decompression methods in total
# the information needed is record_block_compressed, decompress_size,
# record_block_type
# the adler32 checksum is also needed for verification
###### The important point here is to obtain record_block first; record_block is produced by decompression, and there are three decompression methods in total
###### the information needed is record_block_compressed, decompress_size,
###### record_block_type
###### the adler32 checksum is also needed for verification
# notice that adler32 return signed value
assert(adler32 == zlib.adler32(record_block) & 0xffffffff)
@ -742,15 +709,13 @@ class MDX(MDict):
else:
record_end = len(record_block) + offset
i += 1
# need record_block, record_start, record_end,
# offset
record = record_block[
record_start - offset:record_end - offset]
############# need record_block, record_start, record_end,
############# offset
record = record_block[record_start - offset:record_end - offset]
# convert to utf-8
record = record.decode(self._encoding, errors='ignore').strip(
u'\x00').encode('utf-8')
record = record.decode(self._encoding, errors='ignore').strip(u'\x00').encode('utf-8')
# substitute styles
# whether to substitute the stylesheet
############# whether to substitute the stylesheet
if self._substyle and self._stylesheet:
record = self._substitute_stylesheet(record)
@ -761,19 +726,19 @@ class MDX(MDict):
f.close()
# Get the index list of the mdx file; the format is
# key_text (the keyword; can be obtained from the keylist below)
# file_pos (position where the record_block starts)
# compressed_size (size of the record_block before decompression)
# decompressed_size (size after decompression)
# record_block_type (compression type of the record_block)
# record_start (the following three are the parameters needed to extract a single record from the record_block; they can be stored directly)
# record_end
# offset
# required metadata
###
def get_index(self, check_block=True):
# index list
### Get the index list of the mdx file; the format is
### key_text (the keyword; can be obtained from the keylist below)
### file_pos (position where the record_block starts)
### compressed_size (size of the record_block before decompression)
### decompressed_size (size after decompression)
### record_block_type (compression type of the record_block)
### record_start (the following three are the parameters needed to extract a single record from the record_block; they can be stored directly)
### record_end
### offset
### required metadata
###
def get_index(self, check_block = True):
### index list
index_dict_list = []
f = open(self._fname, 'rb')
f.seek(self._record_block_offset)
@ -798,21 +763,21 @@ class MDX(MDict):
offset = 0
i = 0
size_counter = 0
# The format of the final index table is
# key_text (the keyword; can be obtained from the keylist below)
# file_pos (position where the record_block starts)
# compressed_size (size of the record_block before decompression)
# decompressed_size (size after decompression)
# record_block_type (compression type of the record_block)
# record_start (the following three are the parameters needed to extract a single record from the record_block; they can be stored directly)
# record_end
# offset
### The format of the final index table is
### key_text (the keyword; can be obtained from the keylist below)
### file_pos (position where the record_block starts)
### compressed_size (size of the record_block before decompression)
### decompressed_size (size after decompression)
### record_block_type (compression type of the record_block)
### record_start (the following three are the parameters needed to extract a single record from the record_block; they can be stored directly)
### record_end
### offset
for compressed_size, decompressed_size in record_block_info_list:
current_pos = f.tell()
record_block_compressed = f.read(compressed_size)
# To get record_block_compressed we need compressed_size (which can be recorded directly)
# we also need to record the current position of the file object f
# using f.tell(); f.seek() is needed when building the index
###### To get record_block_compressed we need compressed_size (which can be recorded directly)
###### we also need to record the current position of the file object f
###### using f.tell(); f.seek() is needed when building the index
# 4 bytes indicates block compression type
record_block_type = record_block_compressed[:4]
# 4 bytes adler checksum of uncompressed content
@ -830,25 +795,24 @@ class MDX(MDict):
# decompress
header = b'\xf0' + pack('>I', decompressed_size)
if check_block:
record_block = lzo.decompress(record_block_compressed[
8:], initSize=decompressed_size, blockSize=1308672)
record_block = lzo.decompress(record_block_compressed[8:], initSize = decompressed_size, blockSize=1308672)
# zlib compression
elif record_block_type == b'\x02\x00\x00\x00':
# decompress
_type = 2
if check_block:
record_block = zlib.decompress(record_block_compressed[8:])
# The important point here is to obtain record_block first; record_block is produced by decompression, and there are three decompression methods in total
# the information needed is record_block_compressed, decompress_size,
# record_block_type
# the adler32 checksum is also needed for verification
###### The important point here is to obtain record_block first; record_block is produced by decompression, and there are three decompression methods in total
###### the information needed is record_block_compressed, decompress_size,
###### record_block_type
###### the adler32 checksum is also needed for verification
# notice that adler32 return signed value
if check_block:
assert(adler32 == zlib.adler32(record_block) & 0xffffffff)
assert(len(record_block) == decompressed_size)
# split record block according to the offset info from key block
while i < len(self._key_list):
# empty dict used to hold the index info
### empty dict used to hold the index info
index_dict = {}
index_dict['file_pos'] = current_pos
index_dict['compressed_size'] = compressed_size
@ -856,11 +820,10 @@ class MDX(MDict):
index_dict['record_block_type'] = _type
record_start, key_text = self._key_list[i]
index_dict['record_start'] = record_start
index_dict['key_text'] = key_text.decode(
'utf-8', errors='ignore')
index_dict['key_text'] = key_text.decode('utf-8')
index_dict['offset'] = offset
# reach the end of current record block
if record_start - offset >= decompressed_size:
if record_start - offset >= decompressed_size:
break
# record end index
if i < len(self._key_list) - 1:
@ -869,27 +832,31 @@ class MDX(MDict):
record_end = decompressed_size + offset
index_dict['record_end'] = record_end
i += 1
# need record_block, record_start, record_end,
# offset
############# need record_block, record_start, record_end,
############# offset
if check_block:
record = record_block[
record_start - offset:record_end - offset]
record = record_block[record_start - offset:record_end - offset]
# convert to utf-8
record = record.decode(self._encoding, errors='ignore').strip(
u'\x00').encode('utf-8')
record = record.decode(self._encoding, errors='ignore').strip(u'\x00').encode('utf-8')
# substitute styles
# whether to substitute the stylesheet
############# whether to substitute the stylesheet
if self._substyle and self._stylesheet:
record = self._substitute_stylesheet(record)
index_dict_list.append(index_dict)
offset += decompressed_size
offset += decompressed_size
size_counter += compressed_size
# todo: note!!!
#assert(size_counter == record_block_size)
#todo: note!!!
#assert(size_counter == record_block_size)
f.close()
return index_dict_list
# slightly different from the mdd part here: the encoding and stylesheet info must be passed back as well
meta = {}
meta['encoding'] = self._encoding
meta['stylesheet'] = json.dumps(self._stylesheet)
meta['title'] = self._title
meta['description'] = self._description
return {"index_dict_list":index_dict_list, 'meta':meta}
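# For reference, a sketch of the structure returned above (field names match
# the comments in get_index; the values are illustrative only):
#   {
#       'index_dict_list': [
#           {'key_text': u'apple', 'file_pos': 1024,
#            'compressed_size': 65536, 'decompressed_size': 262144,
#            'record_block_type': 2, 'record_start': 0,
#            'record_end': 2048, 'offset': 0},
#           ...
#       ],
#       'meta': {'encoding': 'UTF-8', 'stylesheet': '{}',  # JSON string
#                'title': '...', 'description': '...'}
#   }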
if __name__ == '__main__':
import sys
import os
@ -905,8 +872,7 @@ if __name__ == '__main__':
try:
regcode = codecs.decode(regcode, 'hex')
except:
raise argparse.ArgumentTypeError(
"regcode must be a 32 bytes hexadecimal string")
raise argparse.ArgumentTypeError("regcode must be a 32 bytes hexadecimal string")
return regcode, userid
parser = argparse.ArgumentParser()
@ -987,8 +953,7 @@ if __name__ == '__main__':
sf.close()
# write out optional data files
if mdd:
datafolder = os.path.join(
os.path.dirname(args.filename), args.datafolder)
datafolder = os.path.join(os.path.dirname(args.filename), args.datafolder)
if not os.path.exists(datafolder):
os.makedirs(datafolder)
for key, value in mdd.items():

View File

@ -73,6 +73,7 @@ class QueryThread(QThread):
super(QueryThread, self).__init__()
self.index = 0
self.exit = False
self.finished = False
self.manager = manager
self.note_flush.connect(manager.handle_flush)
@ -96,6 +97,8 @@ class QueryThread(QThread):
if self.manager:
self.manager.queue.task_done()
self.finished = True
class QueryWorkerManager(object):
@ -122,11 +125,15 @@ class QueryWorkerManager(object):
def start(self):
self.total = self.queue.qsize()
self.progress.start(self.total, min=0)
for x in range(0, min(config.thread_number, self.total)):
self.get_worker()
for worker in self.workers:
worker.start()
if self.total > 1:
for x in range(0, min(config.thread_number, self.total)):
self.get_worker()
for worker in self.workers:
worker.start()
else:
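# a single query is not worth a thread pool: run the worker synchronously
# on the calling thread (run() instead of start())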
worker = self.get_worker()
worker.run()
def update(self, note, results, success_num):
self.mutex.lock()
@ -137,11 +144,15 @@ class QueryWorkerManager(object):
val = update_note_fields(note, results)
self.fields += val
self.mutex.unlock()
return val > 0
if self.total > 1:
return val > 0
else:
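# synchronous mode: no worker thread will signal the manager,
# so flush the note here directly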
self.handle_flush(note)
return False
def join(self):
for worker in self.workers:
while not worker.isFinished():
while not worker.finished:
if self.progress.abort():
worker.exit = True
break
@ -323,22 +334,25 @@ def query_all_flds(note):
dict_unique = each.get('dict_unique', '').strip()
if dict_name and dict_name not in _sl('NOT_DICT_FIELD') and dict_field:
s = services.get(dict_unique, None)
if s == None:
services[dict_unique] = service_pool.get(dict_unique)#service_manager.get_service(dict_unique)
tasks.append({'k': dict_unique, 'w': word, 'f': dict_field, 'i': i})
if s is None:
s = service_pool.get(dict_unique)
if s.support:
services[dict_unique] = s
if s and s.support:
tasks.append({'k': dict_unique, 'w': word, 'f': dict_field, 'i': i})
success_num = 0
result = defaultdict(QueryResult)
for task in tasks:
try:
service = services.get(task['k'], None)
qr = service.active(task['f'], task['w'])
if qr:
result.update({task['i']: qr})
success_num += 1
except:
showInfo(_("NO_QUERY_WORD"))
pass
#try:
service = services.get(task['k'], None)
qr = service.active(task['f'], task['w'])
if qr:
result.update({task['i']: qr})
success_num += 1
#except:
# showInfo(_("NO_QUERY_WORD"))
# pass
for service in services.values():
service_pool.put(service)

View File

@ -1,17 +1,22 @@
#-*- coding:utf-8 -*-
import re
from .base import MdxService, export, register, with_styles, parseHtml
from aqt.utils import showInfo, showText
from .base import MdxService, export, register, with_styles
PATH = u'D:\\mdx_server\\mdx\\LDOCE6.mdx'
path = u'D:\\mdx_server\\mdx\\LDOCE6.mdx'
VOICE_PATTERN = r'<a href="sound://([\w/]+\w*\.mp3)"><img src="img/spkr_%s.png"></a>'
MAPPINGS = [
['br', [re.compile(VOICE_PATTERN % r'r')]],
['us', [re.compile(VOICE_PATTERN % r'b')]]
]
LANG_TO_REGEXPS = {lang: regexps for lang, regexps in MAPPINGS}
@register(u'本地词典-LDOCE6')
class Ldoce6(MdxService):
def __init__(self):
super(Ldoce6, self).__init__(path)
super(Ldoce6, self).__init__(PATH)
@property
def unique(self):
@ -27,58 +32,74 @@ class Ldoce6(MdxService):
m = re.search(r'<span class="pron">(.*?)</span>', html)
if m:
return m.groups()[0]
return ''
@export(u'Bre单词发音', 2)
def _fld_voice(self, html, voice):
"""获取发音字段"""
from hashlib import sha1
for regexp in LANG_TO_REGEXPS[voice]:
match = regexp.search(html)
if match:
val = '/' + match.group(1)
hex_digest = sha1(
val.encode('utf-8') if isinstance(val, unicode)
else val
).hexdigest().lower()
assert len(hex_digest) == 40, "unexpected output from hash library"
name = '.'.join([
'-'.join([
'mdx', self.unique.lower(), hex_digest[:8], hex_digest[8:16],
hex_digest[16:24], hex_digest[24:32], hex_digest[32:],
]),
'mp3',
])
name = self.save_file(val, name)
if name:
return self.get_anki_label(name, 'audio')
return ''
@export(u'英式发音', 2)
def fld_voicebre(self):
html = self.get_html()
m = re.search(r'<span class="brevoice">(.*?)</span brevoice>', html)
if m:
return m.groups()[0]
return ''
return self._fld_voice(self.get_html(), 'br')
@export(u'Ame单词发音', 3)
@export(u'美式发音', 3)
def fld_voiceame(self):
html = self.get_html()
m = re.search(r'<span class="amevoice">(.*?)</span amevoice>', html)
if m:
return m.groups()[0]
return ''
return self._fld_voice(self.get_html(), 'us')
@export(u'sentence', 4)
@export(u'例句', 4)
def fld_sentence(self):
html = self.get_html()
m = re.search(r'<span class="example">(.*?)</span example>', html)
m = re.findall(r'<span class="example"\s*.*>\s*.*<\/span>', self.get_html())
if m:
return re.sub('<img.*?png">', '', m.groups()[0])
return ''
@export(u'def', 5)
def fld_definate(self):
html = self.get_html()
m = re.search(r'<span class="def">(.*?)</span def>', html)
if m:
return m.groups()[0]
return ''
@export(u'random_sentence', 6)
def fld_random_sentence(self):
html = self.get_html()
m = re.findall(r'<span class="example">(.*?)</span example>', html)
if m:
number = len(m)
index = random.randrange(0, number - 1, 1)
return re.sub('<img.*?png">', '', m[index])
return ''
@export(u'all sentence', 7)
def fld_allsentence(self):
html = self.get_html()
m = re.findall(
r'(<span class="example">.+?</span example><span class="example_c">.+?</span example_c>)', html)
if m:
items = 0
soup = parseHtml(m[0])
el_list = soup.findAll('span', {'class':'example'})
if el_list:
maps = [u''.join(str(content).decode('utf-8') for content in element.contents)
for element in el_list]
my_str = ''
for items in range(len(m)):
my_str = my_str + m[items]
return my_str
for i_str in maps:
i_str = re.sub(r'<a[^>]+?href=\"sound\:.*\.mp3\".*</a>', '', i_str)
i_str = i_str.replace('&nbsp;', '')
my_str = my_str + '<li>' + i_str + '</li>'
return self._css(my_str)
return ''
@export(u'释义', 5)
def fld_definate(self):
m = re.findall(r'<span class="def"\s*.*>\s*.*<\/span>', self.get_html())
if m:
soup = parseHtml(m[0])
el_list = soup.findAll('span', {'class':'def'})
if el_list:
maps = [u''.join(str(content).decode('utf-8') for content in element.contents)
for element in el_list]
my_str = ''
for i_str in maps:
my_str = my_str + '<li>' + i_str + '</li>'
return self._css(my_str)
return ''
@with_styles(cssfile='_ldoce6.css')
def _css(self, val):
return val
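# A sketch of the audio-file naming done by _fld_voice above, assuming the
# service's unique label is 'ldoce6'; the captured sound path is hypothetical:
#   from hashlib import sha1
#   val = '/us_apple.mp3'
#   h = sha1(val.encode('utf-8')).hexdigest().lower()
#   name = '-'.join(['mdx', 'ldoce6', h[:8], h[8:16],
#                    h[16:24], h[24:32], h[32:]]) + '.mp3'
#   # save_file(val, name) then extracts the mp3 bytes from the LDOCE6 mdd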

View File

@ -1,8 +1,8 @@
#-*- coding:utf-8 -*-
#
# Copyright © 2016-2017 Liang Feng <finalion@gmail.com>
# Copyright © 2016-2017 ST.Huang <wenhonghuang@gmail.com>
#
# Support: Report an issue at https://github.com/finalion/WordQuery/issues
# Support: Report an issue at https://github.com/sth2018/FastWordQuery/issues
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -29,17 +29,20 @@ import sqlite3
import urllib
import urllib2
import zlib
import random
from collections import defaultdict
from functools import wraps
import cookielib
from aqt import mw
from aqt.qt import QFileDialog
from aqt.utils import showInfo, showText
from ..context import config
from ..lang import _
from ..libs import MdxBuilder, StardictBuilder
from ..utils import MapDict, wrap_css
from ..libs.bs4 import BeautifulSoup
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
def register(label):
@ -120,17 +123,45 @@ def with_styles(**styles):
return _deco
return _with
# BS4 lock pool, to keep concurrent parsing from hanging the program
BS_LOCKS = [_threading.Lock(), _threading.Lock()]
def parseHtml(html):
'''
Parse html with BS4
'''
# randrange's upper bound is exclusive; the former "len(BS_LOCKS) - 1" always picked lock 0
lock = BS_LOCKS[random.randrange(0, len(BS_LOCKS))]
lock.acquire()
soup = BeautifulSoup(html, 'html.parser')
lock.release()
return soup
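# Usage sketch (services share this helper):
#   soup = parseHtml(html)
#   el_list = soup.findAll('span', {'class': 'def'})
# Serialising BeautifulSoup calls through a small pool of locks trades some
# parallelism for not hanging the query worker threads.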
class Service(object):
'''service base class'''
def __init__(self):
self.cache = defaultdict(defaultdict)
self._exporters = self.get_exporters()
self._fields, self._actions = zip(*self._exporters) \
if self._exporters else (None, None)
# query interval: default 500ms
self.query_interval = 0.5
def cache_this(self, result):
self.cache[self.word].update(result)
return result
def cached(self, key):
return (self.word in self.cache) and self.cache[self.word].has_key(key)
def cache_result(self, key):
return self.cache[self.word].get(key, u'')
@property
def support(self):
return True
@property
def fields(self):
return self._fields
@ -158,9 +189,9 @@ class Service(object):
self.word = word
# if the service instance is LocalService,
# then the index has to be built first.
if isinstance(self, LocalService):
if isinstance(self, MdxService) or isinstance(self, StardictService):
self.builder.check_build()
#if isinstance(self, LocalService):
# if isinstance(self, MdxService) or isinstance(self, StardictService):
# self.builder.check_build()
for each in self.exporters:
if action_label == each[0]:
@ -180,21 +211,10 @@ class WebService(Service):
def __init__(self):
super(WebService, self).__init__()
self.cache = defaultdict(defaultdict)
self._cookie = cookielib.CookieJar()
self._opener = urllib2.build_opener(
urllib2.HTTPCookieProcessor(self._cookie))
self.query_interval = 1
def cache_this(self, result):
self.cache[self.word].update(result)
return result
def cached(self, key):
return (self.word in self.cache) and self.cache[self.word].has_key(key)
def cache_result(self, key):
return self.cache[self.word].get(key, u'')
self.query_interval = 1.0
@property
def title(self):
@ -229,6 +249,9 @@ class WebService(Service):
class LocalService(Service):
"""
Local dictionary
"""
def __init__(self, dict_path):
super(LocalService, self).__init__()
@ -236,6 +259,10 @@ class LocalService(Service):
self.builder = None
self.missed_css = set()
@property
def support(self):
return os.path.isfile(self.dict_path)
@property
def unique(self):
return self.dict_path
@ -248,28 +275,34 @@ class LocalService(Service):
def _filename(self):
return os.path.splitext(os.path.basename(self.dict_path))[0]
# shared pool of MdxBuilder instances, keyed by dictionary path
mdx_builders = defaultdict(dict)
class MdxService(LocalService):
"""
Local MDX dictionary
"""
def __init__(self, dict_path):
super(MdxService, self).__init__(dict_path)
self.media_cache = defaultdict(set)
self.cache = defaultdict(str)
self.html_cache = defaultdict(str)
self.query_interval = 0.01
self.styles = []
self.builder = MdxBuilder(dict_path)
self.builder.get_header()
if self.support:
if not mdx_builders.has_key(dict_path) or not mdx_builders[dict_path]:
mdx_builders[dict_path] = MdxBuilder(dict_path)
self.builder = mdx_builders[dict_path]
@staticmethod
def support(dict_path):
return os.path.isfile(dict_path) and dict_path.lower().endswith('.mdx')
@property
def support(self):
return os.path.isfile(self.dict_path) and self.dict_path.lower().endswith('.mdx')
@property
def title(self):
if config.use_filename or not self.builder._title or self.builder._title.startswith('Title'):
return self._filename
else:
return self.builder.meta['title']
return self.builder._title
@export(u"default", 0)
def fld_whole(self):
@ -277,94 +310,46 @@ class MdxService(LocalService):
js = re.findall(r'<script.*?>.*?</script>', html, re.DOTALL)
return QueryResult(result=html, js=u'\n'.join(js))
def _get_definition_mdx(self):
"""根据关键字得到MDX词典的解释"""
content = self.builder.mdx_lookup(self.word)
str_content = ""
if len(content) > 0:
for c in content:
str_content += c.replace("\r\n","").replace("entry:/","")
return str_content
def _get_definition_mdd(self, word):
"""根据关键字得到MDX词典的媒体"""
word = word.replace('/', '\\')
content = self.builder.mdd_lookup(word)
if len(content) > 0:
return [content[0]]
else:
return []
def get_html(self):
if not self.cache[self.word]:
html = ''
result = self.builder.mdx_lookup(self.word) # self.word: unicode
if result:
if result[0].upper().find(u"@@@LINK=") > -1:
# redirect to the new word after the equals symbol.
self.word = result[0][len(u"@@@LINK="):].strip()
return self.get_html()
else:
html = self.adapt_to_anki(result[0])
self.cache[self.word] = html
return self.cache[self.word]
"""取得self.word对应的html页面"""
if not self.html_cache[self.word]:
html = self._get_definition_mdx()
if html:
self.html_cache[self.word] = html
return self.html_cache[self.word]
def adapt_to_anki(self, html):
"""
1. convert the media paths to actual paths in anki's collection media folder.
2. remove the js code (inline js would expire).
"""
# convert media path, save media files
media_files_set = set()
mcss = re.findall(r'href="(\S+?\.css)"', html)
media_files_set.update(set(mcss))
mjs = re.findall(r'src="([\w\./]\S+?\.js)"', html)
media_files_set.update(set(mjs))
msrc = re.findall(r'<img.*?src="([\w\./]\S+?)".*?>', html)
media_files_set.update(set(msrc))
msound = re.findall(r'href="sound:(.*?\.(?:mp3|wav))"', html)
if config.export_media:
media_files_set.update(set(msound))
for each in media_files_set:
html = html.replace(each, u'_' + each.split('/')[-1])
# find sounds
p = re.compile(
r'<a[^>]+?href=\"(sound:_.*?\.(?:mp3|wav))\"[^>]*?>(.*?)</a>')
html = p.sub(u"[\\1]\\2", html)
self.save_media_files(media_files_set)
for cssfile in mcss:
cssfile = '_' + \
os.path.basename(cssfile.replace('\\', os.path.sep))
# if the css file does not exist, the user can place it in the media
# folder first; the wrap process will still run and generate
# the desired file.
if not os.path.exists(cssfile):
self.missed_css.add(cssfile[1:])
new_css_file, wrap_class_name = wrap_css(cssfile)
html = html.replace(cssfile, new_css_file)
# add global div to the result html
html = u'<div class="{0}">{1}</div>'.format(
wrap_class_name, html)
return html
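# Worked example of the rewriting above (hypothetical input):
#   <img src="img/spkr_r.png">         ->  <img src="_spkr_r.png">
#   <a href="sound:_apple.mp3">UK</a>  ->  [sound:_apple.mp3]UK
# and the result is wrapped in <div class="{wrap_class_name}">...</div>
# so that the dictionary's css applies only inside this field.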
def save_file(self, filepath_in_mdx, savepath=None):
basename = os.path.basename(filepath_in_mdx.replace('\\', os.path.sep))
if savepath is None:
savepath = '_' + basename
def save_file(self, filepath_in_mdx, savepath):
"""从mmd中取出filepath_in_mdx媒体文件并保存到savepath"""
try:
bytes_list = self.builder.mdd_lookup(filepath_in_mdx)
if bytes_list and not os.path.exists(savepath):
with open(savepath, 'wb') as f:
f.write(bytes_list[0])
return savepath
bytes_list = self._get_definition_mdd(filepath_in_mdx)
if bytes_list:
if not os.path.exists(savepath):
with open(savepath, 'wb') as f:
f.write(bytes_list[0])
return savepath
except sqlite3.OperationalError as e:
showInfo(str(e))
def save_media_files(self, data):
"""
get the necessary static files from the local mdx dictionary
data: a set of media file paths referenced by the html
"""
diff = data.difference(self.media_cache['files'])
self.media_cache['files'].update(diff)
lst, errors = list(), list()
wild = [
'*' + os.path.basename(each.replace('\\', os.path.sep)) for each in diff]
try:
for each in wild:
keys = self.builder.get_mdd_keys(each)
if not keys:
errors.append(each)
lst.extend(keys)
for each in lst:
self.save_file(each)
except AttributeError:
#showInfo(str(e))
pass
return errors
return ''
class StardictService(LocalService):
@ -372,12 +357,13 @@ class StardictService(LocalService):
def __init__(self, dict_path):
super(StardictService, self).__init__(dict_path)
self.query_interval = 0.05
self.builder = StardictBuilder(self.dict_path, in_memory=False)
self.builder.get_header()
if self.support:
self.builder = StardictBuilder(self.dict_path, in_memory=False)
self.builder.get_header()
@staticmethod
def support(dict_path):
return os.path.isfile(dict_path) and dict_path.lower().endswith('.ifo')
@property
def support(self):
return os.path.isfile(self.dict_path) and self.dict_path.lower().endswith('.ifo')
@property
def title(self):
@ -388,7 +374,7 @@ class StardictService(LocalService):
@export(u"default", 0)
def fld_whole(self):
self.builder.check_build()
#self.builder.check_build()
try:
result = self.builder[self.word]
result = result.strip().replace('\r\n', '<br />')\

View File

@ -2,8 +2,7 @@
import re
from aqt.utils import showInfo, showText
from BeautifulSoup import BeautifulSoup
from .base import WebService, export, register, with_styles
from .base import WebService, export, register, with_styles, parseHtml
@register(u'Bing')
@ -15,55 +14,28 @@ class Bing(WebService):
def _get_content(self):
word = self.word.replace(' ', '_')
data = self.get_response(u"http://cn.bing.com/dict/search?q={}&mkt=zh-cn".format(word))
soup = BeautifulSoup(data)
def _get_element(soup, tag, id=None, class_=None, subtag=None):
# element = soup.find(tag, id=id, class_=class_) # bs4
element = None
if id:
element = soup.find(tag, {"id": id})
if class_:
element = soup.find(tag, {"class": class_})
if subtag and element:
element = getattr(element, subtag, '')
return element
soup = parseHtml(data)
result = {}
element = _get_element(soup, 'div', class_='hd_prUS')
element = soup.find('div', class_='hd_prUS')
if element:
result['phonitic_us'] = str(element).decode('utf-8')
element = _get_element(soup, 'div', class_='hd_pr')
element = soup.find('div', class_='hd_pr')
if element:
result['phonitic_uk'] = str(element).decode('utf-8')
element = _get_element(soup, 'div', class_='hd_if')
element = soup.find('div', class_='hd_if')
if element:
result['participle'] = str(element).decode('utf-8')
element = _get_element(soup, 'div', class_='qdef', subtag='ul')
element = soup.find('div', class_='qdef')
if element:
result['def'] = u''.join([str(content).decode('utf-8')
for content in element.contents])
# for pair in pairs])
# result = _get_from_element(
# result, 'advanced_ec', soup, 'div', id='authid')
# result = _get_from_element(
# result, 'ec', soup, 'div', id='crossid')
# result = _get_from_element(
# result, 'ee', soup, 'div', id='homoid')
# result = _get_from_element(
# result, 'web_definition', soup, 'div', id='webid')
# result = _get_from_element(
# result, 'collocation', soup, 'div', id='colid')
# result = _get_from_element(
# result, 'synonym', soup, 'div', id='synoid')
# result = _get_from_element(
# result, 'antonym', soup, 'div', id='antoid')
# result = _get_from_element(
# result, 'samples', soup, 'div', id='sentenceCon')
element = getattr(element, 'ul', '')
if element:
result['def'] = u''.join([str(content) for content in element.contents])
return self.cache_this(result)
# except Exception as e:
# showInfo(str(e))
# return {}
def _get_field(self, key, default=u''):
return self.cache_result(key) if self.cached(key) else self._get_content().get(key, default)
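# read-through cache: serve the field from self.cache when this word has
# already been scraped, otherwise fetch and parse the page once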
@ -90,35 +62,3 @@ class Bing(WebService):
if val == None or val == '':
return ''
return self._css(val)
# @export(u'权威英汉双解', 5)
# def fld_advanced_ec(self):
# return self._get_field('advanced_ec')
# @export(u'英汉', 6)
# def fld_ec(self):
# return self._get_field('ec')
# @export(u'英英', 7)
# def fld_ee(self):
# return self._get_field('ee')
# @export(u'网络释义', 8)
# def fld_web_definition(self):
# return self._get_field('web_definition')
# @export(u'搭配', 9)
# def fld_collocation(self):
# return self._get_field('collocation')
# @export(u'同义词', 10)
# def fld_synonym(self):
# return self._get_field('synonym')
# @export(u'反义词', 11)
# def fld_antonym(self):
# return self._get_field('antonym')
# @export(u'例句', 12)
# def fld_samples(self):
# return self._get_field('samples')

View File

@ -8,9 +8,7 @@ Created: 12/20/2017
"""
import os
from warnings import filterwarnings
import requests as rq
from bs4 import BeautifulSoup, Tag
from ..libs.bs4 import BeautifulSoup, Tag
from .base import WebService, export, register, with_styles

View File

@ -84,31 +84,31 @@ class ServiceManager(object):
web_services, local_custom_services = set(), set()
mypath = os.path.dirname(os.path.realpath(__file__))
files = [f for f in os.listdir(mypath)
if f not in ('__init__.py', 'base.py', 'manager.py', 'pool.py') and not f.endswith('.pyc')]
if f not in ('__init__.py', 'base.py', 'manager.py', 'pool.py') and not f.endswith('.pyc') and not os.path.isdir(mypath+os.sep+f)]
base_class = (WebService, LocalService,
MdxService, StardictService)
for f in files:
try:
module = importlib.import_module(
'.%s' % os.path.splitext(f)[0], __package__)
for name, cls in inspect.getmembers(module, predicate=inspect.isclass):
if cls in base_class:
continue
#try:
#service = cls(*args)
service = service_wrap(cls, *args)
service.__unique__ = name
if issubclass(cls, WebService):
web_services.add(service)
# get the customized local services
if issubclass(cls, LocalService):
local_custom_services.add(service)
#except Exception:
# exclude the local service whose path has error.
# pass
except ImportError:
continue
#try:
module = importlib.import_module(
'.%s' % os.path.splitext(f)[0], __package__)
for name, cls in inspect.getmembers(module, predicate=inspect.isclass):
if cls in base_class:
continue
#try:
#service = cls(*args)
service = service_wrap(cls, *args)
service.__unique__ = name
if issubclass(cls, WebService):
web_services.add(service)
# get the customized local services
if issubclass(cls, LocalService):
local_custom_services.add(service)
#except Exception:
# exclude the local service whose path has error.
# pass
#except ImportError:
# continue
return web_services, local_custom_services
def _get_available_local_services(self):

View File

@ -1,47 +1,36 @@
# coding=utf-8
from warnings import filterwarnings
#from warnings import filterwarnings
from ..libs.bs4 import Tag
from .base import WebService, export, register, with_styles, parseHtml
from bs4 import BeautifulSoup, Tag
from requests import Session
from .base import WebService, export, register, with_styles
filterwarnings('ignore')
#filterwarnings('ignore')
import sys
reload(sys)
sys.setdefaultencoding('utf8')
#reload(sys)
#sys.setdefaultencoding('utf8')
BASE_URL = u'https://www.oxfordlearnersdictionaries.com/definition/english/{word}'
@register(u'牛津学习词典')
class OxfordLearning(WebService):
_base_url = 'https://www.oxfordlearnersdictionaries.com/definition/english/'
def __init__(self):
super(OxfordLearning, self).__init__()
self.s = Session()
self.s.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36'
}
self.s.get(self._base_url)
def query(self, word):
"""
:param word:
:rtype: WebWord
"""
_qry_url = self._base_url + word
qry_url = BASE_URL.format(word=word)
retried = 10
while retried:
try:
rsp = self.s.get(_qry_url, )
if rsp.status_code == 200:
return OxfordLearningDictWord(rsp.content.decode('utf-8'))
rsp = self.get_response(qry_url, timeout=15)
if rsp:
return OxfordLearningDictWord(rsp.decode('utf-8'))
break
except:
retried -= 1
@ -121,7 +110,7 @@ class OxfordLearningDictWord:
if not markups:
return
self.markups = markups
self.bs = BeautifulSoup(self.markups, from_encoding="utf-8")
self.bs = parseHtml(self.markups)
self._defs = []
self._defs_html = []

View File

@ -0,0 +1,733 @@
.entry {
line-height: 150%;
color: black;
display: block;
}
.abbr {
font-weight: bold;
}
.ac,.ac {
padding-left: 2px;
padding-right: 2px;
border-radius: 2px 2px 2px 2px;
border-style: solid;
border-width: 1px;
font-variant: small-caps;
font-size: 80%;
font-weight: bold;
color: blue;
}
.amequiv {
font-weight: bold;
}
.brequiv {
font-weight: bold;
}
.collo {
font-weight: bold;
}
.colloexa {
display: block;
}
.colloinexa {
font-style: italic;
font-weight: bold;
}
.comp {
font-weight: bold;
}
.deriv {
font-weight: bold;
color: blue;
}
.errorbox {
display: block;
}
.etymsense {
display: block;
}
.etymrefhwd {
font-style: italic;
}
.etymrefhom {
font-size: 80%;
vertical-align: super;
font-style: normal;
}
.etymorigin {
font-style: italic;
}
.etymtran {
font-weight: bold;
}
.etymbox {
margin-top: 1em;
display: block;
}
.example {
font-style: italic;
display: block;
color: blue;
}
.freq,.freq {
padding-left: 2px;
padding-right: 2px;
border-radius: 2px 2px 2px 2px;
border-style: solid;
border-width: 1px;
font-variant: small-caps;
font-size: 80%;
font-weight: bold;
color: red;
}
.level {
color: red;
font-size: 160%;
}
.fullform {
font-weight: bold;
}
.geo,span.geo {
font-weight: normal;
font-style: italic;
color: purple;
}
.gloss,.collgloss {
font-weight: normal;
font-style: normal;
color: black;
}
.gram {
color: green;
}
.hintbold {
font-weight: bold;
}
.hintitalic {
font-style: italic;
}
hinttitle {
font-weight: bold;
}
.homnum {
vertical-align: super;
font-size: 8pt;
color: blue;
font-weight: bold;
}
.hwd {
display: none;
}
.hyphenation {
font-weight: bold;
font-size: 120%;
color: blue;
}
.frequent {
color: red;
}
.lexunit {
font-weight: bold;
}
.lexvar {
font-weight: bold;
}
.linkword {
font-style: italic;
}
note {
display: block;
}
object {
font-weight: normal;
}
.opp {
font-weight: bold;
}
.orthvar {
font-weight: bold;
}
.pastpart {
font-weight: bold;
}
pastpartx {
font-weight: bold;
}
.pasttense {
font-weight: bold;
}
pasttensex {
font-weight: bold;
}
.phrvbentry {
display: block;
margin-top: 10px;
margin-left: 10px;
}
.phrvbhwd {
font-weight: bold;
color: blue;
}
.pluralform {
font-weight: bold;
}
.pos {
font-style: italic;
color: green;
font-weight: normal;
}
.prespart {
font-weight: bold;
}
prespartx {
font-weight: bold;
}
.propform {
font-weight: bold;
display: block;
}
.propformprep {
font-weight: bold;
display: block;
}
.ptandpp {
font-weight: bold;
}
ptandppx {
font-weight: bold;
}
.refhomnum {
vertical-align: super;
font-size: 60%;
}
.refhwd,.refsensenum,refsense {
font-style: normal;
font-variant: small-caps;
text-transform: lowercase;
}
.refsensenum {
font-size: 80%;
}
crossrefto .reflex {
display: none;
}
.reflex {
font-weight: bold;
}
.registerlab {
font-style: italic;
color: purple;
}
.relatedwd {
font-weight: bold;
}
.runon {
display: block;
margin-top: 8px;
}
.sense {
display: block;
margin-bottom: 14px;
margin-top: 6px;
}
.signpost {
color: white;
background-color: #35A3FF;
margin-left: .5em;
font-weight: bold;
font-variant: small-caps;
text-transform: uppercase;
font-size: 80%;
padding: 1px 2px 1px 1px;
}
.spokensect {
border-top: solid;
border-bottom: solid;
border-width: 2px;
display: block;
margin-bottom: 1ex;
clear: both;
margin-top: 1ex;
border-color: #00A2E8;
padding: 3px;
}
.spokensecthead {
display: block;
color: #00A2E8;
font-weight: bold;
}
.pronstrong {
font-style: italic;
}
.subsense {
display: block;
}
.superl {
font-weight: bold;
}
.syn {
font-weight: bold;
}
.t3perssing {
font-weight: bold;
}
t3perssingx {
font-weight: bold;
}
unclassified {
font-weight: bold;
}
span.neutral {
font-style: normal;
font-weight: normal;
font-variant: normal;
color: black;
text-decoration: none;
}
.cross {
color: red;
font-weight: bold;
}
span.italic {
color: black;
font-style: italic;
font-weight: normal;
}
.badexa {
text-decoration: line-through;
font-style: italic;
}
.hint .expl {
display: inline;
}
span.infllab {
font-style: italic;
font-weight: normal;
}
span.warning {
font-style: normal;
font-weight: bold;
color: red;
}
span.sensenum {
font-style: normal;
font-weight: bold;
margin-right: 3px;
color: blue;
}
span.synopp {
padding-left: 3px;
padding-right: 3px;
border-radius: 2px 2px 2px 2px;
border-style: solid;
border-width: 1px;
font-variant: small-caps;
font-size: 80%;
font-weight: bold;
color: blue;
}
.subheading,.secheading {
display: block;
font-weight: bold;
color: white;
background-color: #8187BF;
margin-left: -3px;
margin-right: -3px;
font-variant: small-caps;
padding-left: 3px;
}
.collocate,.exponent {
display: block;
padding-bottom: 5px;
margin-top: 5px;
}
.collocate.inline {
display: inline;
}
.expl {
display: block;
padding: 0 3px;
}
.colloc {
font-weight: bold;
}
.exp {
font-weight: bold;
}
.expr {
font-weight: bold;
}
.colloc.key {
color: blue;
}
span.keycollo {
font-weight: bold;
color: blue;
}
.thespropform {
font-weight: bold;
}
collexa {
font-style: italic;
}
collexa .colloinexa {
font-weight: normal;
}
thesexa {
font-style: italic;
}
learneritem {
display: block;
}
.goodcollo {
font-weight: bold;
}
.badcollo {
text-decoration: line-through;
}
.defbold {
font-weight: bold;
}
topic {
font-variant: small-caps;
color: blue;
}
.thesref.newline {
display: block;
}
.heading.newline {
display: block;
}
.thesref span.thesaurus {
color: blue;
font-variant: small-caps;
}
.thesref .refhwd,.thesref .refhomnum {
color: blue;
font-weight: bold;
}
i {
font-style: italic;
}
.imgholder {
cursor: pointer;
/*float: right;*/
display: block;
margin-bottom: 1ex;
padding: 2px;
clear: both;
}
.imgholder img {
border: 1px solid #DDD;
}
.buttons {
display: block;
}
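/* pop-up panel buttons */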
.popup-button {
background-color: #f0f2fc;
border-radius: 2px;
border: 1px solid #7e92c7;
color: #7e92c7;
text-transform: uppercase;
font-size: 66%;
padding: 2px 3px;
text-decoration: none;
}
.popup-button-hover {
background-color: #dfe3f8;
cursor: pointer;
}
.popverbs {
display: block;
color: white;
font-weight: bold;
background-color: #ec008d;
padding-left: 3px;
margin-bottom: 5px;
}
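/* verb conjugation table */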
.verbtable .lemma {
color: blue;
font-size: 120%;
font-weight: bold;
}
.verbtable table {
border-collapse: separate;
border-spacing: 1px;
margin-top: 10px;
}
.verbtable td {
padding: 0 5px 0 2px;
border-style: solid;
border-width: 1px;
border-color: #D2D2D2;
}
.header {
font-weight: bold;
font-variant: small-caps;
}
.verbtable td.col1 {
font-weight: bold;
}
.verbtable td.col2 {
font-style: italic;
}
.verbtable .geo {
font-style: italic;
color: #000;
font-size: 100%;
font-weight: normal;
}
.verbtable .aux {
font-weight: bold;
}
.verbtable .verb_form {
color: blue;
font-weight: bold;
}
.collocations .last {
margin-bottom: 20px;
}
.collocations .colloc {
display: inline-block;
font-weight: bold;
margin-left: -2px;
}
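/* rounded boxes: collocations, thesaurus, usage and grammar notes */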
.collobox,.thesbox,.usagebox,.grambox,spoken,.f2nbox {
border-radius: 9px 9px 9px 9px;
border-style: solid;
border-width: 2px;
display: block;
margin-bottom: 1ex;
clear: both;
margin-top: 1ex;
}
.f2nbox {
border-color: #00A2E8;
}
.f2nbox .heading {
color: #00A2E8;
}
.heading {
color: white;
font-weight: bold;
line-height: 100%;
padding: 3px;
}
.section {
display: block;
padding: 0 3px;
}
.last {
border-bottom-left-radius: 9px;
border-bottom-right-radius: 9px;
}
.thesbox {
background-color: #652D91;
border-color: #652D91;
}
.thesbox .section {
background-color: #E8E2F0;
}
.collobox {
background-color: #1D3E99;
border-color: #1D3E99;
}
.collobox .section {
background-color: #E2F4FD;
}
.usagebox {
background-color: #00A2E8;
border-color: #00A2E8;
}
.usagebox .expl {
background-color: #E2F4FD;
}
.grambox {
background-color: #00ADEE;
border-color: #00ADEE;
}
.grambox .expl,.grambox .compareword {
background-color: #E2F4FD;
display: block;
padding-left: 3px;
}
.compareword {
padding-top: 5px;
padding-bottom: 5px;
}
.gramrefcont {
display: block;
text-transform: lowercase;
font-variant: small-caps;
padding-left: 3px;
padding-bottom: 1px;
}
.thesaurus .sense {
padding-left: 3px;
padding-right: 3px;
margin-top: 0;
}
.thesaurus .section,.collocations .section {
margin-bottom: 10px;
}
.thesaurus .secheading,.thesbox .secheading {
background-color: #A186BD;
}
.add_exa {
font-style: italic;
display: block;
}
.nodeword {
color: blue;
font-weight: bold;
}
.phrase {
display: block;
}
.phrasetext {
font-weight: bold;
}
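/* expandable sections (hidden .content is presumably toggled by the viewer) */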
.expandable {
cursor: pointer;
}
.entry div.content {
display: none;
margin-bottom: 10px;
}
.group,.w {
display: block;
}
.group .pos {
font-weight: bold;
display: block;
}
.item {
display: block;
}
.w {
font-weight: bold;
}
.popheader {
display: block;
color: white;
font-weight: bold;
padding-left: 3px;
}
.popheader.popexa {
background-color: red;
}
.popheader.popphrase {
background-color: purple;
}
.popheader.popcollo {
background-color: #1D3E99;
}
.popheader.popthes {
background-color: #652D91;
}
.popheader.popetym {
background-color: green;
}
.popheader.popwf {
background-color: #FFD448;
}
.popheader.pope_menu {
background-color: #57C0E0;
}
.ws-head {
font-weight: bold;
color: blue;
font-size: larger;
}
.ws-head.ref {
display: block;
margin-top: 10px;
}
.wswd {
font-weight: bold;
display: block;
}
.cyan {
color: #00A2E8;
font-size: larger;
}
.menuitem {
display: block;
}
ul.exas li {
font-style: italic;
color: blue;
margin-bottom: 3px;
}
.grammar {
display: block;
}
.str {
font-size: large;
font-weight: bold;
}
.etymology {
margin-top: 10px;
}
.group {
margin-top: 10px;
}
.menuitem .signpost {
background-color: #FFF;
color: black;
margin-left: 0;
font-size: 90%;
}
.phrvbs {
display: block;
margin-top: 10px;
margin-left: 5px;
}
.phrvbs .heading {
display: block;
color: blue;
font-weight: bold;
}
.phrv {
font-weight: bold;
}
.secheading,.subheading {
font-variant: normal;
text-transform: uppercase;
}
.secheading.no_convert {
text-transform: none;
}
.grambox .heading.newline {
background-color: #6CD0F6;
}
img[src*="img/spkr_"] {
margin-bottom: -5px;
}
.chwd {
margin-top: 10px;
margin-left: 8px;
margin-right: 8px;
}
.chwd a {
display: inline-block;
background-color: green;
color: white;
padding-top: 2px;
padding-bottom: 3px;
padding-left: 5px;
padding-right: 5px;
margin-right: 3px;
margin-bottom: 3px;
text-decoration: none;
font-size: 80%;
border-radius: 2px;
}
.chwd .hw {
color: white;
}
.entry, .entry.lozenge, .verbtable {
margin-top: 10px;
margin-left: 10px;
margin-right: 10px;
}

View File

@ -345,24 +345,29 @@ class OptionsDialog(QDialog):
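        # list only services that report themselves as supported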
for cls in service_manager.local_services:
# combo_data.insert("data", each.label)
service = service_pool.get(cls.__unique__)
            if service and service.support:
                dict_combo.addItem(
                    service.title, userData=service.unique)
                service_pool.put(service)
dict_combo.insertSeparator(dict_combo.count())
for cls in service_manager.web_services:
service = service_pool.get(cls.__unique__)
            if service and service.support:
                dict_combo.addItem(
                    service.title, userData=service.unique)
                service_pool.put(service)
def set_dict_combo_index():
            #dict_combo.setCurrentIndex(-1)
            # a "not a dict field" placeholder always maps to the first entry
            if current_text in _sl('NOT_DICT_FIELD'):
                dict_combo.setCurrentIndex(0)
                return
            for i in range(dict_combo.count()):
                if dict_combo.itemText(i) == current_text:
                    dict_combo.setCurrentIndex(i)
                    return
            dict_combo.setCurrentIndex(0)
set_dict_combo_index()
@ -376,15 +381,19 @@ class OptionsDialog(QDialog):
field_combo.setFocus(Qt.MouseFocusReason) # MouseFocusReason
else:
field_text = field_combo.currentText()
            unique = dict_combo_itemdata
            service = service_pool.get(unique)
            text = ''
            # rebuild the field list for the selected service; keep the old
            # field text only if the service still offers that field
            if service and service.support and service.fields:
                for each in service.fields:
                    field_combo.addItem(each)
                    if each == field_text:
                        text = each
            field_combo.setEditText(text)
field_combo.setEnabled(text != '')
service_pool.put(service)
def radio_btn_checked(self):
rbs = self.findChildren(QRadioButton)