
How to build a data table chart; generating client code from a WSDL file


Contents: Preface · Creation code · Reading part

Preface

A note on how to use LMDB with PyTorch, recorded mainly for my own use.

Creation code

The code is the data-creation part of ASTER with slight changes: every line of aster_train.txt holds the full path of an image, and each image has a .txt file with the same name in the same directory that contains the jpg's label.
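For illustration, the expected layout looks like the following (the image names and the label value are made up; only the aster_train.txt path appears in the script below):

aster_train.txt:
    /home/gpu-server/disk/disk1/NumberData/8NumberSample/img_0001.jpg
    /home/gpu-server/disk/disk1/NumberData/8NumberSample/img_0002.jpg

img_0001.txt (next to img_0001.jpg, first line is the label):
    12345678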

import os
import re
import lmdb  # install lmdb by 'pip install lmdb'
import cv2
import numpy as np
from tqdm import tqdm
import six
from PIL import Image
import scipy.io as sio


def checkImageIsValid(imageBin):
    if imageBin is None:
        return False
    imageBuf = np.frombuffer(imageBin, dtype=np.uint8)
    img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
    imgH, imgW = img.shape[0], img.shape[1]
    if imgH * imgW == 0:
        return False
    return True


def writeCache(env, cache):
    with env.begin(write=True) as txn:
        for k, v in cache.items():
            txn.put(k.encode(), v)


def _is_difficult(word):
    assert isinstance(word, str)
    return not re.match(r'^[\w]+$', word)


def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
    """Create LMDB dataset for CRNN training.

    ARGS:
        outputPath    : LMDB output path
        imagePathList : list of image paths
        labelList     : list of corresponding groundtruth texts
        lexiconList   : (optional) list of lexicon lists
        checkValid    : if True, check the validity of every image
    """
    assert len(imagePathList) == len(labelList)
    nSamples = len(imagePathList)
    env = lmdb.open(outputPath, map_size=1099511627776)
    cache = {}
    cnt = 1
    for i in range(nSamples):
        imagePath = imagePathList[i]
        label = labelList[i]
        if len(label) == 0:
            continue
        if not os.path.exists(imagePath):
            print('%s does not exist' % imagePath)
            continue
        with open(imagePath, 'rb') as f:
            imageBin = f.read()
        if checkValid:
            if not checkImageIsValid(imageBin):
                print('%s is not a valid image' % imagePath)
                continue

        # everything stored in lmdb is binary data; keys are 9-digit, zero-padded
        imageKey = 'image-%09d' % cnt
        labelKey = 'label-%09d' % cnt
        cache[imageKey] = imageBin
        cache[labelKey] = label.encode()
        if lexiconList:
            lexiconKey = 'lexicon-%09d' % cnt
            cache[lexiconKey] = ' '.join(lexiconList[i]).encode()
        if cnt % 1000 == 0:
            writeCache(env, cache)
            cache = {}
            print('Written %d / %d' % (cnt, nSamples))
        cnt += 1
    nSamples = cnt - 1
    cache['num-samples'] = str(nSamples).encode()
    writeCache(env, cache)
    print('Created dataset with %d samples' % nSamples)


def get_sample_list(txt_path: str):
    with open(txt_path, 'r') as fr:
        jpg_list = [x.strip() for x in fr.readlines()
                    if os.path.exists(x.replace('.jpg', '.txt').strip())]
    txt_content_list = []
    for jpg in jpg_list:
        label_path = jpg.replace('.jpg', '.txt')
        with open(label_path, 'r') as fr:
            try:
                str_tmp = fr.readline()
            except UnicodeDecodeError as e:
                print(label_path)
                raise e
        txt_content_list.append(str_tmp.strip())
    return jpg_list, txt_content_list


if __name__ == "__main__":
    txt_path = '/home/gpu-server/disk/disk1/NumberData/8NumberSample/aster_train.txt'
    lmdb_output_path = '/home/gpu-server/project/aster/dataset/train'
    imagePathList, labelList = get_sample_list(txt_path)
    createDataset(lmdb_output_path, imagePathList, labelList)
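To sanity-check the generated LMDB you can read a sample back with the same key scheme ('num-samples', 'image-%09d', 'label-%09d'). A minimal sketch, assuming the output path from the __main__ block above:

import lmdb
import six
from PIL import Image

env = lmdb.open('/home/gpu-server/project/aster/dataset/train', readonly=True, lock=False)
with env.begin() as txn:
    n = int(txn.get(b'num-samples'))
    print('num-samples:', n)
    # keys are 9-digit, zero-padded, starting from 1
    img_bin = txn.get(b'image-%09d' % 1)
    label = txn.get(b'label-%09d' % 1).decode()
    img = Image.open(six.BytesIO(img_bin)).convert('RGB')
    print(img.size, label)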

Reading part

Here PyTorch's DataLoader is used; this is just a quick note. Being lazy, I copied the code over directly without reorganizing or splitting it. The key part is __getitem__.

from __future__ import absolute_import
# import sys
# sys.path.append('./')
import os
# import moxing as mox
import pickle
from tqdm import tqdm
from PIL import Image, ImageFile
import numpy as np
import random
import cv2
import lmdb
import sys
import six
import torch
from torch.utils import data
from torch.utils.data import sampler
from torchvision import transforms

from lib.utils.labelmaps import get_vocabulary, labels2strs
from lib.utils import to_numpy

ImageFile.LOAD_TRUNCATED_IMAGES = True

from config import get_args
global_args = get_args(sys.argv[1:])

if global_args.run_on_remote:
    import moxing as mox  # moxing is a distributed framework; skip if not needed


class LmdbDataset(data.Dataset):
    def __init__(self, root, voc_type, max_len, num_samples, transform=None):
        super(LmdbDataset, self).__init__()

        if global_args.run_on_remote:
            dataset_name = os.path.basename(root)
            data_cache_url = "/cache/%s" % dataset_name
            if not os.path.exists(data_cache_url):
                os.makedirs(data_cache_url)
            if mox.file.exists(root):
                mox.file.copy_parallel(root, data_cache_url)
            else:
                raise ValueError("%s not exists!" % root)
            self.env = lmdb.open(data_cache_url, max_readers=32, readonly=True)
        else:
            self.env = lmdb.open(root, max_readers=32, readonly=True)

        assert self.env is not None, "cannot create lmdb from %s" % root
        self.txn = self.env.begin()

        self.voc_type = voc_type
        self.transform = transform
        self.max_len = max_len
        self.nSamples = int(self.txn.get(b"num-samples"))
        self.nSamples = min(self.nSamples, num_samples)

        assert voc_type in ['LOWERCASE', 'ALLCASES', 'ALLCASES_SYMBOLS', 'DIGITS']
        self.EOS = 'EOS'
        self.PADDING = 'PADDING'
        self.UNKNOWN = 'UNKNOWN'
        self.voc = get_vocabulary(voc_type, EOS=self.EOS, PADDING=self.PADDING, UNKNOWN=self.UNKNOWN)
        self.char2id = dict(zip(self.voc, range(len(self.voc))))
        self.id2char = dict(zip(range(len(self.voc)), self.voc))

        self.rec_num_classes = len(self.voc)
        self.lowercase = (voc_type == 'LOWERCASE')

    def __len__(self):
        return self.nSamples

    def __getitem__(self, index):
        assert index <= len(self), 'index range error'
        index += 1
        img_key = b'image-%09d' % index
        imgbuf = self.txn.get(img_key)

        # Image.open needs a file-like object, so wrap the raw bytes in one
        buf = six.BytesIO()
        buf.write(imgbuf)
        buf.seek(0)
        try:
            img = Image.open(buf).convert('RGB')
            # img = Image.open(buf).convert('L')
            # img = img.convert('RGB')
        except IOError:
            print('Corrupted image for %d' % index)
            return self[index + 1]

        # recognition labels
        label_key = b'label-%09d' % index
        word = self.txn.get(label_key).decode()
        if self.lowercase:
            word = word.lower()
        # fill with the padding token
        label = np.full((self.max_len,), self.char2id[self.PADDING], dtype=int)
        label_list = []
        for char in word:
            if char in self.char2id:
                label_list.append(self.char2id[char])
            else:
                # add the unknown token
                print('{0} is out of vocabulary.'.format(char))
                label_list.append(self.char2id[self.UNKNOWN])
        # add a stop token
        label_list = label_list + [self.char2id[self.EOS]]
        assert len(label_list) <= self.max_len
        label[:len(label_list)] = np.array(label_list)

        if len(label) <= 0:
            return self[index + 1]

        # label length
        label_len = len(label_list)

        if self.transform is not None:
            img = self.transform(img)
        return img, label, label_len
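A minimal usage sketch for feeding LmdbDataset into a DataLoader. The voc_type, max_len, image size and batch size below are assumed values for illustration (not from the original post), and the module still needs the ASTER config/get_args machinery to import:

from torch.utils.data import DataLoader
from torchvision import transforms

transform = transforms.Compose([
    transforms.Resize((32, 100)),  # assumed input size for the recognizer
    transforms.ToTensor(),
])
train_set = LmdbDataset(root='/home/gpu-server/project/aster/dataset/train',
                        voc_type='ALLCASES_SYMBOLS', max_len=64,
                        num_samples=float('inf'), transform=transform)
# num_workers=0 keeps the lmdb handle in a single process
train_loader = DataLoader(train_set, batch_size=32, shuffle=True, num_workers=0)

for images, labels, label_lens in train_loader:
    print(images.shape, labels.shape, label_lens.shape)
    break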
