
What is the input for word-vector training, and how are word vectors built after tokenization?


For the full treatment, see《自然语言处理:基于预训练模型的方法》(Natural Language Processing: A Pre-trained Model Approach) by Che Wanxiang et al. of Harbin Institute of Technology. In short, the input to word-vector training is a tokenized corpus whose words have been mapped to integer ids by a vocabulary; the model then learns from (word, context) pairs extracted with a sliding window, together with negative samples drawn from a (smoothed) unigram distribution. The code below follows the book's skip-gram-with-negative-sampling (SGNS) example.
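To make the question concrete before the full listing, here is a minimal sketch (toy data of my own, not from the book): after tokenization, every target word is paired with the words inside a fixed-size window around it, and these (target, context) pairs are what the skip-gram model actually trains on.

# Toy example: extract (target, context) pairs with a window of 2
sentence = ["i", "like", "natural", "language", "processing"]
window = 2
for i, target in enumerate(sentence):
    context = sentence[max(0, i - window):i] + sentence[i + 1:i + 1 + window]
    print(target, "->", context)
# e.g. "natural" -> ['i', 'like', 'language', 'processing']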

# coding: utf-8
# name: test2
# author: Dell
# date: 2021/10/12
# Skip-gram model with negative sampling (SGNS)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from collections import defaultdict
from tqdm.auto import tqdm
# Reuters data comes from NLTK
from nltk.corpus import reuters

BOS_TOKEN = "<bos>"  # beginning-of-sentence marker
EOS_TOKEN = "<eos>"  # end-of-sentence marker
PAD_TOKEN = "<pad>"  # padding marker

def load_reuters():
    # Get all sentences from the Reuters corpus
    text = reuters.sents()
    # (Optional) lowercase every word
    text = [[word.lower() for word in sentence] for sentence in text]
    # Build the vocabulary, keeping the reserved markers
    vocab = Vocab.build(text, reserved_tokens=[PAD_TOKEN, BOS_TOKEN, EOS_TOKEN])
    # Convert the text into ids using the vocabulary
    corpus = [vocab.convert_tokens_to_ids(sentence) for sentence in text]
    return corpus, vocab

class Vocab:
    def __init__(self, tokens=None):
        self.idx_to_token = list()
        self.token_to_idx = dict()
        if tokens is not None:
            if "<unk>" not in tokens:
                tokens = tokens + ["<unk>"]
            for token in tokens:
                self.idx_to_token.append(token)
                self.token_to_idx[token] = len(self.idx_to_token) - 1
            self.unk = self.token_to_idx["<unk>"]

    @classmethod
    def build(cls, text, min_freq=1, reserved_tokens=None):
        token_freqs = defaultdict(int)
        for sentence in text:
            for token in sentence:
                token_freqs[token] += 1
        uniq_tokens = ["<unk>"] + (reserved_tokens if reserved_tokens else [])
        uniq_tokens += [token for token, freq in token_freqs.items()
                        if freq >= min_freq and token != "<unk>"]
        return cls(uniq_tokens)

    def __len__(self):
        # Size of the vocabulary, i.e. how many distinct tokens it contains
        return len(self.idx_to_token)

    def __getitem__(self, token):
        # Look up the index of a token; return the index of <unk> if it is absent
        return self.token_to_idx.get(token, self.unk)

    def convert_tokens_to_ids(self, tokens):
        # Look up the indices of a list of tokens (reuses __getitem__)
        return [self[token] for token in tokens]

    def convert_ids_to_tokens(self, indices):
        # Map a list of indices back to tokens
        return [self.idx_to_token[index] for index in indices]
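As a quick illustration of the step above, the following minimal sketch (toy sentences of my own, not part of the original listing) shows that the training input is simply tokenized sentences mapped to integer ids by the Vocab class; unknown words fall back to the <unk> index.

# Usage sketch with hypothetical toy data
toy_text = [["hello", "world"], ["hello", "pytorch"]]
toy_vocab = Vocab.build(toy_text, reserved_tokens=[PAD_TOKEN, BOS_TOKEN, EOS_TOKEN])
ids = toy_vocab.convert_tokens_to_ids(["hello", "unknown-word"])
print(ids)                                   # ids of the known word; the unknown word maps to <unk>
print(toy_vocab.convert_ids_to_tokens(ids))  # back to tokens: ['hello', '<unk>']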

class SGNSDataset(Dataset):
    def __init__(self, corpus, vocab, context_size=2, n_negatives=5, ns_dist=None):
        self.data = []
        self.bos = vocab[BOS_TOKEN]
        self.eos = vocab[EOS_TOKEN]
        self.pad = vocab[PAD_TOKEN]
        for sentence in tqdm(corpus, desc="Dataset Construction"):
            sentence = [self.bos] + sentence + [self.eos]
            for i in range(1, len(sentence) - 1):
                # Model input: (w, context)
                w = sentence[i]
                left_context_index = max(0, i - context_size)
                right_context_index = min(len(sentence), i + context_size)
                context = sentence[left_context_index:i] + sentence[i+1:right_context_index+1]
                # Pad the context to a fixed length of 2 * context_size
                context += [self.pad] * (2 * context_size - len(context))
                self.data.append((w, context))
        # Number of negative samples
        self.n_negatives = n_negatives
        # Negative-sampling distribution: if ns_dist is None, sample uniformly from the vocabulary
        self.ns_dist = ns_dist if ns_dist is not None else torch.ones(len(vocab))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def collate_fn(self, examples):
        words = torch.tensor([ex[0] for ex in examples], dtype=torch.long)
        contexts = torch.tensor([ex[1] for ex in examples], dtype=torch.long)
        batch_size, context_size = contexts.shape
        neg_contexts = []
        # Perform negative sampling separately for each example in the batch
        for i in range(batch_size):
            # Make sure the negatives do not contain the current example's context words
            # index_fill: dim=0, indices to zero out = contexts[i], value = 0.0
            ns_dist = self.ns_dist.index_fill(0, contexts[i], .0)
            # torch.multinomial: draw self.n_negatives * context_size samples from ns_dist
            # with replacement (replacement=True)
            neg_contexts.append(torch.multinomial(ns_dist, self.n_negatives * context_size, replacement=True))
        neg_contexts = torch.stack(neg_contexts, dim=0)
        return words, contexts, neg_contexts

class SGNSModel(nn.Module):
    def __init__(self, vocab_size, embedding_dim):
        super(SGNSModel, self).__init__()
        # Word embeddings
        self.w_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # Context embeddings
        self.c_embeddings = nn.Embedding(vocab_size, embedding_dim)

    def forward_w(self, words):
        w_embeds = self.w_embeddings(words)
        return w_embeds

    def forward_c(self, contexts):
        c_embeds = self.c_embeddings(contexts)
        return c_embeds

def get_unigram_distribution(corpus, vocab_size):
    # Compute the unigram distribution from the given corpus
    token_counts = torch.tensor([0] * vocab_size)
    total_count = 0
    for sentence in corpus:
        total_count += len(sentence)
        for token in sentence:
            token_counts[token] += 1
    unigram_dist = torch.div(token_counts.float(), total_count)
    return unigram_dist

def save_pretrained(vocab, embeds, save_path):
    with open(save_path, "w") as writer:
        # Record the size of the embedding matrix
        writer.write(f"{embeds.shape[0]} {embeds.shape[1]}\n")
        for idx, token in enumerate(vocab.idx_to_token):
            vec = " ".join([f"{x}" for x in embeds[idx]])
            # One line per word: the token followed by its space-separated vector
            writer.write(f"{token} {vec}\n")

def main():
    # Hyperparameters
    embedding_dim = 128
    context_size = 3
    batch_size = 1024
    n_negatives = 5   # number of negative samples
    num_epoch = 10
    # Load the text data
    corpus, vocab = load_reuters()
    # Compute the unigram distribution
    unigram_dist = get_unigram_distribution(corpus, len(vocab))
    # Compute the negative-sampling distribution from the unigram distribution: p(w) ** 0.75
    # Raising to the power 0.75 slightly boosts the probability of low-frequency words
    # so that they are not ignored
    negative_sampling_dist = unigram_dist ** 0.75
    negative_sampling_dist /= negative_sampling_dist.sum()
    # Build the SGNS training dataset
    dataset = SGNSDataset(corpus, vocab, context_size=context_size, n_negatives=n_negatives, ns_dist=negative_sampling_dist)
    # The custom collate_fn batches the (word, context) pairs and draws the negatives
    data_loader = DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=True)
    model = SGNSModel(len(vocab), embedding_dim)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    model.train()
    for epoch in range(num_epoch):
        total_loss = 0
        for batch in tqdm(data_loader, desc=f"Training Epoch {epoch}"):
            words, contexts, neg_contexts = [x.to(device) for x in batch]
            optimizer.zero_grad()
            batch_size = words.shape[0]
            # Extract the vector representations of the words, contexts and negatives in the batch
            word_embeds = model.forward_w(words).unsqueeze(dim=2)   # [batch_size, embedding_dim, 1]
            context_embeds = model.forward_c(contexts)              # [batch_size, context_num, embedding_dim]
            neg_context_embeds = model.forward_c(neg_contexts)      # [batch_size, n_negatives * context_num, embedding_dim]
            # Log-likelihood of the positive samples (predicting the context words)
            context_loss = F.logsigmoid(torch.bmm(context_embeds, word_embeds).squeeze(dim=2))  # [batch_size, context_num]
            context_loss = context_loss.mean(dim=1)
            # Log-likelihood of the negative samples
            # torch.neg(): element-wise negation, output = -1 * input
            neg_context_loss = F.logsigmoid(torch.bmm(neg_context_embeds, word_embeds).squeeze(dim=2).neg())
            neg_context_loss = neg_context_loss.view(batch_size, -1, n_negatives).sum(dim=2)  # sum over the negatives: [batch_size, context_num]
            neg_context_loss = neg_context_loss.mean(dim=1)
            # Overall loss
            loss = -(context_loss + neg_context_loss).mean()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print(f"Loss: {total_loss:.2f}")
    # Add the word-embedding and context-embedding matrices to obtain the final pretrained word vectors
    combined_embeds = model.w_embeddings.weight + model.c_embeddings.weight
    # Save the word vectors to the sgns.vec file
    save_pretrained(vocab, combined_embeds.data, "sgns.vec")

if __name__ == "__main__":
    main()
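The saved sgns.vec file uses the plain word2vec text format: a header line with the matrix size, then one token and its space-separated vector per line. The sketch below is not part of the original code, and the helpers load_vectors and nearest are hypothetical names introduced here; it only illustrates how the trained vectors could be loaded and queried by cosine similarity.

import torch

def load_vectors(path):
    # Read the word2vec-style text file written by save_pretrained
    with open(path) as f:
        n, dim = map(int, f.readline().split())
        tokens, vectors = [], []
        for line in f:
            parts = line.rstrip().split(" ")
            tokens.append(parts[0])
            vectors.append([float(x) for x in parts[1:]])
    return tokens, torch.tensor(vectors)

def nearest(query, tokens, vectors, k=5):
    # Rank all words by cosine similarity to the query word
    idx = tokens.index(query)
    sims = torch.nn.functional.cosine_similarity(vectors[idx].unsqueeze(0), vectors)
    best = sims.topk(k + 1).indices.tolist()   # k + 1 because the query itself ranks first
    return [tokens[i] for i in best if i != idx][:k]

tokens, vectors = load_vectors("sgns.vec")
print(nearest("oil", tokens, vectors))         # query any word that occurs in the Reuters corpus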
