实战深度学习:文本预处理入门指南
最编程
2024-01-19 10:32:26
...
文本预处理
import collections
import re
from d2l import torch as d2l
将数据集读取到由文本行组成的列表中
# Register the book's download URL and SHA-1 checksum with d2l's data hub,
# so d2l.download('time_machine') can fetch and verify it.
d2l.DATA_HUB['time_machine'] = (d2l.DATA_URL + 'timemachine.txt',
                                '090b5e7e70c295757f55df93cb0a180b9691891a')  # register one book for download
def read_time_machine():
    """Load the time machine dataset into a list of text lines.

    Returns:
        list[str]: one entry per line of the book, lower-cased, with every
        run of non-alphabetic characters collapsed to a single space and
        leading/trailing whitespace stripped.
    """
    # Explicit encoding: relying on the platform default (e.g. cp1252 on
    # Windows) can raise UnicodeDecodeError or corrupt characters.
    with open(d2l.download('time_machine'), 'r', encoding='utf-8') as f:
        lines = f.readlines()
    # Replace everything that is not a letter with a space, then normalize case.
    return [re.sub('[^A-Za-z]+', ' ', line).strip().lower() for line in lines]
# Read the book and show a couple of cleaned lines.
lines = read_time_machine()
print(lines[0])   # title line
print(lines[10])  # an arbitrary line from the body
Downloading ../data/timemachine.txt from http://d2l-data.s3-accelerate.amazonaws.com/timemachine.txt...
the time machine by h g wells
twinkled and his usually pale face was flushed and animated the
每个文本序列被拆分成一个标记列表
# 这个函数的作用就是将一行转换为一个个token,这个token要么是一个字符串要么是一个词
def tokenize(lines, token='word'):
    """Split each text line into word or character tokens.

    Args:
        lines: list of strings, one per text line.
        token: 'word' to split on whitespace, 'char' to split into
            individual characters.

    Returns:
        list[list[str]]: one token list per input line.

    Raises:
        ValueError: if `token` is neither 'word' nor 'char'.
    """
    if token == 'word':
        return [line.split() for line in lines]
    if token == 'char':
        return [list(line) for line in lines]
    # The original printed an error message and fell through, silently
    # returning None; fail loudly instead so callers cannot misuse the result.
    raise ValueError('unknown token type: ' + token)
# Tokenize at word level (the default) and preview the first few lines;
# empty lists correspond to blank lines in the book.
tokens = tokenize(lines)
for i in range(11):
    print(tokens[i])
['the', 'time', 'machine', 'by', 'h', 'g', 'wells']
[]
[]
[]
[]
['i']
[]
[]
['the', 'time', 'traveller', 'for', 'so', 'it', 'will', 'be', 'convenient', 'to', 'speak', 'of', 'him']
['was', 'expounding', 'a', 'recondite', 'matter', 'to', 'us', 'his', 'grey', 'eyes', 'shone', 'and']
['twinkled', 'and', 'his', 'usually', 'pale', 'face', 'was', 'flushed', 'and', 'animated', 'the']
构建一个字典,通常也叫做词表(vocabulary),用来将字符串标记映射到从0开始的数字索引中
class Vocab:
    """Vocabulary mapping between string tokens and integer indices.

    Index 0 is always the unknown token '<unk>'; any `reserved_tokens`
    follow, then the corpus tokens ordered by descending frequency.
    """
    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        """Build the vocabulary from a (possibly nested) token list.

        Args:
            tokens: flat list of tokens, or list of token lists (one per line).
            min_freq: tokens that appear fewer than this many times are dropped.
            reserved_tokens: special tokens (e.g. '<pad>') to place right
                after '<unk>'.
        """
        if tokens is None:
            tokens = []
        if reserved_tokens is None:
            reserved_tokens = []
        counter = count_corpus(tokens)
        # Sort by frequency, most common first; sorted() is stable, so ties
        # keep the Counter's insertion order, as in the original.
        self.token_freqs = sorted(counter.items(), key=lambda x: x[1],
                                  reverse=True)
        # '<unk>' (index 0) stands in for any token not in the vocabulary.
        self.unk = 0
        self.idx_to_token = ['<unk>'] + reserved_tokens
        self.token_to_idx = {
            token: idx for idx, token in enumerate(self.idx_to_token)}
        for token, freq in self.token_freqs:
            if freq < min_freq:
                break  # frequency-sorted, so every remaining token is rarer
            # Dict membership is O(1); the original scanned a list per token,
            # making construction O(n^2).
            if token not in self.token_to_idx:
                self.idx_to_token.append(token)
                self.token_to_idx[token] = len(self.idx_to_token) - 1

    def __len__(self):
        """Number of unique tokens, including '<unk>' and reserved tokens."""
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        """Map a token (or nested list/tuple of tokens) to index/indices.

        Unknown tokens map to `self.unk` (0).
        """
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]

    def to_tokens(self, indices):
        """Inverse of __getitem__: map an index (or list of indices) to tokens."""
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[index] for index in indices]
def count_corpus(tokens):
    """Count token frequencies.

    Accepts either a flat list of tokens or a list of token lists (one per
    line); a 2-D input — detected by inspecting the first element — is
    flattened before counting. An empty input yields an empty Counter.
    """
    is_nested = (not tokens) or isinstance(tokens[0], list)
    if is_nested:
        flat = [tok for line in tokens for tok in line]
    else:
        flat = tokens
    return collections.Counter(flat)
构建词汇表
# Build a vocabulary from the word tokens and show the ten most frequent
# entries with their indices ('<unk>' always occupies index 0).
vocab = Vocab(tokens)
print(list(vocab.token_to_idx.items())[:10])
[('<unk>', 0), ('the', 1), ('i', 2), ('and', 3), ('of', 4), ('a', 5), ('to', 6), ('was', 7), ('in', 8), ('that', 9)]
将每一行文本转换成一个数字索引列表
# Show two sample lines both as word tokens and as their vocabulary indices.
for i in [0, 10]:
    print('words:', tokens[i])
    print('indices:', vocab[tokens[i]])
words: ['the', 'time', 'machine', 'by', 'h', 'g', 'wells']
indices: [1, 19, 50, 40, 2183, 2184, 400]
words: ['twinkled', 'and', 'his', 'usually', 'pale', 'face', 'was', 'flushed', 'and', 'animated', 'the']
indices: [2186, 3, 25, 1044, 362, 113, 7, 1421, 3, 1045, 1]
将所有内容打包到load_corpus_time_machine函数中
def load_corpus_time_machine(max_tokens=-1):
    """Return the token-index list and the vocabulary of the time machine book.

    Args:
        max_tokens: if positive, truncate the corpus to this many tokens;
            any non-positive value (the default -1) keeps everything.

    Returns:
        (corpus, vocab): `corpus` is one flat list of character indices for
        the whole book, `vocab` the character-level `Vocab` built from it.
    """
    raw_lines = read_time_machine()
    char_tokens = tokenize(raw_lines, 'char')
    vocab = Vocab(char_tokens)
    # Flatten every line into a single index stream: each character is
    # mapped through the vocabulary to its numeric index.
    corpus = [vocab[ch] for line in char_tokens for ch in line]
    return (corpus[:max_tokens] if max_tokens > 0 else corpus), vocab
# Build the full character-level corpus and report its size.
corpus, vocab = load_corpus_time_machine()
len(corpus), len(vocab)  # (total characters, vocabulary size incl. '<unk>')
(170580, 28)
推荐阅读
-
深度学习编译器入门指南 (Q2) 模型量化 - KL 散度
-
深度学习编译器入门指南 (Q4) 模型量化 - INT8 量化实验
-
深度学习编译器入门指南 (Q1) 模型量化-概览
-
深度学习编译器入门指南 (Q3) 模型量化 - INT8 卷积
-
阿里云 - NLP 零基础入门 [基于深度学习的文本分类 3-BERT] - 数据标记
-
2023 年深度学习入门指南(21) - Bacchus Big Models
-
[人工智能实战] 深度学习文本识别实战(文本检测篇:基于 MSER、CTPN、SegLink、EAST 和其他方法)
-
MLPerf 实战指南:轻松理解并入门机器学习物体检测(第二部分)
-
如何轻松上手百度的PaddlePaddle深度强化学习框架PARL:深度学习工具实战指南
-
入门到精通:实战探索深度强化学习 - 以DDPG与TD3算法为例(第7.1章)