[{"data":1,"prerenderedAt":565},["ShallowReactive",2],{"content-query-o1PhcddsL1":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"category":13,"body":14,"_type":559,"_id":560,"_source":561,"_file":562,"_stem":563,"_extension":564},"/technology-blogs/zh/372","zh",false,"","MindSpore系列分享（三）：自然语言处理基础上篇","自然语言处理是人工智能与计算机领域中的一个重要方向，研究的是实现人与计算机之间用自然语言进行有效通信的方法，本文主要是对自然语言一些技术的浅析。","2021-01-11","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/89745165a282468a9c462f5e6a50d803.png","technology-blogs","基础知识",{"type":15,"children":16,"toc":556},"root",[17,25,35,40,50,55,60,70,75,80,85,90,95,105,110,115,120,125,130,135,140,145,150,155,163,168,178,183,188,198,203,213,218,223,233,238,243,248,253,258,263,268,273,278,285,290,295,302,307,312,317,322,329,334,339,344,349,354,361,366,371,381,386,396,401,406,413,418,423,433,438,445,450,455,462,467,474,479,484,489,496,501,509,514,519,531,541,551],{"type":18,"tag":19,"props":20,"children":22},"element","h1",{"id":21},"mindspore系列分享三自然语言处理基础上篇",[23],{"type":24,"value":8},"text",{"type":18,"tag":26,"props":27,"children":28},"p",{},[29],{"type":18,"tag":30,"props":31,"children":32},"strong",{},[33],{"type":24,"value":34},"简介：",{"type":18,"tag":26,"props":36,"children":37},{},[38],{"type":24,"value":39},"广义上介绍自然语言处理：是人工智能与计算机领域中的一个重要方向。研究的是实现人与计算机之间用自然语言进行有效通信的方法。自然语言处理并不是一般地研究自然语言，而是在研究处理语言的方法，应用于计算机中。因而它是计算机科学的一部分。下面主要是对自然语言一些技术的浅析。",{"type":18,"tag":26,"props":41,"children":42},{},[43,45],{"type":24,"value":44},"1. 
",{"type":18,"tag":30,"props":46,"children":47},{},[48],{"type":24,"value":49},"分词技术",{"type":18,"tag":26,"props":51,"children":52},{},[53],{"type":24,"value":54},"由于汉语对词的构成边界方面很难进行定位。在英文中，单词本身就是词的表示，一篇英文文章就是单词加空格来表示。在汉语中，词以字为单位，但一篇汉语文章的语义却仍以词来划分。因此，在处理中文文档时，需要进行分词处理，将文档转换成词来表示。这个切词过程就是中文分词。通过计算机自动识别出句子的词，在词间加入边界标识符，分隔出各个词汇，主要的难点在于分词歧义。",{"type":18,"tag":26,"props":56,"children":57},{},[58],{"type":24,"value":59},"中文分词主要有三个流派：规则分词、统计分词、混合分词。",{"type":18,"tag":26,"props":61,"children":62},{},[63,65],{"type":24,"value":64},"1.1 ",{"type":18,"tag":30,"props":66,"children":67},{},[68],{"type":24,"value":69},"规则分词",{"type":18,"tag":26,"props":71,"children":72},{},[73],{"type":24,"value":74},"规则分词：基于规则的分词是一种机械分词方法，将语句中的每一个字符串与词表中的词逐一匹配，匹配到就切分，否则不予切分。按照匹配切分的方式，主要有正向最大匹配法、逆向最大匹配法和双向最大匹配法。",{"type":18,"tag":26,"props":76,"children":77},{},[78],{"type":24,"value":79},"正向最大匹配法思想：假设分词词典中的最长词有i个字符，那么用被处理文档的当前字符串的前i个字符作为匹配字段，查找字典。若字典中存在这样一个i长度字词，则匹配成功，匹配字段则被作为一个词切分出来。如果词典中找不到这样的一个i长度字词，则匹配失败。此时便将匹配字段中的最后一个字去掉，对剩余的字符串重新匹配处理。根据这样的规则处理下去，直到匹配成功，即切分出一个词或剩余字符串的长度为0为止。这样就完成一轮匹配，然后取下一个i长度字符串进行匹配处理，直到文档被扫描完为止。",{"type":18,"tag":26,"props":81,"children":82},{},[83],{"type":24,"value":84},"逆向最大匹配法思想：基本原理与正向最大匹配法相同，",{"type":18,"tag":26,"props":86,"children":87},{},[88],{"type":24,"value":89},"双向最大匹配法思想：将正向最大匹配法得到的分词结果和逆向最大匹配法得到的结果进行比较，然后按照最大匹配原则，选取词数切分最少的作为结果。",{"type":18,"tag":26,"props":91,"children":92},{},[93],{"type":24,"value":94},"基于规则的分词，一般都比较简单高效，但是词典的维护是一个很庞大的工程。而且网络新词频频出现，很难通过词典覆盖到所有词。",{"type":18,"tag":26,"props":96,"children":97},{},[98,100],{"type":24,"value":99},"1.2 
",{"type":18,"tag":30,"props":101,"children":102},{},[103],{"type":24,"value":104},"统计分词",{"type":18,"tag":26,"props":106,"children":107},{},[108],{"type":24,"value":109},"统计分词：主要思想是把每个词看作是由词的最小单位的各个字组成的，如果相连的字在不同的文本中出现的次数越多，就证明这相连的字很可能就是一个词。因此我们就可以利用字与字相邻出现的频率来反映成词的可靠度，统计语料中相邻共现的各个字的组合的频率，当组合频率高于某一个临界值时，我们可以认为这个字的组合可能会构成一个词语。",{"type":18,"tag":26,"props":111,"children":112},{},[113],{"type":24,"value":114},"基于统计的分词，通常需要两个步骤操作：",{"type":18,"tag":26,"props":116,"children":117},{},[118],{"type":24,"value":119},"（1）建立统计语言模型；",{"type":18,"tag":26,"props":121,"children":122},{},[123],{"type":24,"value":124},"（2）对句子进行单词划分，然后对划分结果进行概率计算，获得概率最大的分词方式。这里就要用到了统计学习算法。",{"type":18,"tag":26,"props":126,"children":127},{},[128],{"type":24,"value":129},"语言模型：用概率论的专业术语描述语言模型就是，为长度为m的字符串确定其概率分布P(w1,w2, …,wm)，其中w1到wm依次表示文本中的每个词语。一般采用链式法则计算其概率值。",{"type":18,"tag":26,"props":131,"children":132},{},[133],{"type":24,"value":134},"P(w1,w2, …,wm)=P(w1)P(w2|w1)P(w3|w1,w2)",{"type":18,"tag":26,"props":136,"children":137},{},[138],{"type":24,"value":139},"...P(wi|w1,w2, …,wi-1) …P(wm|w1,w2, …,wm-1)",{"type":18,"tag":26,"props":141,"children":142},{},[143],{"type":24,"value":144},"当文本过长时，公式右部从第三项起的每一项计算难度都很大。为了解决该问题，提出了n元模型用来降低该计算难度。计算公式为：",{"type":18,"tag":26,"props":146,"children":147},{},[148],{"type":24,"value":149},"P(wi|w1,w2, …,wi-1) ≈P(wi|wi-(n-1), 
…,wi-1)",{"type":18,"tag":26,"props":151,"children":152},{},[153],{"type":24,"value":154},"当为一元模型，句子的概率表示为P(w1,w2,…,wm)=P(w1)P(w2)…P(wm)。在一元模型中，整个句子的概率等于各个词语概率的乘积。也可以看作是各个词之间是相互独立的，损失了句子中的顺序信息。所以一元模型的效果不理想。",{"type":18,"tag":26,"props":156,"children":157},{},[158],{"type":18,"tag":159,"props":160,"children":162},"img",{"alt":7,"src":161},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/400c5ce471f642e59d78ddb5f815e890.png",[],{"type":18,"tag":26,"props":164,"children":165},{},[166],{"type":24,"value":167},"由上面表达式可见，当n越大时，模型包含的词顺序信息越丰富，但同时计算量也随之增大。此时长度越长的文本序列出现的次数也会减少。根据公式估计n元条件概率时，就会出现分子分母为零的情况。因此，在一般的n元模型中需要配合相应的平滑算法解决该问题，例如拉普拉斯平滑算法。",{"type":18,"tag":26,"props":169,"children":170},{},[171,173],{"type":24,"value":172},"1.3 ",{"type":18,"tag":30,"props":174,"children":175},{},[176],{"type":24,"value":177},"混合分词",{"type":18,"tag":26,"props":179,"children":180},{},[181],{"type":24,"value":182},"在目前常用的分词方法中，在具体的分词任务中，效果上并未有很明显的差距。在实际的工程应用中，首先是先基于一种分词算法使用，然后将其他分词方法辅助使用。",{"type":18,"tag":26,"props":184,"children":185},{},[186],{"type":24,"value":187},"通常的使用方式是先基于机械分词方法进行分词，然后再使用统计分词方法辅助对准未登录词和歧义词，这样混合使用会有比单一使用有更好的效果。",{"type":18,"tag":26,"props":189,"children":190},{},[191,193],{"type":24,"value":192},"2. 
",{"type":18,"tag":30,"props":194,"children":195},{},[196],{"type":24,"value":197},"词性标注和命名实体识别",{"type":18,"tag":26,"props":199,"children":200},{},[201],{"type":24,"value":202},"词性是词汇基本的语法属性，也可以称为词类。词性标注的行为就是在给定的中文句子中判定每个词的语法作用，确定每个词的词性并加以标注。命名实体识别在信息检索方面有着很重要作用，检测出代表性的名称。",{"type":18,"tag":26,"props":204,"children":205},{},[206,208],{"type":24,"value":207},"2.1 ",{"type":18,"tag":30,"props":209,"children":210},{},[211],{"type":24,"value":212},"词性标注",{"type":18,"tag":26,"props":214,"children":215},{},[216],{"type":24,"value":217},"在中文句子中，一个同音同形的词处在不同的上下文时，语法的属性是截然不同的，由于这个原因，这就给中文词性标注带来很大的困难。但是从中文词语整体的使用情况来看，大多数的词语，尤其是实词，一般是有一到二个词性，并且通过统计发现，其中一个词性的使用频次远大于另外词性。所以即使每次都将高频的词性作为其词性，也能够实现很高的准确率。只要我们对常用词的词性能够进行很精准的识别，使用时也能够覆盖绝大多数的场景。",{"type":18,"tag":26,"props":219,"children":220},{},[221],{"type":24,"value":222},"词性标注最简单的方法就是从语料库中统计每个词所对应的高频词性，将其作为默认的词性，但基于这种方法的词性标注还是有提升空间的。目前较为主流的方法和分词相似，将句子的词性标注作为一个序列标注问题看待，这样隐马尔可夫模型、条件随机场模型都可以应用于词性标注任务中。",{"type":18,"tag":26,"props":224,"children":225},{},[226,228],{"type":24,"value":227},"2.2 
",{"type":18,"tag":30,"props":229,"children":230},{},[231],{"type":24,"value":232},"命名实体识别",{"type":18,"tag":26,"props":234,"children":235},{},[236],{"type":24,"value":237},"中文命名实体识别主要有以下的难点：各类命名实体数量众多、命名实体的构成规律复杂、嵌套情况复杂、长度不确定。",{"type":18,"tag":26,"props":239,"children":240},{},[241],{"type":24,"value":242},"在分词的介绍中，我们主要列出来三种方式：基于规则的方法、基于统计的方法以及混合使用方法。在整个NLP的命名实体识别中也不例外。",{"type":18,"tag":26,"props":244,"children":245},{},[246],{"type":24,"value":247},"基于规则的命名实体识别：规则加词典是早期命名实体识别中最行之有效的方法，主要依赖于手工规则的系统，结合命名实体库，对每一条规则进行权重的赋值，然后再通过实体与规则的相符程度进行类型的判断。当提取的规则能够较好的反应语言的现象时，此方法的效果明显优于其他方法。但是在大多数的情境下，规则往往依赖于具体的语言、领域和文本的风格，并且其编制的过程非常耗时，也难以涵盖所有的语言现象，更新维护非常困难。",{"type":18,"tag":26,"props":249,"children":250},{},[251],{"type":24,"value":252},"基于统计的命名实体识别：目前主流的基于统计的命名实体识别方法主要有隐马尔可夫模型、最大熵模型、条件随机场等等。主要的思想是：基于人工标注的语料，将命名实体识别任务作为序列标注问题来解决。基于统计方法对语料库质量的依赖比较大，而规模大质量高的语料库很少，是此类方法的一个制约。",{"type":18,"tag":26,"props":254,"children":255},{},[256],{"type":24,"value":257},"混合方法：NLP并不完全是随机的过程，如果仅使用基于统计的方法会使搜索空间非常的庞大，所以需要提前借助规则方法进行过滤修剪处理。所以在很多情况下是使用混合方法的。",{"type":18,"tag":26,"props":259,"children":260},{},[261],{"type":24,"value":262},"在进入条件随机场之前，我们首先要了解下HMM。这里面有两个非常关键的假设：一是输出观察值之间相互独立，二是状态的转移过程中当前状态只与前一状态有关。因为这两个假设的成立，使得HMM便于计算。但是在多数的场景下，尤其是在大量真实语料中，观察序列更多是以一种多重的交互特征形式表现出来的，观察到元素之间广泛存在着长程相关性。此时的HMM就受到很大的限制。",{"type":18,"tag":26,"props":264,"children":265},{},[266],{"type":24,"value":267},"由于上述原因，条件随机场被开创出来，主要的思想是源于HMM的，也是一种用来标记和切分序列化数据的统计模型。不同的是，条件随机场是在给定的标记序列下，计算整个标记序列的联合概率，而HMM则是在给定当前状态下，去定义下一个状态的分布。",{"type":18,"tag":26,"props":269,"children":270},{},[271],{"type":24,"value":272},"条件随机场的定义：",{"type":18,"tag":26,"props":274,"children":275},{},[276],{"type":24,"value":277},"假设X=(X1,X2,X3,…,Xn)和Y=(Y1,Y2,Y3,…,Ym)是联合随机变量，若随机变量Y构成一个无向图G=(V,E)表示的马尔可夫模型，则其条件概率分布P(Y|X)就称为条件随机场(Conditional Random 
Field，CRF)，公式表示为：",{"type":18,"tag":26,"props":279,"children":280},{},[281],{"type":18,"tag":159,"props":282,"children":284},{"alt":7,"src":283},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/97aeaccf8d3f48e5a4baf97d6525e114.png",[],{"type":18,"tag":26,"props":286,"children":287},{},[288],{"type":24,"value":289},"其中w~v表示图G=(V,E)中与节点v有边连接的所有结点，w!=v表示节点v以外的所有结点。",{"type":18,"tag":26,"props":291,"children":292},{},[293],{"type":24,"value":294},"在这里简单的说明一下随机场的概念：现有若干个位置组成的整体，当给某一个位置按照某种分布随机的赋予一个值后，则该整体被称为随机场。如果以机构地名为例子，并假定如下规则。",{"type":18,"tag":26,"props":296,"children":297},{},[298],{"type":18,"tag":159,"props":299,"children":301},{"alt":7,"src":300},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/963471f4733d4239acfffff79f5088e0.png",[],{"type":18,"tag":26,"props":303,"children":304},{},[305],{"type":24,"value":306},"图1：标注表",{"type":18,"tag":26,"props":308,"children":309},{},[310],{"type":24,"value":311},"现有n个字符构成的NER的句子，每个字符的标签都在我们已知的标签集合中选择好，当我们为每个字符选定标签后，就形成一个随机场。若在其中加入一些约束，比如所有的字符的标签只与相邻的字符的标签相关，那么此时就是马尔可夫随机场问题。马尔可夫随机场中有X和Y两种变量，X一般是给定的，Y是在给定X条件下的输出。那么在这里，X是字符，Y是标签，P(Y|X)就是条件随机场。",{"type":18,"tag":26,"props":313,"children":314},{},[315],{"type":24,"value":316},"在条件随机场的定义中，我们并未规定变量X与Y具有相同的结构，实际在自然语言处理中，很多情况下假设其结构是相似的，表示为",{"type":18,"tag":26,"props":318,"children":319},{},[320],{"type":24,"value":321},"X=(X1,X2,X3,…,Xn)，Y=(Y1,Y2,Y3,…,Ym)",{"type":18,"tag":26,"props":323,"children":324},{},[325],{"type":18,"tag":159,"props":326,"children":328},{"alt":7,"src":327},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/a3fab627a224479b8af432490ce767d5.png",[],{"type":18,"tag":26,"props":330,"children":331},{},[332],{"type":24,"value":333},"图2：线性条件随机场",{"type":18,"tag":26,"props":335,"children":336},{},[337],{"type":24,"value":338},"一般称这种结构为线性链条件随机场，可以定义为：假设X=(X1,X2,X3,…,Xn)和Y=(Y1,Y2,Y3,…,Ym)均为线性链表示的随机变量序列，若在给定的随机变量序列X的条件下，随机变量序列Y的条件概率分布P(Y|X)构成条件随机场，并且满足马尔可夫性质：",{"type":18,"tag":26,"props"
:340,"children":341},{},[342],{"type":24,"value":343},"P(Yi|X,Y1,Y2,…,Ym)=P(Yi|X,Yi-1，Yi+1)",{"type":18,"tag":26,"props":345,"children":346},{},[347],{"type":24,"value":348},"那么，可以称P(Y|X)为线性链的条件随机场。",{"type":18,"tag":26,"props":350,"children":351},{},[352],{"type":24,"value":353},"对比于HMM，这里的线性链不仅考虑了上一个状态Yi-1，还考虑了后面一个状态Yi+1。可以通过下图直观表示。",{"type":18,"tag":26,"props":355,"children":356},{},[357],{"type":18,"tag":159,"props":358,"children":360},{"alt":7,"src":359},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/d419a273bf4a499a97d030b38a8363bb.png",[],{"type":18,"tag":26,"props":362,"children":363},{},[364],{"type":24,"value":365},"图3：HMM与线性链",{"type":18,"tag":26,"props":367,"children":368},{},[369],{"type":24,"value":370},"在该图中可以看到HMM属于一个有向图，而本次重点的线性链是一个无向图，也因此，HMM处理时，本次状态依赖于上一个状态，而线性链则是依赖于当前状态的周围节点的状态。",{"type":18,"tag":26,"props":372,"children":373},{},[374,376],{"type":24,"value":375},"3. ",{"type":18,"tag":30,"props":377,"children":378},{},[379],{"type":24,"value":380},"关键词提取",{"type":18,"tag":26,"props":382,"children":383},{},[384],{"type":24,"value":385},"关键词提取算法一般可以分为有监督和无监督两类。有监督的关键词提取方法主要是通过分类的方式进行，首先通过创建一个比较丰富完善的词表，然后通过计算相似度判断每个文档与词表中每个词的匹配程度，类似打标签的方式，以此达到关键词提取的效果。有监督的方法虽然可以获取到较高的提取精度，但是需要大批量的标注数据，人工成本非常高。",{"type":18,"tag":26,"props":387,"children":388},{},[389,391],{"type":24,"value":390},"3.1 ",{"type":18,"tag":30,"props":392,"children":393},{},[394],{"type":24,"value":395},"TF-IDF",{"type":18,"tag":26,"props":397,"children":398},{},[399],{"type":24,"value":400},"TF-IDF算法（Term Frequency-Inverse Document 
Frequency，词频-逆文档频次算法）是一种基于统计的计算方法，常用于评估在一个文档集中一个词对某份文档的重要程度。这种思想是符合关键词抽取的需求，一个词语对文档越重要，那么是关键词的概率就越大，所以通常将TF-IDF算法应用在关键词提取中。",{"type":18,"tag":26,"props":402,"children":403},{},[404],{"type":24,"value":405},"首先从算法的名称分析，TF-IDF算法是由两部分组成：TF算法和IDF算法。TF算法是统计一个词在一篇文档中出现的频次，基本思想理解为：一个词在一篇文档中出现的次数越多，那么这个词对文档的表达能力就越强。而IDF算法是统计一个词在文档集中的多少个文档中出现，基本思想理解为：如果一个词在越少数的文档中出现，则对文档的区分能力就越强。",{"type":18,"tag":26,"props":407,"children":408},{},[409],{"type":18,"tag":159,"props":410,"children":412},{"alt":7,"src":411},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/c4f9c565be1b4456b250d8b5fcb33a43.png",[],{"type":18,"tag":26,"props":414,"children":415},{},[416],{"type":24,"value":417},"TF-IDF算法如上图中所示，TF-IDF算法就是TF算法与IDF算法的综合使用，对于这两种算法的组合，以取IDF算法值的对数，相乘是较为有效的计算方式。",{"type":18,"tag":26,"props":419,"children":420},{},[421],{"type":24,"value":422},"除了上述提到的传统TF-IDF算法之外，TF-IDF算法还有很多变种的加权方法。传统的TF-IDF算法中，仅仅考虑到了词的两个统计信息。因此对算法进行合理的改造和补充，这样可以更好的得到想要的结果。",{"type":18,"tag":26,"props":424,"children":425},{},[426,428],{"type":24,"value":427},"3.2 
",{"type":18,"tag":30,"props":429,"children":430},{},[431],{"type":24,"value":432},"TextRank算法",{"type":18,"tag":26,"props":434,"children":435},{},[436],{"type":24,"value":437},"在上述的TF-IDF算法中，都需要基于一个现成的语料库，主题模型的关键词提取算法则是需要通过对大规模文档学习，发现文档的隐含主题。而TextRank算法则是可以脱离语料库的基础，仅对单篇文档进行分析就可以提取该文档的关键词。这也是TextRank算法的重要特点。TextRank算法的基本思想源于Google的PageRank算法。因此这里需要先了解下PageRank算法。",{"type":18,"tag":26,"props":439,"children":440},{},[441],{"type":18,"tag":159,"props":442,"children":444},{"alt":7,"src":443},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/d815fcf4b0aa4c3a9859b6878a63a38d.png",[],{"type":18,"tag":26,"props":446,"children":447},{},[448],{"type":24,"value":449},"图4：PageRank算法示意图",{"type":18,"tag":26,"props":451,"children":452},{},[453],{"type":24,"value":454},"PageRank算法是一种网页排名算法，其基本思想有两个：（1）链接数量。一个网页被越多的其他网页链接，表示这个网页越重要；（2）链接质量。一个网页被一个越高权值的网页链接，也表示这个网页越重要。",{"type":18,"tag":26,"props":456,"children":457},{},[458],{"type":18,"tag":159,"props":459,"children":461},{"alt":7,"src":460},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/a71847f301044062b88fab88f53ffa02.png",[],{"type":18,"tag":26,"props":463,"children":464},{},[465],{"type":24,"value":466},"上述便是PageRank算法的理论，也是TextRank算法的理论基础。不同的是PageRank是有向无权图，而TextRank进行自动摘要则属于有权图，因为在计分时除了考虑链接句子的重要性外，还要考虑两个句子的相似性。因此TextRank的完整表达式为",{"type":18,"tag":26,"props":468,"children":469},{},[470],{"type":18,"tag":159,"props":471,"children":473},{"alt":7,"src":472},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/d1739a4f01a94ad8903fd9b364ced3cb.png",[],{"type":18,"tag":26,"props":475,"children":476},{},[477],{"type":24,"value":478},"在计算每个句子给他链接句的贡献时，就不采用平均分配的方式，而是通过计算权重占总权重的比例进行分配，这里的权重就是两个句子的相似度值。相似度计算的方法可以采用距离相似度、余弦相似度等。在对一篇文档进行自动摘要的时候，默认每个语句和其他语句都有链接关系，也就是有向完全图了。",{"type":18,"tag":26,"props":480,"children":481},{},[482],{"type":24,"value":483},"当TextRank应用到关键字抽取的时候，与应用在自动摘要中有两个不同的地方：（1）词与词之间的关联没有权重；（2）每个词不是与其余所有词都有链接。",{"type":18,"tag":26,"props":485
,"children":486},{},[487],{"type":24,"value":488},"由于第一点的不同，那么TextRank重点分数计算将会退化，将得分平均贡献给每个链接的词。",{"type":18,"tag":26,"props":490,"children":491},{},[492],{"type":18,"tag":159,"props":493,"children":495},{"alt":7,"src":494},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2021/01/11/ce5e4579f40b464c801d1dbad11b824f.png",[],{"type":18,"tag":26,"props":497,"children":498},{},[499],{"type":24,"value":500},"对于第二点的不同，既然每个词与其余所有词并不是都相连，那么他们中间的链接关系该如何设定呢。这里的TextRank应用在关键字提取中时，加入了一个窗口的概念，在窗口中的词都是互相链接的。",{"type":18,"tag":26,"props":502,"children":503},{},[504],{"type":18,"tag":30,"props":505,"children":506},{},[507],{"type":24,"value":508},"总结：",{"type":18,"tag":26,"props":510,"children":511},{},[512],{"type":24,"value":513},"本次的内容主要是自然语言处理技术的分享，基本标准是在将中文语言处理好后，才能够让网络模型更好的接入使用，实现比较高级的智能语言应用。",{"type":18,"tag":26,"props":515,"children":516},{},[517],{"type":24,"value":518},"详细内容分享请移步到MindSpore论坛中查看：",{"type":18,"tag":26,"props":520,"children":521},{},[522],{"type":18,"tag":523,"props":524,"children":528},"a",{"href":525,"rel":526},"https://bbs.huaweicloud.com/forum/thread-78366-1-1.html",[527],"nofollow",[529],{"type":24,"value":530},"自然语言处理—分词技术",{"type":18,"tag":26,"props":532,"children":533},{},[534],{"type":18,"tag":523,"props":535,"children":538},{"href":536,"rel":537},"https://bbs.huaweicloud.com/forum/forum.php?mod=viewthread&tid=79618",[527],[539],{"type":24,"value":540},"自然语言处理——词性标注和命名实体识别",{"type":18,"tag":26,"props":542,"children":543},{},[544],{"type":18,"tag":523,"props":545,"children":548},{"href":546,"rel":547},"https://bbs.huaweicloud.com/forum/forum.php?mod=viewthread&tid=82044",[527],[549],{"type":24,"value":550},"自然语言处理之——关键词提取",{"type":18,"tag":26,"props":552,"children":553},{},[554],{"type":24,"value":555},"以上是个人的一些见解，理解有限，欢迎大家去论坛相关帖下指正讨论！",{"title":7,"searchDepth":557,"depth":557,"links":558},4,[],"markdown","content:technology-blogs:zh:372.md","content","technology-blogs/zh/372.md","technology-blogs/zh/372","md",1776506134041]