Next, we devote a section to CKIP, the suite of word-segmentation tools developed by Academia Sinica. According to Academia Sinica's introduction, the toolkit performs word segmentation, POS tagging, and named entity recognition (NER). You can consult the Chinese and English introductions on Academia Sinica's GitHub. However, CKIP has not been packaged for R, so using it requires calling Python from within R, which is done through the reticulate package.
Let's first look at reticulate's official documentation. Its stated purpose is to be an R Interface to Python. The workflow is: install the reticulate package, then install Python, then install the Python packages you want, such as the ubiquitous NumPy and Pandas. After that there are two ways to use it: treat Python objects as R objects, or write Python code directly inside R. A minimal sketch of the two modes follows below.
Even if you already have Python installed, I still recommend installing a fresh copy, because users unfamiliar with environment setup and paths run into errors all too easily. I remember my first time with this package: I installed the packages I wanted and eagerly set out to use Python freely within R, only to find that the packages had not been installed under the path of the Python version being called, and then to hit package-version conflicts on top of that. One challenge after another; it was genuinely painful.
Beyond Python itself, there is also the matter of environments. For example, you will find many people recommending conda, but anyone unfamiliar with Anaconda will keep hitting errors there too: you may run py_discover_config() and use_python() several times, then see py_available() return TRUE and think you are done, only to be caught out yet again.
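For reference, that diagnostic sequence looks roughly like this (a minimal sketch; the path is a placeholder you would replace with your own):
library(reticulate)
py_discover_config() # list the Python installations reticulate can see
use_python("/path/to/python") # placeholder path: bind reticulate to one explicitly
py_available() # TRUE once reticulate has initialized a Python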
Below is the workflow after I walked through it once; feel free to try it yourself. This passage was originally adapted from a 2019 post in an R-language community group, with some modifications, because the original code ran into errors: I reinstalled Python, created an environment that does not depend on conda, and installed the packages into that environment.
# Install reticulate first if you haven't already
library(reticulate)
# I want to install Python 3.9
version <- "3.9.12"
# Commented out because I already installed it on my first run
# install_python(version)
# Create a new virtual environment using this Python version
virtualenv_create("my-environment", version = version)
## virtualenv: my-environment
use_virtualenv("my-environment")
# Only needed when inspecting the configuration; you can skip it
# py_discover_config()
# Confirm that Python is actually available
os = import("os")
os$listdir(".")
py_available() # TRUE
## [1] TRUE
# Install the packages; skipped here because I already installed them
# py_install(packages = 'tensorflow', pip = T)
# py_install(packages = 'ckiptagger', pip = T)
# Import the package
ckip <- import("ckiptagger")
# If you haven't downloaded the model files yet, download them first
# model:
# https://drive.google.com/drive/folders/105IKCb88evUyLKlLondvDBoh7Dy_I1tm
# Then load the model files
ws = ckip$WS("/Users/macuser/Documents/GitHub/text-mining/ckip/data") # word segmentation
pos = ckip$POS("/Users/macuser/Documents/GitHub/text-mining/ckip/data") # POS tagging
ner = ckip$NER("/Users/macuser/Documents/GitHub/text-mining/ckip/data") # named entity recognition
### ckip test
senten = list("傅達仁今將執行安樂死,卻突然爆出自己20年前遭緯來體育台封殺,他不懂自己哪裡得罪到電視台。",
"美國參議院針對今天總統布什所提名的勞工部長趙小蘭展開認可聽證會,預料她將會很順利通過參議院支持,成為該國有史以來第一位的華裔女性內閣成員。",
"土地公有政策??還是土地婆有政策。.",
"… 你確定嗎… 不要再騙了……",
"最多容納59,000個人,或5.9萬人,再多就不行了.這是環評的結論.",
"科長說:1,坪數對人數為1:3。2,可以再增加。")
# ws: word segmentation
word_senten = ws(senten)
word_senten
## [[1]]
## [1] "傅達仁" "今" "將" "執行" "安樂死" "," "卻" "突然" "爆出" "自己" "20" "年" "前" "遭" "緯來" "體育台"
## [17] "封殺" "," "他" "不" "懂" "自己" "哪裡" "得罪到" "電視台" "。"
##
## [[2]]
## [1] "美國" "參議院" "針對" "今天" "總統" "布什" "所" "提名" "的" "勞工部長" "趙小蘭" "展開" "認可"
## [14] "聽證會" "," "預料" "她" "將" "會" "很" "順利" "通過" "參議院" "支持" "," "成為"
## [27] "該" "國" "有史以來" "第一" "位" "的" "華裔" "女性" "內閣" "成員" "。"
##
## [[3]]
## [1] "土地公" "有" "政策" "?" "?" "還是" "土地" "婆" "有" "政策" "。" "."
##
## [[4]]
## [1] "…" " " "你" "確定" "嗎" "…" " " "不要" "再" "騙" "了" "…" "…"
##
## [[5]]
## [1] "最多" "容納" "59,000" "個" "人" "," "或" "5.9萬" "人" "," "再" "多" "就" "不行" "了" "."
## [17] "這" "是" "環評" "的" "結論" "."
##
## [[6]]
## [1] "科長" "說" ":1," "坪數" "對" "人數" "為" "1:3" "。" "2" "," "可以" "再" "增加" "。"
# pos: POS tagging
pos_senten = pos(word_senten)
pos_senten
## [[1]]
## [1] "Nb" "Nd" "D" "VC" "Na" "COMMACATEGORY" "D" "D"
## [9] "VJ" "Nh" "Neu" "Nf" "Ng" "P" "Nb" "Na"
## [17] "VC" "COMMACATEGORY" "Nh" "D" "VK" "Nh" "Ncd" "VJ"
## [25] "Nc" "PERIODCATEGORY"
##
## [[2]]
## [1] "Nc" "Nc" "P" "Nd" "Na" "Nb" "D" "VC"
## [9] "DE" "Na" "Nb" "VC" "VC" "Na" "COMMACATEGORY" "VE"
## [17] "Nh" "D" "D" "Dfa" "VH" "VC" "Nc" "VC"
## [25] "COMMACATEGORY" "VG" "Nes" "Nc" "D" "Neu" "Nf" "DE"
## [33] "Na" "Na" "Na" "Na" "PERIODCATEGORY"
##
## [[3]]
## [1] "Nb" "V_2" "Na" "QUESTIONCATEGORY" "QUESTIONCATEGORY" "Caa" "Na"
## [8] "Na" "V_2" "Na" "PERIODCATEGORY" "PERIODCATEGORY"
##
## [[4]]
## [1] "ETCCATEGORY" "WHITESPACE" "Nh" "VK" "T" "ETCCATEGORY" "WHITESPACE" "D" "D" "VC"
## [11] "Di" "ETCCATEGORY" "ETCCATEGORY"
##
## [[5]]
## [1] "VH" "VJ" "Neu" "Nf" "Na" "COMMACATEGORY" "Caa" "Neu"
## [9] "Na" "COMMACATEGORY" "D" "D" "D" "VH" "T" "PERIODCATEGORY"
## [17] "Nep" "SHI" "Na" "DE" "Na" "PERIODCATEGORY"
##
## [[6]]
## [1] "Na" "VE" "Neu" "Na" "P" "Na" "VG" "Neu"
## [9] "PERIODCATEGORY" "Neu" "COMMACATEGORY" "D" "D" "VHC" "PERIODCATEGORY"
# ner: named entity recognition
ner_senten = ner(word_senten, pos_senten)
ner_senten
## [[1]]
## {(0, 3, 'PERSON', '傅達仁'), (18, 22, 'DATE', '20年前'), (23, 28, 'ORG', '緯來體育台')}
##
## [[2]]
## {(17, 21, 'ORG', '勞工部長'), (2, 5, 'ORG', '參議院'), (56, 58, 'ORDINAL', '第一'), (7, 9, 'DATE', '今天'), (42, 45, 'ORG', '參議院'), (0, 2, 'GPE', '美國'), (60, 62, 'NORP', '華裔'), (21, 24, 'PERSON', '趙小蘭'), (11, 13, 'PERSON', '布什')}
##
## [[3]]
## {(0, 3, 'PERSON', '土地公')}
##
## [[4]]
## set()
##
## [[5]]
## {(4, 10, 'CARDINAL', '59,000'), (14, 18, 'CARDINAL', '5.9萬')}
##
## [[6]]
## {(4, 6, 'CARDINAL', '1,'), (12, 13, 'CARDINAL', '1'), (16, 17, 'CARDINAL', '2'), (14, 15, 'CARDINAL', '3')}
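Because the NER results come back as Python sets of (start, end, type, word) tuples, as the printed output above suggests, they are awkward to use directly in R. Here is a minimal sketch of flattening them into a data frame (my own helper, not part of ckiptagger):
bi <- reticulate::import_builtins()
ner_df <- do.call(rbind, lapply(seq_along(ner_senten), function(i) {
  ents <- bi$list(ner_senten[[i]]) # convert the Python set into an R list of tuples
  if (length(ents) == 0) return(NULL) # e.g. sentence 4 has no entities
  do.call(rbind, lapply(ents, function(ent) {
    data.frame(sentence = i,
               start = ent[[1]], end = ent[[2]],
               type = ent[[3]], entity = ent[[4]])
  }))
}))
head(ner_df)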
Next is the version where you write the Python script itself inside R, for example in an R Markdown python chunk or interactively via reticulate::repl_python().
# Import the packages
from ckiptagger import data_utils, construct_dictionary, WS, POS, NER
ws = WS("/Users/macuser/Documents/GitHub/text-mining/ckip/data")
pos = POS("/Users/macuser/Documents/GitHub/text-mining/ckip/data")
ner = NER("/Users/macuser/Documents/GitHub/text-mining/ckip/data")
# Build a custom dictionary (malformed entries are dropped, as the printed result shows)
word_to_weight = {
"土地公": 1,
"土地婆": 1,
"公有": 2,
"": 1,
"來亂的": "啦",
"緯來體育台": 1,
}
dictionary = construct_dictionary(word_to_weight)
print(dictionary)
## [(2, {'公有': 2.0}), (3, {'土地公': 1.0, '土地婆': 1.0}), (5, {'緯來體育台': 1.0})]
# Run the CKIP tasks
sentence_list = [
"傅達仁今將執行安樂死,卻突然爆出自己20年前遭緯來體育台封殺,他不懂自己哪裡得罪到電視台。",
"美國參議院針對今天總統布什所提名的勞工部長趙小蘭展開認可聽證會,預料她將會很順利通過參議院支持,成為該國有史以來第一位的華裔女性內閣成員。",
"",
"土地公有政策??還是土地婆有政策。.",
"… 你確定嗎… 不要再騙了……",
"最多容納59,000個人,或5.9萬人,再多就不行了.這是環評的結論.",
"科長說:1,坪數對人數為1:3。2,可以再增加。",
]
word_sentence_list = ws(
sentence_list,
# sentence_segmentation = True, # To consider delimiters
# segment_delimiter_set = {",", "。", ":", "?", "!", ";"}, # This is the default set of delimiters
# recommend_dictionary = dictionary1, # words in this dictionary are encouraged
# coerce_dictionary = dictionary2, # words in this dictionary are forced
)
word_sentence_list
## [['傅達仁', '今', '將', '執行', '安樂死', ',', '卻', '突然', '爆出', '自己', '20', '年', '前', '遭', '緯來', '體育台', '封殺', ',', '他', '不', '懂', '自己', '哪裡', '得罪到', '電視台', '。'], ['美國', '參議院', '針對', '今天', '總統', '布什', '所', '提名', '的', '勞工部長', '趙小蘭', '展開', '認可', '聽證會', ',', '預料', '她', '將', '會', '很', '順利', '通過', '參議院', '支持', ',', '成為', '該', '國', '有史以來', '第一', '位', '的', '華裔', '女性', '內閣', '成員', '。'], [], ['土地公', '有', '政策', '?', '?', '還是', '土地', '婆', '有', '政策', '。', '.'], ['…', ' ', '你', '確定', '嗎', '…', ' ', '不要', '再', '騙', '了', '…', '…'], ['最多', '容納', '59,000', '個', '人', ',', '或', '5.9萬', '人', ',', '再', '多', '就', '不行', '了', '.', '這', '是', '環評', '的', '結論', '.'], ['科長', '說', ':1,', '坪數', '對', '人數', '為', '1:3', '。', '2', ',', '可以', '再', '增加', '。']]
pos_sentence_list = pos(word_sentence_list)
pos_sentence_list
## [['Nb', 'Nd', 'D', 'VC', 'Na', 'COMMACATEGORY', 'D', 'D', 'VJ', 'Nh', 'Neu', 'Nf', 'Ng', 'P', 'Nb', 'Na', 'VC', 'COMMACATEGORY', 'Nh', 'D', 'VK', 'Nh', 'Ncd', 'VJ', 'Nc', 'PERIODCATEGORY'], ['Nc', 'Nc', 'P', 'Nd', 'Na', 'Nb', 'D', 'VC', 'DE', 'Na', 'Nb', 'VC', 'VC', 'Na', 'COMMACATEGORY', 'VE', 'Nh', 'D', 'D', 'Dfa', 'VH', 'VC', 'Nc', 'VC', 'COMMACATEGORY', 'VG', 'Nes', 'Nc', 'D', 'Neu', 'Nf', 'DE', 'Na', 'Na', 'Na', 'Na', 'PERIODCATEGORY'], [], ['Nb', 'V_2', 'Na', 'QUESTIONCATEGORY', 'QUESTIONCATEGORY', 'Caa', 'Na', 'Na', 'V_2', 'Na', 'PERIODCATEGORY', 'PERIODCATEGORY'], ['ETCCATEGORY', 'WHITESPACE', 'Nh', 'VK', 'T', 'ETCCATEGORY', 'WHITESPACE', 'D', 'D', 'VC', 'Di', 'ETCCATEGORY', 'ETCCATEGORY'], ['VH', 'VJ', 'Neu', 'Nf', 'Na', 'COMMACATEGORY', 'Caa', 'Neu', 'Na', 'COMMACATEGORY', 'D', 'D', 'D', 'VH', 'T', 'PERIODCATEGORY', 'Nep', 'SHI', 'Na', 'DE', 'Na', 'PERIODCATEGORY'], ['Na', 'VE', 'Neu', 'Na', 'P', 'Na', 'VG', 'Neu', 'PERIODCATEGORY', 'Neu', 'COMMACATEGORY', 'D', 'D', 'VHC', 'PERIODCATEGORY']]
entity_sentence_list = ner(word_sentence_list, pos_sentence_list)
entity_sentence_list
## [{(0, 3, 'PERSON', '傅達仁'), (18, 22, 'DATE', '20年前'), (23, 28, 'ORG', '緯來體育台')}, {(17, 21, 'ORG', '勞工部長'), (2, 5, 'ORG', '參議院'), (56, 58, 'ORDINAL', '第一'), (7, 9, 'DATE', '今天'), (42, 45, 'ORG', '參議院'), (0, 2, 'GPE', '美國'), (60, 62, 'NORP', '華裔'), (21, 24, 'PERSON', '趙小蘭'), (11, 13, 'PERSON', '布什')}, set(), {(0, 3, 'PERSON', '土地公')}, set(), {(4, 10, 'CARDINAL', '59,000'), (14, 18, 'CARDINAL', '5.9萬')}, {(4, 6, 'CARDINAL', '1,'), (12, 13, 'CARDINAL', '1'), (16, 17, 'CARDINAL', '2'), (14, 15, 'CARDINAL', '3')}]
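If you instead keep the script as a standalone file, you can also execute it from R and read the results back through reticulate (a minimal sketch; ckip_demo.py is a hypothetical name for a file containing the script above):
library(reticulate)
py_run_file("ckip_demo.py") # hypothetical file holding the Python script above
py$word_sentence_list[[1]] # Python variables are exposed to R via the `py` object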
That wraps up segmentation with CKIP. Compared with the three packages introduced earlier it is far slower, so why use it at all? Because it has several major advantages. First, it is designed specifically for Traditional Chinese and trained on Taiwanese corpora, so it performs better than tidytext, quanteda, and jiebaR, none of which target Traditional Chinese. Second, its POS tagging and NER are strong; other packages' POS or NER features, where they exist at all, do not measure up (jiebaR's POS tagging is mediocre, and it has no NER). Third, like jiebaR, it lets you build your own dictionary, which suits the corpus-specific tasks mentioned earlier; a closing sketch of the dictionary feature through the R interface follows below.
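For instance, the custom dictionary from the Python example can in principle be built and used from the R side as well (a minimal sketch reusing the ckip module and senten list from earlier; named R lists convert to Python dicts automatically):
word_to_weight <- list("土地公" = 1, "土地婆" = 1, "公有" = 2, "緯來體育台" = 1)
dictionary <- ckip$construct_dictionary(word_to_weight)
# Words in recommend_dictionary are favored (not forced) during segmentation
ws(senten, recommend_dictionary = dictionary)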