@@ -6,20 +6,21 @@ import {split as splitSentences, Syntax as SentenceSyntax} from "sentence-splitter";
 import StringSource from "textlint-util-to-string";
 import {
     is助詞Token, is読点Token,
-    createKeyFromKey, restoreToSurfaceFromKey
+    concatJoishiTokens,
+    createKeyFromKey,
+    restoreToSurfaceFromKey
 } from "./token-utils";
 /**
  * Create token map object
  * {
- *  "で": [token, token],
- *  "の": [token, token]
+ *  "は:助詞.係助詞": [token, token]
  * }
  * @param tokens
  * @returns {*}
  */
 function createSurfaceKeyMap(tokens) {
     // 助詞のみを対象とする (target only particle tokens)
-    return tokens.filter(is助詞Token).reduce((keyMap, token) => {
+    return tokens.reduce((keyMap, token) => {
         // "は:助詞.係助詞" : [token]
         const tokenKey = createKeyFromKey(token);
         if (!keyMap[tokenKey]) {
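As a side note for reviewers, here is a minimal, self-contained sketch of what this grouping produces. The `createKeyFromKey` body below is an assumption (the real helper is imported from `./token-utils`), and the token shape follows kuromoji's `surface_form`, `pos`, and `pos_detail_1` fields:

```js
// Assumed stand-in for createKeyFromKey from ./token-utils:
// builds a "surface:pos.pos_detail_1" key for a kuromoji-style token.
const createKeyFromKey = (token) =>
    `${token.surface_form}:${token.pos}.${token.pos_detail_1}`;

// Same grouping as createSurfaceKeyMap in the diff above.
const createSurfaceKeyMap = (tokens) =>
    tokens.reduce((keyMap, token) => {
        const tokenKey = createKeyFromKey(token);
        if (!keyMap[tokenKey]) {
            keyMap[tokenKey] = [];
        }
        keyMap[tokenKey].push(token);
        return keyMap;
    }, {});

const tokens = [
    {surface_form: "は", pos: "助詞", pos_detail_1: "係助詞"},
    {surface_form: "で", pos: "助詞", pos_detail_1: "格助詞"},
    {surface_form: "は", pos: "助詞", pos_detail_1: "係助詞"}
];
console.log(Object.keys(createSurfaceKeyMap(tokens)));
// => ["は:助詞.係助詞", "で:助詞.格助詞"]
```

Note that with `.filter(is助詞Token)` removed here, callers are now expected to pass pre-filtered tokens (the `countableTokens` built later in this diff).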
@@ -70,7 +71,7 @@ export default function(context, options = {}) {
     const isStrict = options.strict || defaultOptions.strict;
     const allow = options.allow || defaultOptions.allow;
     const separatorChars = options.separatorChars || defaultOptions.separatorChars;
-    const {Syntax, report, getSource, RuleError} = context;
+    const {Syntax, report, RuleError} = context;
     return {
         [Syntax.Paragraph](node) {
             if (helper.isChildNode(node, [Syntax.Link, Syntax.Image, Syntax.BlockQuote, Syntax.Emphasis])) {
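The surrounding option handling uses the usual `options.x || defaultOptions.x` fallback. A small sketch of that pattern in isolation; the default values here are illustrative only, not necessarily the rule's real defaults:

```js
// Illustrative defaults only; see the rule's defaultOptions for the real values.
const defaultOptions = {
    strict: false,
    allow: [],
    separatorChars: ["。", "?", "!", "？", "！"]
};

function resolveOptions(options = {}) {
    return {
        isStrict: options.strict || defaultOptions.strict,
        allow: options.allow || defaultOptions.allow,
        separatorChars: options.separatorChars || defaultOptions.separatorChars
    };
}

console.log(resolveOptions({strict: true}).isStrict); // => true
console.log(resolveOptions({}).separatorChars);       // => ["。", "?", "!", "？", "！"]
```

One known quirk of `||` fallbacks is that an explicit falsy value such as `strict: false` falls through to the default, which happens to be harmless here since the default is falsy too.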
@@ -81,13 +82,16 @@ export default function(context, options = {}) {
             const isSentenceNode = node => {
                 return node.type === SentenceSyntax.Sentence;
             };
-            let sentences = splitSentences(text, {
+            const sentences = splitSentences(text, {
                 separatorChars: separatorChars
             }).filter(isSentenceNode);
             return getTokenizer().then(tokenizer => {
                 const checkSentence = (sentence) => {
-                    let tokens = tokenizer.tokenizeForSentence(sentence.raw);
-                    let countableTokens = tokens.filter(token => {
+                    const tokens = tokenizer.tokenizeForSentence(sentence.raw);
+                    // 助詞 + 助詞は一つの助詞として扱う (treat adjacent particles as a single particle)
+                    // https://github.com/textlint-ja/textlint-rule-no-doubled-joshi/issues/15
+                    const concatTokens = concatJoishiTokens(tokens);
+                    const countableTokens = concatTokens.filter(token => {
                         if (isStrict) {
                             return is助詞Token(token);
                         }
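Since `concatJoishiTokens` is only imported here, a hedged sketch of what such a merge could look like, based on the comment and issue #15 above; the implementation and token shape are assumptions, not the actual `./token-utils` code:

```js
// Assumed behavior: merge runs of adjacent 助詞 (particle) tokens, e.g.
// "に" + "は" becomes a single token with surface_form "には".
const is助詞Token = (token) => token.pos === "助詞";

function concatJoishiTokens(tokens) {
    const result = [];
    tokens.forEach((token) => {
        const prev = result[result.length - 1];
        if (prev && is助詞Token(prev) && is助詞Token(token)) {
            // Extend the previous particle instead of pushing a new token.
            prev.surface_form += token.surface_form;
        } else {
            // Copy so the merge never mutates the tokenizer's output.
            result.push(Object.assign({}, token));
        }
    });
    return result;
}

const merged = concatJoishiTokens([
    {surface_form: "に", pos: "助詞", pos_detail_1: "格助詞"},
    {surface_form: "は", pos: "助詞", pos_detail_1: "係助詞"}
]);
console.log(merged.map((t) => t.surface_form)); // => ["には"]
```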
@@ -96,14 +100,14 @@ export default function(context, options = {}) {
                         // https://github.com/azu/textlint-rule-no-doubled-joshi/issues/2
                         return is助詞Token(token) || is読点Token(token);
                     });
-                    let joshiTokenSurfaceKeyMap = createSurfaceKeyMap(countableTokens);
+                    const joshiTokenSurfaceKeyMap = createSurfaceKeyMap(countableTokens);
                     /*
                         # Data Structure

                         joshiTokens = [tokenA, tokenB, tokenC, tokenD, tokenE, tokenF]
                         joshiTokenSurfaceKeyMap = {
-                            "は:助詞.係助詞": [tokenA, tokenC, tokenE],
-                            "で:助詞.係助詞": [tokenB, tokenD, tokenF]
+                            "は:助詞.係助詞": [tokenA, tokenC, tokenE],
+                            "で:助詞.係助詞": [tokenB, tokenD, tokenF]
                         }
                     */
                     Object.keys(joshiTokenSurfaceKeyMap).forEach(key => {
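The `forEach` this hunk ends on walks each key's occurrence list to find particles repeated too close together. A minimal sketch of that detection under assumed data; `min_interval`, `tokenIndex`, and the logging are placeholders for the rule's real option, token bookkeeping, and `report(node, new RuleError(...))` call:

```js
const min_interval = 1; // assumed option: maximum allowed closeness of repeats

// Each token is assumed to carry its index within countableTokens.
const joshiTokenSurfaceKeyMap = {
    "は:助詞.係助詞": [
        {surface_form: "は", tokenIndex: 2},
        {surface_form: "は", tokenIndex: 3}
    ]
};

Object.keys(joshiTokenSurfaceKeyMap).forEach((key) => {
    const tokensOfKey = joshiTokenSurfaceKeyMap[key];
    // Compare each occurrence of the key with the previous one.
    tokensOfKey.reduce((prev, current) => {
        if (current.tokenIndex - prev.tokenIndex <= min_interval) {
            console.log(`doubled joshi: "${current.surface_form}"`);
        }
        return current;
    });
});
// => doubled joshi: "は"
```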