
Commit 1e20c89

fix setup.py in python2.7

1 parent 5704e23 · commit 1e20c89

File tree

3 files changed: +7 −4 lines changed

Changelog

+3

@@ -1,3 +1,6 @@
+2019-1-20: version 0.42.1
+1. Fix setup.py not working on Python 2.7 (issue #809)
+
 2019-1-13: version 0.42
 1. Fix coredump on empty strings in paddle mode @JesseyXujin
 2. Fix characters being dropped in cut_all mode segmentation @fxsjy

jieba/__init__.py

+2 −2

@@ -1,6 +1,6 @@
 from __future__ import absolute_import, unicode_literals
 
-__version__ = '0.42'
+__version__ = '0.42.1'
 __license__ = 'MIT'
 
 import marshal
@@ -300,7 +300,7 @@ def cut(self, sentence, cut_all=False, HMM=True, use_paddle=False):
         sentence = strdecode(sentence)
         if use_paddle and is_paddle_installed:
             # if sentence is null, it will raise core exception in paddle.
-            if sentence is None or sentence == "" or sentence == u"":
+            if sentence is None or len(sentence) == 0:
                 return
             import jieba.lac_small.predict as predict
             results = predict.get_sent(sentence)
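
The new guard is simpler and behaves the same on Python 2 and 3: in Python 2, "" == u"" is already True, so the old double comparison was redundant, and the `is None` test short-circuits before `len()` can be called on None. A minimal sketch of the guard's behavior (should_skip is a hypothetical helper, not part of jieba's API):

# -*- coding: utf-8 -*-
# Minimal sketch of the guard added in jieba/__init__.py;
# should_skip is a hypothetical helper, not part of jieba's API.

def should_skip(sentence):
    # `is None` is tested first, so len() is never called on None.
    return sentence is None or len(sentence) == 0

assert should_skip(None)
assert should_skip("")         # empty str (bytes on Python 2, text on Python 3)
assert should_skip(u"")        # empty unicode string
assert not should_skip(u"北京")
print("guard behaves identically on Python 2.7 and 3")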

setup.py

+2 −2

@@ -43,7 +43,7 @@
 """
 
 setup(name='jieba',
-      version='0.42',
+      version='0.42.1',
       description='Chinese Words Segmentation Utilities',
       long_description=LONGDOC,
       author='Sun, Junyi',
@@ -71,5 +71,5 @@
       keywords='NLP,tokenizing,Chinese word segementation',
       packages=['jieba'],
       package_dir={'jieba':'jieba'},
-      package_data={'jieba':['*.*','finalseg/*','analyse/*','posseg/*', 'lac_small/*','lac_small/model_baseline/*']}
+      package_data={'jieba':['*.*','finalseg/*','analyse/*','posseg/*', 'lac_small/*.py','lac_small/*.dic', 'lac_small/model_baseline/*']}
 )
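
The package_data change is the substance of the Python 2.7 fix: the broad 'lac_small/*' pattern also matches the model_baseline subdirectory, while the narrower '*.py' and '*.dic' patterns match only regular files. Judging by the commit message, that directory match is what broke installation under Python 2.7 (issue #809). A small sketch of the glob difference, assuming a checkout of the jieba source tree (distutils expands package_data patterns with the same glob semantics):

# Sketch only: assumes you run this from the root of a jieba checkout.
import glob
import os

broad = glob.glob(os.path.join('jieba', 'lac_small', '*'))
narrow = (glob.glob(os.path.join('jieba', 'lac_small', '*.py')) +
          glob.glob(os.path.join('jieba', 'lac_small', '*.dic')))

# The broad pattern also matches the model_baseline directory;
# the narrow patterns match only regular files.
print([p for p in broad if os.path.isdir(p)])   # e.g. ['jieba/lac_small/model_baseline']
print([p for p in narrow if os.path.isdir(p)])  # []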
