Collection of commonly used Python commands

一、Common pandas commands

1、Reset the index
location_data = location_data.reset_index(drop=True)
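For context, a minimal sketch with made-up data (the DataFrame location_data and its columns are just examples): after filtering, drop=True discards the old, gappy index and renumbers the rows from 0.

import pandas as pd

location_data = pd.DataFrame({'city': ['北京', '上海', '广州'], 'distance': [5, 12, 3]})
location_data = location_data[location_data['distance'] < 10]   # keeps rows with index 0 and 2
location_data = location_data.reset_index(drop=True)            # index becomes 0, 1 again; the old index is not kept as a column
print(location_data)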
2、Drop duplicates based on specified columns
data = result_data.drop_duplicates(subset = ['集团客户ID', '集团客户名称', '证件地址'], keep = 'first')
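A self-contained sketch with toy data using the same column names: only the columns listed in subset are compared, and keep='first' retains the first occurrence of each duplicated combination.

import pandas as pd

result_data = pd.DataFrame({'集团客户ID': [1, 1, 2], '集团客户名称': ['A', 'A', 'B'],
                            '证件地址': ['x', 'x', 'y'], '其它列': [10, 20, 30]})
data = result_data.drop_duplicates(subset=['集团客户ID', '集团客户名称', '证件地址'], keep='first')
print(data)   # the second row is dropped even though '其它列' differs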
3、Merge on multiple columns
merge_data = pd.merge(boss_data, data[['ID', '名称', '地址', '省名称', 'output']],
                      how='left', left_on=['ID', '名称', '地址', '省名称'],
                      right_on=['ID', '名称', '地址', '省名称'])
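A minimal sketch with toy frames (boss_data and data stand in for the original tables): when the key columns have the same names on both sides, passing on=[...] is equivalent to giving the same list to left_on and right_on.

import pandas as pd

boss_data = pd.DataFrame({'ID': [1, 2], '名称': ['甲', '乙'], '地址': ['a', 'b'], '省名称': ['p1', 'p2']})
data = pd.DataFrame({'ID': [1], '名称': ['甲'], '地址': ['a'], '省名称': ['p1'], 'output': ['ok']})
merge_data = pd.merge(boss_data, data[['ID', '名称', '地址', '省名称', 'output']],
                      how='left', on=['ID', '名称', '地址', '省名称'])
print(merge_data)   # rows without a match get NaN in the 'output' column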
4、Parse a dict column in pandas and expand it into multiple columns
# example setup: every row of the column holds a dict (in practice each row carries its own values)
data['enterprise_information'] = [{"企业名称": 1, "企业地址": 2, "经度": 3, "纬度": 4}] * len(data)

def formatrow(row):
    # unpack the dict stored in one cell into four separate values
    if row['enterprise_information']:
        res_row = row['enterprise_information']
        return res_row["企业名称"], res_row["企业地址"], res_row["经度"], res_row["纬度"]
    else:
        # no information available: fill the four new columns with None
        return None, None, None, None

data[["企业名称", "企业地址", "经度", "纬度"]] = data.apply(formatrow, axis=1, result_type="expand")

二、Regular-expression commands

1、Return every matching substring together with its position: re.finditer
import re
pattern = re.compile(r'(\d*).(椅子|桌子)')  # the bare . matches the measure word (个 / 张)
s='这里有3个椅子和10张桌子'
f=re.finditer(pattern,s)
print(f)
for i in f: 
    print(i)
    print(i.group())
    print(i.group(1))
    print(i.group(2))
    print(i.span())

Output:

<callable_iterator object at 0x7f73c5018710>
<re.Match object; span=(3, 7), match='3个椅子'>
3个椅子
3
椅子
(3, 7)
<re.Match object; span=(8, 13), match='10张桌子'>
10张桌子
10
桌子
(8, 13)
2、Iterate over all matching combinations

Add the non-greedy qualifier ? so that .*? matches as little text as possible:

import re
meanwhile ='账户(.*?)不存在'

pattern = re.compile(meanwhile)
s='账户不存在,显示银行账户已经不存在或者已销户,但客户表示未做更改。 '
f=re.finditer(pattern,s)
print(f)
for i in f:
    print(i)
    print(i.group())
    print(i.group(1))
    print(i.span())

Output:

<callable_iterator object at 0x7f73c5022e90>
<re.Match object; span=(0, 5), match='账户不存在'>
账户不存在
(group(1) is empty for the first match, so print(i.group(1)) produces a blank line)
(0, 5)
<re.Match object; span=(10, 17), match='账户已经不存在'>
账户已经不存在
已经
(10, 17)
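To make the effect of ? concrete, a small sketch contrasting the greedy and non-greedy forms on the same sentence (findall simply collects group(1) of each match):

import re
s = '账户不存在,显示银行账户已经不存在或者已销户,但客户表示未做更改。 '
print(re.findall('账户(.*)不存在', s))    # greedy: ['不存在,显示银行账户已经'] (one long match)
print(re.findall('账户(.*?)不存在', s))   # non-greedy: ['', '已经'] (two short matches)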

三、numpy and general Python commands

1、Return the index of the minimum value
np.argmin(value['distance'])  # returns the position of the smallest element
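A self-contained sketch (value is assumed here to be a dict holding a 'distance' array): argmin gives the position of the smallest element, which can then index back into the original data.

import numpy as np

value = {'distance': np.array([12.5, 3.2, 7.8])}
nearest_idx = np.argmin(value['distance'])
print(nearest_idx)                      # 1
print(value['distance'][nearest_idx])   # 3.2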
2、Get the index while looping
list_data = [1,2,3]
for index, index_data in enumerate(list_data):
    print(index)
3、Deduplicate a list containing arbitrary (including unhashable) elements
def list_unique(word_list):
    """
    Order-preserving deduplication; works for unhashable elements
    (e.g. dicts) because membership is checked with `in` rather than a set.
    """
    temp_old = []
    for item in word_list:
        if item not in temp_old:
            temp_old.append(item)
    return temp_old
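Usage example for the function above:

print(list_unique([1, 2, 2, 3]))                     # [1, 2, 3]
print(list_unique([{'a': 1}, {'a': 1}, {'b': 2}]))   # [{'a': 1}, {'b': 2}]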
4、Get the key with the largest value in a dict
score_dict = {'a':1,'b':2,'c':3}
top_rule = max(score_dict, key=score_dict.get)  # pick the rule with the highest score

Output:

'c'
5、Filter a dict by its values
score_dict = {'a':1,'b':2,'c':3}
dict(filter(lambda x: x[1] >= 2, score_dict.items()))

Output:

{'b': 2, 'c': 3}
6、Count element frequencies in a list
from collections import Counter
total_list = [1,2,3,3]
topn_word = 2
counter = Counter(total_list)  # count the frequency of each element (here: overall work-order counts)
counter.most_common(topn_word)

Output:

>>> counter
Counter({3: 2, 1: 1, 2: 1})
>>> counter.most_common(topn_word)
[(3, 2), (1, 1)]

四、Saving machine-learning models

Using pickle
1、Save as pickle
import pickle
# save the model (note: the target directory must already exist, otherwise open() raises an error)
with open('/order_analysis/model/svc.pickle', 'wb') as f:
    pickle.dump(model, f)
2、Load the pickle
# load the model
with open('/order_analysis/model/svc.pickle', 'rb') as f:
    model_load = pickle.load(f)
# test the loaded model
print([id_to_cat[i] for i in model_load.predict(X_test[2:7])])
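For reference, an end-to-end sketch on toy data; the training data, the file path and the id_to_cat mapping below are made up for illustration, not the original pipeline.

import pickle
from sklearn.svm import SVC

X_train = [[0, 0], [1, 1], [0, 1], [1, 0]]
y_train = [0, 1, 1, 0]
model = SVC().fit(X_train, y_train)          # toy model

with open('svc.pickle', 'wb') as f:          # any writable path works
    pickle.dump(model, f)

with open('svc.pickle', 'rb') as f:
    model_load = pickle.load(f)

id_to_cat = {0: 'class_a', 1: 'class_b'}     # hypothetical label mapping
print([id_to_cat[i] for i in model_load.predict([[0, 0], [1, 1]])])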
Using joblib
1、Save as pkl
import joblib  # in older scikit-learn versions this lived at sklearn.externals.joblib
joblib.dump(model_load, '/order_analysis/model/svc.pkl')
2、Load the pkl
# load the model
model_load = joblib.load('/order_analysis/model/svc.pkl')
# test the loaded model
# X_test must be preprocessed in the same way as the training data
print([id_to_cat[i] for i in model_load.predict(X_test[2:7])])
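joblib follows the same dump/load pattern and is often preferred for estimators that carry large numpy arrays. A short sketch with an assumed toy model and example path:

import joblib
from sklearn.svm import SVC

model = SVC().fit([[0, 0], [1, 1]], [0, 1])   # toy model
joblib.dump(model, 'svc.pkl')                 # path is just an example
model_load = joblib.load('svc.pkl')
print(model_load.predict([[1, 1]]))           # predict with the reloaded model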

五、Saving and loading JSON

Using the json module
1、Save JSON
import json
sentence_dict_path = 'keyword.json'
with open(sentence_dict_path, "w", encoding="utf-8") as f:
    f.write(json.dumps(sentence_keyword_dict, ensure_ascii=False, indent=4, separators=(',', ':')))
2、Load JSON
import json
# keyword dictionary consolidated manually
sentence_dict_path = 'keyword.json'
# load the keyword dictionary
with open(sentence_dict_path, 'r', encoding='utf-8') as fp:
    sentence_dict = json.load(fp)
    print(sentence_dict)
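A self-contained round trip (the sentence_keyword_dict contents here are made up): ensure_ascii=False keeps Chinese text readable in the file, and encoding='utf-8' should match on both write and read.

import json

sentence_keyword_dict = {'投诉': ['不满', '退费'], '咨询': ['怎么办']}
with open('keyword.json', 'w', encoding='utf-8') as f:
    json.dump(sentence_keyword_dict, f, ensure_ascii=False, indent=4)

with open('keyword.json', 'r', encoding='utf-8') as f:
    loaded = json.load(f)
print(loaded == sentence_keyword_dict)   # True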
Using pickle
1、Save with pickle
import pickle
keyword_path = 'keyword.pkl'  # use a .pkl extension, since the file is a pickle rather than JSON
with open(keyword_path, 'wb') as f:
    pickle.dump(keyword_dict, f, pickle.HIGHEST_PROTOCOL)
2、Load with pickle
import pickle
with open(keyword_path, 'rb') as f:
    keyword_dict = pickle.load(f)