From 433b39d1cf284c5ce48491cae7815e7c3edc3f4d Mon Sep 17 00:00:00 2001
From: 李宗振
Date: Fri, 10 Jun 2022 17:41:03 +0800
Subject: [PATCH] Event analysis tag grouping, item 1 (事件分析标签分组项1)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 api/api_v1/endpoints/query.py | 570 ++++++++++++++++++----------------
 1 file changed, 294 insertions(+), 276 deletions(-)

diff --git a/api/api_v1/endpoints/query.py b/api/api_v1/endpoints/query.py
index 51f6e1b..e314ac0 100644
--- a/api/api_v1/endpoints/query.py
+++ b/api/api_v1/endpoints/query.py
@@ -6,11 +6,11 @@ import os
 from copy import deepcopy
 import pandas as pd
 import numpy as np
-from fastapi import APIRouter, Depends, Request,File
+from fastapi import APIRouter, Depends, Request, File
 from fastapi.encoders import jsonable_encoder
 from motor.motor_asyncio import AsyncIOMotorDatabase
 from fastapi.responses import StreamingResponse
-#from datetime import datetime
+# from datetime import datetime
 
 import crud, schemas
 from common import *
@@ -22,7 +22,7 @@ from db.redisdb import get_redis_pool, RedisDrive
 from models.behavior_analysis import BehaviorAnalysis, CombinationEvent
 from models.user_analysis import UserAnalysis
 from models.x_analysis import XAnalysis
-from utils import DfToStream, getEveryDay, Download_xlsx,jiange_insert,create_df,create_neidf
+from utils import DfToStream, getEveryDay, Download_xlsx, jiange_insert, create_df, create_neidf
 
 router = APIRouter()
 
@@ -37,7 +37,7 @@ async def query_sql(
 ) -> schemas.Msg:
     """原 sql 查询 """
     sql = data_in.sql
-    sql1=sql.lower()
+    sql1 = sql.lower()
     if 'insert' not in sql1 and 'update' not in sql1 and 'delete' not in sql1 and 'select' in sql1:
         sql = sql.replace('$game', game)
         data = await ckdb.execute(sql)
@@ -116,6 +116,7 @@ async def event_model_export(request: Request,
         export = d.to_stream()
     return StreamingResponse(export, media_type=mime, headers={'Content-Disposition': f'filename="{file_name}"'})
 
+
 @router.post("/event_model_pay")
 async def event_model_export(request: Request,
                              game: str,
@@ -123,29 +124,30 @@ async def event_model_export(request: Request,
                              ckdb: CKDrive = Depends(get_ck_db)
                              ):
     """下载日充总额详细订单数据"""
-    sql=f"""select * FROM {game}.event WHERE addHours(`#event_time`, 8) >= '{data_in.start_time}' and addHours(`#event_time`, 8) <= '{data_in.end_time}' and `#event_name` = 'pay' and
+    sql = f"""select * FROM {game}.event WHERE addHours(`#event_time`, 8) >= '{data_in.start_time}' and addHours(`#event_time`, 8) <= '{data_in.end_time}' and `#event_name` = 'pay' and
 orderid NOT LIKE '%GM%' order by `#event_time`"""
     df = await ckdb.query_dataframe(sql)
-    list_columns=list(df.columns.values)
-    drop_list=[]
+    list_columns = list(df.columns.values)
+    drop_list = []
     for i in list_columns:
-        aa=type(df[i][0])
+        aa = type(df[i][0])
         if df[i][0] == None or df[i][0] == [] or df[i][0] == '':
             drop_list.append(i)
         else:
-            if 'time' in i :
+            if 'time' in i:
                 df[i] = df[i].astype(str)
                 for nu in range(len(df)):
-                    df.replace(to_replace=df[i][nu],value=df[i][nu].split('+')[0],inplace=True)
+                    df.replace(to_replace=df[i][nu], value=df[i][nu].split('+')[0], inplace=True)
     df.drop(drop_list, axis=1, inplace=True)
-    file_name=quote(f'订单详情.xlsx')
+    file_name = quote(f'订单详情.xlsx')
     mime = mimetypes.guess_type(file_name)[0]
     df_to_stream = DfToStream((df, '订单详情'))
    with df_to_stream as d:
         export = d.to_stream()
     return StreamingResponse(export, media_type=mime,
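The per-row df.replace loop in event_model_pay above rewrites one cell at a time, which is quadratic in the frame size. A self-contained sketch of the same cleanup done with vectorized pandas; the sample frame and column names are illustrative, not the real event schema:

import pandas as pd

# Toy frame standing in for the ClickHouse result (hypothetical columns).
df = pd.DataFrame({
    'orderid': ['A1', 'B2'],
    '#event_time': ['2022-06-01 10:00:00+08:00', '2022-06-02 11:30:00+08:00'],
    'empty_col': [None, None],
})
# Drop columns whose first value is empty (None, '' or []), as the loop above does.
drop_list = [c for c in df.columns if df[c].iloc[0] in (None, '', [])]
df = df.drop(columns=drop_list)
# Vectorized equivalent of the per-row split('+') that strips the timezone suffix.
for col in df.columns:
    if 'time' in col:
        df[col] = df[col].astype(str).str.split('+').str[0]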
headers={'Content-Disposition': f'filename="{file_name}"'}) + # @router.get("/event_model_export") # async def event_model_export(request: Request, # game: str, @@ -189,11 +191,11 @@ async def event_model( res = [] is_hide = [] group_label = {} - for idx, item in enumerate(sqls): #列出索引下标 + for idx, item in enumerate(sqls): # 列出索引下标 if item.get('is_show') == False: is_hide.append(idx) - #event_name:事件名,日充总额 - #format:float浮点型 + # event_name:事件名,日充总额 + # format:float浮点型 q = { 'groups': [], 'values': [], @@ -212,7 +214,7 @@ async def event_model( values, sum_, avg = combination_event.parse() # q['values'].append(values) - #q['sum'].append(sum_) + # q['sum'].append(sum_) q['avg'].append(avg) q['date_range'] = item['date_range'] for last_value in values[::-1]: @@ -221,40 +223,40 @@ async def event_model( break if list(item.get('event_name'))[-1] == '率': for i in range(len(values)): - values[i]=str((values[i]))+'%' + values[i] = str((values[i])) + '%' q['values'].append(values) - q['sum'].append(str(sum_)+'%') + q['sum'].append(str(sum_) + '%') elif '比' in item['event_name']: for i in range(len(values)): - values[i]=str(int(float(values[i])*100))+'%' + values[i] = str(int(float(values[i]) * 100)) + '%' q['values'].append(values) - q['sum'].append(str(int(float(sum_)*100))+'%') + q['sum'].append(str(int(float(sum_) * 100)) + '%') else: q['values'].append(values) q['sum'].append(sum_) res.append(q) continue - #sql语句 + # sql语句 sql = item['sql'] groupby = item['groupby'] - date_range = item['date_range'] #获取的要查询的每一天的时间 - q['date_range'] = date_range #把要查询的时间加入q字典中 - df = await ckdb.query_dataframe(sql) #以sql语句查出数据,df是二维列表 - df.fillna(0, inplace=True)#以0填补空数据 - #映射对应中文返回给前端展示 + date_range = item['date_range'] # 获取的要查询的每一天的时间 + q['date_range'] = date_range # 把要查询的时间加入q字典中 + df = await ckdb.query_dataframe(sql) # 以sql语句查出数据,df是二维列表 + df.fillna(0, inplace=True) # 以0填补空数据 + # 映射对应中文返回给前端展示 for i in groupby: if i == 'svrindex': if game == 'mfmh5': game = 'mzmfmh5' - chinese={} - resp = await crud.select_map.get_one(db, game,i) + chinese = {} + resp = await crud.select_map.get_one(db, game, i) for ii in resp: - chinese[ii['id']]=ii['title'] - for k,v in chinese.items(): - #开始映射 + chinese[ii['id']] = ii['title'] + for k, v in chinese.items(): + # 开始映射 df.loc[df[i] == k, i] = v - #获取第一矩阵的长度 + # 获取第一矩阵的长度 if df.shape[0] == 0: df = pd.DataFrame({'date': date_range, 'values': 0 * len(date_range)}) # continue @@ -322,10 +324,10 @@ async def event_model( df[columns] = df[columns].astype(str) # 有分组 for group, df_group in df.groupby(groupby): - #在原数据上将索引重新转换为列,新索引的列删除 + # 在原数据上将索引重新转换为列,新索引的列删除 df_group.reset_index(drop=True, inplace=True) - #判断为0的改成未知城市 - if str(group) == '0' and analysis.event_view['groupBy'][0]['columnDesc']== '城市': + # 判断为0的改成未知城市 + if str(group) == '0' and analysis.event_view['groupBy'][0]['columnDesc'] == '城市': q['groups'].append('未知城市') else: if 'str' in str(type(group)): @@ -353,9 +355,9 @@ async def event_model( concat_data = [] for i in set(date_range) - set(df['date']): concat_data.append((i, 0)) - #纵向拼接两个表 + # 纵向拼接两个表 df = pd.concat([df, pd.DataFrame(concat_data, columns=df.columns)]) - #在原数据上按data排序 + # 在原数据上按data排序 df.sort_values('date', inplace=True) if len(df) >= 2: q['chain_ratio'] = division((df.iloc[-1, 1] - df.iloc[-2, 1]) * 100, df.iloc[-2, 1], 2) @@ -366,9 +368,9 @@ async def event_model( if last_value > 0: q['last_value'] = float(last_value) break - #求所有值的和 + # 求所有值的和 q['sum'].append(round(abs(float(df['values'].sum())), 2)) - #求平均值 + # 求平均值 
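A toy reconstruction of the date-padding and summary step above. division (imported by this module) is the project's own helper, so plain round() stands in for it here:

import pandas as pd

date_range = ['2022-06-01', '2022-06-02', '2022-06-03']
df = pd.DataFrame({'date': ['2022-06-02', '2022-06-03'], 'values': [10.0, 15.0]})
# Pad dates the query returned nothing for with zero rows, then sort by date.
missing = [(d, 0) for d in set(date_range) - set(df['date'])]
df = pd.concat([df, pd.DataFrame(missing, columns=df.columns)])
df.sort_values('date', inplace=True)
# Day-over-day change of the last two points, in percent.
prev, last = df.iloc[-2, 1], df.iloc[-1, 1]
chain_ratio = round((last - prev) * 100 / prev, 2)   # 50.0
total = round(abs(float(df['values'].sum())), 2)     # 25.0
avg = round(float(df['values'].mean()), 2)           # 8.33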
q['avg'].append(round(float(df['values'].mean()), 2)) # q['eventNameDisplay']=item['event_name_display'] @@ -377,7 +379,7 @@ async def event_model( # 按总和排序 for item in res: try: - if item['time_particle'] in ('P1D', 'P1W'): #按格式修改年月日 + if item['time_particle'] in ('P1D', 'P1W'): # 按格式修改年月日 item['date_range'] = [d.strftime('%Y-%m-%d') for d in item['date_range']] elif item['time_particle'] in ('P1M',): item['date_range'] = [d.strftime('%Y-%m') for d in item['date_range']] @@ -386,7 +388,7 @@ async def event_model( except: pass - sort_key = np.argsort(np.array(item['sum']))[::-1]#将sum中的元素从小到大排列后的结果,提取其对应的索引,然后倒着输出到变量之中 + sort_key = np.argsort(np.array(item['sum']))[::-1] # 将sum中的元素从小到大排列后的结果,提取其对应的索引,然后倒着输出到变量之中 if item.get('groups'): item['groups'] = np.array(item['groups'])[sort_key].tolist() groups = [] @@ -394,7 +396,7 @@ async def event_model( gb = [] if '(' in gitem: gitem = gitem.strip('(').strip(')').replace(' ', '').replace("'", '') - if isinstance(gitem,list): + if isinstance(gitem, list): true_list = gitem else: true_list = gitem.split(',') @@ -404,7 +406,10 @@ async def event_model( if group_label: for name, idx in group_label.items(): gb.insert(idx, name) - groups.append(str(gb)) + + # 去掉分组表现里面的'' + appgb = str(gb).replace("'", '') + groups.append(appgb) item['groups'] = groups item['values'] = np.array(item['values'])[sort_key].tolist() item['sum'] = np.array(item['sum'])[sort_key].tolist() @@ -437,19 +442,19 @@ async def retention_model(request: Request, ) -> schemas.Msg: await analysis.init(data_where=current_user.data_where) try: - res = await analysis.retention_model_sql2() #初始化开始时间结束时间,sql语句 字典 + res = await analysis.retention_model_sql2() # 初始化开始时间结束时间,sql语句 字典 except Exception as e: return schemas.Msg(code=-9, msg='报表配置参数异常') - sql = res['sql'] #获取到sql语句 + sql = res['sql'] # 获取到sql语句 df = await ckdb.query_dataframe(sql) if df.empty: return schemas.Msg(code=-9, msg='无数据', data=None) - date_range = res['date_range'] #时间 列表 - unit_num = res['unit_num'] #int - retention_n = res['retention_n'] #列表 int - filter_item_type = res['filter_item_type'] #all - filter_item = res['filter_item'] #列表 0,1,3,7,14,21,30 + date_range = res['date_range'] # 时间 列表 + unit_num = res['unit_num'] # int + retention_n = res['retention_n'] # 列表 int + filter_item_type = res['filter_item_type'] # all + filter_item = res['filter_item'] # 列表 0,1,3,7,14,21,30 df.set_index('reg_date', inplace=True) for d in set(res['date_range']) - set(df.index): df.loc[d] = 0 @@ -457,12 +462,12 @@ async def retention_model(request: Request, df.sort_index(inplace=True) summary_values = {'均值': {}} max_retention_n = 1 - #留存人数 + # 留存人数 avg = {} - #流失人数 + # 流失人数 avgo = {} for date, v in df.T.items(): - #字典中data存在时不替换,否则将data替换成空字典 + # 字典中data存在时不替换,否则将data替换成空字典 tmp = summary_values.setdefault(date, dict()) tmp['d0'] = int(v.cnt0) tmp['p'] = [] @@ -474,9 +479,9 @@ async def retention_model(request: Request, if i > n: continue # max_retention_n = i if i > max_retention_n else max_retention_n - #留存的人数 + # 留存的人数 avg[i] = avg.setdefault(i, 0) + v[f'cnt{i}'] - #流失的人数 + # 流失的人数 avgo[i] = avgo.setdefault(i, 0) + v[f'on{i}'] tmp['p'].append(v[f'p{i}']) tmp['n'].append(v[f'cnt{i}']) @@ -488,7 +493,7 @@ async def retention_model(request: Request, for rn in retention_n: for rt, rd in df.T.items(): if rt + datetime.timedelta(days=rn) <= pd.datetime.now().date(): - retention_avg_dict.setdefault(rn, {'cnt0': 0, 'cntn': 0,'o_cnt0':0,'o_cntn':0}) + retention_avg_dict.setdefault(rn, {'cnt0': 0, 'cntn': 0, 'o_cnt0': 0, 'o_cntn': 0}) 
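The retention averaging here pools only cohorts that are at least n days old before computing p = cntn * 100 / cnt0. (Note that pd.datetime is deprecated in current pandas; datetime.datetime.now().date() is the plain equivalent.) A toy version with made-up cohort sizes:

import datetime

import pandas as pd

today = datetime.date(2022, 6, 10)
cohorts = pd.DataFrame({
    'cnt0': [100, 200],   # day-0 size per register date
    'cnt1': [40, 90],     # still active on day 1
}, index=[datetime.date(2022, 6, 7), datetime.date(2022, 6, 9)])
retention_avg = {}
for rn in (1,):
    pooled = {'cnt0': 0, 'cntn': 0}
    for reg_date, row in cohorts.iterrows():
        # Only cohorts old enough to have reached day rn are counted.
        if reg_date + datetime.timedelta(days=rn) <= today:
            pooled['cnt0'] += row['cnt0']
            pooled['cntn'] += row[f'cnt{rn}']
    retention_avg[rn] = round(pooled['cntn'] * 100 / pooled['cnt0'], 2)
print(retention_avg)  # {1: 43.33}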
retention_avg_dict[rn]['cnt0'] += rd['cnt0'] retention_avg_dict[rn]['cntn'] += rd[f'cnt{rn}'] retention_avg_dict[rn]['o_cnt0'] += rd['cnt0'] @@ -501,16 +506,16 @@ async def retention_model(request: Request, tmp['d0'] = 0 for rt, rd in retention_avg_dict.items(): tmp['d0'] = int(df['cnt0'].sum()) - n = round(rd['cntn'] * 100 / rd['cnt0'],2) + n = round(rd['cntn'] * 100 / rd['cnt0'], 2) n = 0 if np.isnan(n) else n tmp['p'].append(n) tmp['n'].append(rd['cntn']) - n = round(rd['o_cntn'] * 100 / rd['cnt0'],2) + n = round(rd['o_cntn'] * 100 / rd['cnt0'], 2) n = 0 if np.isnan(n) else n tmp['p_outflow'].append(n) tmp['n_outflow'].append(rd['o_cntn']) - #次留数 + # 次留数 title = ['日期', '用户数', '次留', *[f'{i + 1}留' for i in retention_n[1:]]] # 未到达的日期需要补齐- @@ -535,24 +540,24 @@ async def retention_model(request: Request, async def retention_model01(request: Request, - game: str, - ckdb: CKDrive = Depends(get_ck_db), - db: AsyncIOMotorDatabase = Depends(get_database), - analysis: BehaviorAnalysis = Depends(BehaviorAnalysis), - current_user: schemas.UserDB = Depends(deps.get_current_user) - ) -> schemas.Msg: + game: str, + ckdb: CKDrive = Depends(get_ck_db), + db: AsyncIOMotorDatabase = Depends(get_database), + analysis: BehaviorAnalysis = Depends(BehaviorAnalysis), + current_user: schemas.UserDB = Depends(deps.get_current_user) + ) -> schemas.Msg: await analysis.init(data_where=current_user.data_where) - res = await analysis.retention_model_sql2() #初始化开始时间结束时间,sql语句 字典 - sql = res['sql'] #获取到sql语句 + res = await analysis.retention_model_sql2() # 初始化开始时间结束时间,sql语句 字典 + sql = res['sql'] # 获取到sql语句 df = await ckdb.query_dataframe(sql) if df.empty: return schemas.Msg(code=-9, msg='无数据', data=None) - date_range = res['date_range'] #时间 列表 - unit_num = res['unit_num'] #int - retention_n = res['retention_n'] #列表 int - filter_item_type = res['filter_item_type'] #all - filter_item = res['filter_item'] #列表 0,1,3,7,14,21,30 + date_range = res['date_range'] # 时间 列表 + unit_num = res['unit_num'] # int + retention_n = res['retention_n'] # 列表 int + filter_item_type = res['filter_item_type'] # all + filter_item = res['filter_item'] # 列表 0,1,3,7,14,21,30 df.set_index('reg_date', inplace=True) for d in set(res['date_range']) - set(df.index): df.loc[d] = 0 @@ -563,7 +568,7 @@ async def retention_model01(request: Request, avg = {} avgo = {} for date, v in df.T.items(): - #字典中data存在时不替换,否则将data替换成空字典 + # 字典中data存在时不替换,否则将data替换成空字典 tmp = summary_values.setdefault(date, dict()) tmp['d0'] = int(v.cnt0) tmp['p'] = [] @@ -577,8 +582,8 @@ async def retention_model01(request: Request, # max_retention_n = i if i > max_retention_n else max_retention_n avg[i] = avg.setdefault(i, 0) + v[f'cnt{i}'] avgo[i] = avgo.setdefault(i, 0) + v[f'on{i}'] - tmp['p'].append(round(100-v[f'p{i}'],2)) - #tmp['p'].append(v[f'p{i}']) + tmp['p'].append(round(100 - v[f'p{i}'], 2)) + # tmp['p'].append(v[f'p{i}']) tmp['n'].append(v[f'cnt{i}']) tmp['p_outflow'].append(v[f'op{i}']) tmp['n_outflow'].append(v[f'on{i}']) @@ -588,7 +593,7 @@ async def retention_model01(request: Request, for rn in retention_n: for rt, rd in df.T.items(): if rt + datetime.timedelta(days=rn) <= pd.datetime.now().date(): - retention_avg_dict.setdefault(rn, {'cnt0': 0, 'cntn': 0,'o_cnt0':0,'o_cntn':0}) + retention_avg_dict.setdefault(rn, {'cnt0': 0, 'cntn': 0, 'o_cnt0': 0, 'o_cntn': 0}) retention_avg_dict[rn]['cnt0'] += rd['cnt0'] retention_avg_dict[rn]['cntn'] += rd[f'cnt{rn}'] @@ -602,17 +607,16 @@ async def retention_model01(request: Request, tmp['d0'] = 0 for rt, rd in 
retention_avg_dict.items(): tmp['d0'] = int(df['cnt0'].sum()) - n = round(100-(rd['cntn'] * 100 / rd['cnt0']), 2) - #n = round(rd['cntn'] * 100 / rd['cnt0'],2) + n = round(100 - (rd['cntn'] * 100 / rd['cnt0']), 2) + # n = round(rd['cntn'] * 100 / rd['cnt0'],2) n = 0 if np.isnan(n) else n tmp['p'].append(n) tmp['n'].append(rd['cntn']) - n = round(rd['o_cntn'] * 100 / rd['cnt0'],2) + n = round(rd['o_cntn'] * 100 / rd['cnt0'], 2) n = 0 if np.isnan(n) else n tmp['p_outflow'].append(n) tmp['n_outflow'].append(rd['o_cntn']) - title = ['日期', '用户数', '次流失', *[f'{i + 1}流失' for i in retention_n[1:]]] # 未到达的日期需要补齐- @@ -636,7 +640,6 @@ async def retention_model01(request: Request, return schemas.Msg(code=0, msg='ok', data=resp) - @router.post("/retention_model_export") async def retention_model_export(request: Request, game: str, @@ -763,7 +766,7 @@ async def funnel_model( await analysis.init(data_where=current_user.data_where) res = await analysis.funnel_model_sql() sql = res['sql'] - #查询的时间 + # 查询的时间 date_range = res['date_range'] cond_level = res['cond_level'] groupby = res['groupby'] @@ -778,7 +781,7 @@ async def funnel_model( for item in not_exists_level: key = key if isinstance(key, tuple) else (key,) concat_data.append((*key, item, 0)) - #合并数据 + # 合并数据 df = pd.concat([df, pd.DataFrame(concat_data, columns=df.columns)]) # df.set_index('date',inplace=True) @@ -788,9 +791,9 @@ async def funnel_model( return schemas.Msg(code=0, msg='ok', data={'list': data_list, 'level': cond_level}) tmp = {'title': '总体'} - #以level分组后的和 + # 以level分组后的和 tmp_df = df[['level', 'values']].groupby('level').sum() - #在原数据上对索引进行排序 + # 在原数据上对索引进行排序 tmp_df.sort_index(inplace=True) for i in tmp_df.index: tmp_df.loc[i, 'values'] = tmp_df.loc[i:]['values'].sum() @@ -1039,13 +1042,13 @@ async def scatter_model( res = await analysis.scatter_model_sql() except Exception as e: return schemas.Msg(code=-9, msg='报表配置参数异常') - end_date=analysis.end_date - start_date=analysis.start_date - where=analysis.events[-1]['quotaname'] + end_date = analysis.end_date + start_date = analysis.start_date + where = analysis.events[-1]['quotaname'] sql = res['sql'] - #columnName = analysis.events[-1]['label_id'] + # columnName = analysis.events[-1]['label_id'] - #查询买量渠道owner为kuaiyou3的日注册玩家等级分布 + # 查询买量渠道owner为kuaiyou3的日注册玩家等级分布 # sql_list=sql.split("GROUP BY") # sql01 = """and xiangsu.event.owner_name='kuaiyou3'GROUP BY""""" # new_sql=sql_list[0]+sql01+sql_list[1] @@ -1059,9 +1062,9 @@ async def scatter_model( # f=lambda x:x[0] # df['values']=df['values'].map(f) df = df.explode("values").reset_index(drop=True) - #df['values']=df['values'].astype(str) + # df['values']=df['values'].astype(str) df.fillna(0, inplace=True) - #转换数据类型为int + # 转换数据类型为int df['values'] = df['values'].astype(int) interval_type = res['interval_type'] analysi = res['analysis'] @@ -1082,7 +1085,7 @@ async def scatter_model( } if not quota_interval_arr: - resp['label'] = [f'[{i},{i + interval})' for i in range(min_v, max_v, interval)] + resp['label'] = [f'[{i},{i + interval})' for i in range(min_v, max_v, interval)] bins = [i for i in range(min_v, max_v + interval, interval)] else: quota_interval_arr = [-float('inf')] + quota_interval_arr + [float('inf')] @@ -1102,13 +1105,13 @@ async def scatter_model( resp['list']['合计'] = dict() p = list(round(bins_s * 100 / total, 2).to_list()) resp['list']['合计']['总体'] = {'n': bins_s.to_list(), 'total': total, - 'p': [str(i)+'%' for i in p], + 'p': [str(i) + '%' for i in p], 'title': '总体'} else: - p=list(round(bins_s * 100 / total, 2).to_list()) + 
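A self-contained sketch of the interval binning used by scatter_model. One caveat worth flagging: the labels read left-closed ('[i,i+interval)') while pd.cut is right-closed by default (and the details variant below passes right=True, include_lowest=True), so right=False is used here to make the counts match the label text:

import pandas as pd

values = pd.Series([1, 3, 7, 7, 12, 18, 24])  # toy metric values
min_v, max_v = int(values.min()), int(values.max())
interval = (max_v - min_v) // 10 or 1
bins = [i for i in range(min_v, max_v + interval, interval)]
labels = [f'[{i},{i + interval})' for i in range(min_v, max_v, interval)]
# Count values per bin; right=False keeps bins left-closed like the labels.
bins_s = pd.cut(values, bins=bins, right=False).value_counts()
bins_s.sort_index(inplace=True)
total = int(bins_s.sum())
p = [str(i) + '%' for i in round(bins_s * 100 / total, 2).to_list()]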
p = list(round(bins_s * 100 / total, 2).to_list()) resp['list'][key.strftime('%Y-%m-%d')] = dict() resp['list'][key.strftime('%Y-%m-%d')]['总体'] = {'n': bins_s.to_list(), 'total': total, - 'p':[str(i)+'%' for i in p], + 'p': [str(i) + '%' for i in p], 'title': '总体'} # 分组的 # if groupby: @@ -1124,10 +1127,10 @@ async def scatter_model( # 2).to_list(), # 'title': title # } - download=analysis.event_view.get('download','') + download = analysis.event_view.get('download', '') if download == 1: creat_df = create_df(resp) - Download=Download_xlsx(creat_df, '分布分析') + Download = Download_xlsx(creat_df, '分布分析') return Download return schemas.Msg(code=0, msg='ok', data=resp) @@ -1153,48 +1156,48 @@ async def scatter_model( n = len(tmp_df2) labels_dict[label] = n if event_type == 'pay': - #如需要2之后所有之和,则执行下面代码,返回值为字典的labels_dict01 - labels_dict01={} - v=-1 - for i in labels: - v +=1 + # 如需要2之后所有之和,则执行下面代码,返回值为字典的labels_dict01 + labels_dict01 = {} + v = -1 + for i in labels: + v += 1 if int(i) == 1: - labels_dict01["1"]=labels_dict["1"] + labels_dict01["1"] = labels_dict["1"] else: # for number in labels_dict.keys(): # if number >=i: - values=list(labels_dict.values()) - n=sum(values[v:]) - labels_dict01[i]=n - #传入百分比数据 - list_p=[] + values = list(labels_dict.values()) + n = sum(values[v:]) + labels_dict01[i] = n + # 传入百分比数据 + list_p = [] for i in labels: - number_int=round(labels_dict01.get(i, 0) * 100 / total, 2) - number_str=str(number_int)+'%' + number_int = round(labels_dict01.get(i, 0) * 100 / total, 2) + number_str = str(number_int) + '%' list_p.append(number_str) resp['list'][dt] = {'总体': {'n': [labels_dict01.get(i, 0) for i in labels], 'total': total, 'p': list_p}} else: - list_p=[] + list_p = [] for i in labels: - number_int=round(labels_dict.get(i, 0) * 100 / total, 2) - number_str=str(number_int)+'%' + number_int = round(labels_dict.get(i, 0) * 100 / total, 2) + number_str = str(number_int) + '%' list_p.append(number_str) resp['list'][dt] = {'总体': {'n': [labels_dict.get(i, 0) for i in labels], 'total': total, 'p': list_p}} - #resp['list'][dt] = {'总体': {'n': [labels_dict.get(i, 0) for i in labels], 'total': total, + # resp['list'][dt] = {'总体': {'n': [labels_dict.get(i, 0) for i in labels], 'total': total, # 'p': [round(labels_dict.get(i, 0) * 100 / total, 2) for i in labels]}} - if where =="step_id" and event_type == "guide": - sql=f"""SELECT toDate(addHours({game}.event."#event_time", 8)) AS date, count(DISTINCT {game}.event."#account_id") AS values FROM {game}.event WHERE addHours({game}.event."#event_time", 8) >= '{start_date}' AND addHours({game}.event."#event_time", 8) <= '{end_date}' AND {game}.event."#event_name" = 'create_account' GROUP BY toDate(addHours({game}.event."#event_time", 8)) ORDER BY date""" + if where == "step_id" and event_type == "guide": + sql = f"""SELECT toDate(addHours({game}.event."#event_time", 8)) AS date, count(DISTINCT {game}.event."#account_id") AS values FROM {game}.event WHERE addHours({game}.event."#event_time", 8) >= '{start_date}' AND addHours({game}.event."#event_time", 8) <= '{end_date}' AND {game}.event."#event_name" = 'create_account' GROUP BY toDate(addHours({game}.event."#event_time", 8)) ORDER BY date""" df = await ckdb.query_dataframe(sql) for i in range(len(df)): - resp['list'][str(df['date'][i])]['总体']['total']=int(df['values'][i]) - #兼容下载功能 - download=analysis.event_view.get('download','') + resp['list'][str(df['date'][i])]['总体']['total'] = int(df['values'][i]) + # 兼容下载功能 + download = analysis.event_view.get('download', '') if download == 1: - 
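The labels_dict01 roll-up above, extracted as a toy: for 'pay' events, bucket 1 keeps its own count, and every later bucket becomes "players who reached at least this value":

labels = ['1', '2', '3']
labels_dict = {'1': 5, '2': 3, '3': 2}
values = list(labels_dict.values())
labels_dict01 = {}
for v, i in enumerate(labels):
    # Bucket 1 is copied as-is; later buckets sum themselves and everything after.
    labels_dict01[i] = labels_dict['1'] if int(i) == 1 else sum(values[v:])
# labels_dict01 == {'1': 5, '2': 5, '3': 2}
total = sum(values)
list_p = [str(round(labels_dict01.get(i, 0) * 100 / total, 2)) + '%' for i in labels]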
creat_df=create_df(resp) - Download=Download_xlsx(creat_df,'分布分析') + creat_df = create_df(resp) + Download = Download_xlsx(creat_df, '分布分析') return Download return schemas.Msg(code=0, msg='ok', data=resp) @@ -1206,6 +1209,8 @@ async def scatter_model( # resp['list'][key.strftime('%Y-%m-%d')]['总体'] = {'n': bins_s.to_list(), 'total': total, # 'p': round(bins_s * 100 / total, 2).to_list(), # 'title': '总体'} + + @router.post("/scatter_model_details") async def scatter_model( request: Request, @@ -1222,7 +1227,7 @@ async def scatter_model( return schemas.Msg(code=-9, msg='报表配置参数异常') event_type = analysis.events[0]['eventName'] where = analysis.events[-1]['quotaname'] - sql=res['sql'] + sql = res['sql'] columnName = analysis.event_view['groupBy'][-1]['columnName'] if analysis.event_view['groupBy'] != []: @@ -1249,7 +1254,7 @@ async def scatter_model( df['date'] = '合计' if analysi != 'number_of_days' and interval_type != 'discrete': - #默认区间 + # 默认区间 max_v = int(df['values'].max()) min_v = int(df['values'].min()) interval = (max_v - min_v) // 10 or 1 @@ -1257,7 +1262,7 @@ async def scatter_model( 'start_date': res['start_date'], 'end_date': res['end_date'], 'time_particle': res['time_particle'], - 'biaotou':columnName + 'biaotou': columnName } if 'float' in str(df.dtypes['va']): df['va'] = df['va'].astype(int) @@ -1283,55 +1288,55 @@ async def scatter_model( for key, tmp_df in df.groupby('va'): bins_s = pd.cut(tmp_df['values'], bins=bins, - right=True,include_lowest=True).value_counts() + right=True, include_lowest=True).value_counts() bins_s.sort_index(inplace=True) total = int(bins_s.sum()) if res['time_particle'] == 'total': resp['list']['合计'] = dict() resp['list']['合计'] = {'n': bins_s.to_list(), 'total': total, - 'p': round(bins_s * 100 / total, 2).to_list(), - 'title': '总体'} + 'p': round(bins_s * 100 / total, 2).to_list(), + 'title': '总体'} else: - p=round(bins_s * 100 / total, 2).to_list() + p = round(bins_s * 100 / total, 2).to_list() for i in range(len(p)): if str(p[i]) == 'nan': p[i] = 0 - #映射对应的埋点数据 + # 映射对应的埋点数据 re = await crud.select_map.get_list(db, game) - re_list=[i['attr_name'] for i in re] + re_list = [i['attr_name'] for i in re] if columnName in re_list: for i in re: if columnName == i['attr_name']: for datas in i['map_']: if key == datas['id']: - key=datas['title'] + key = datas['title'] break break if 'time' not in columnName: resp['list'][key] = dict() resp['list'][key] = {'n': bins_s.to_list(), 'total': total, - 'p': [str(i)+'%' for i in p], - 'title': '总体'} + 'p': [str(i) + '%' for i in p], + 'title': '总体'} else: resp['list'][key.strftime('%Y-%m-%d %H:%M:%S')] = dict() resp['list'][key.strftime('%Y-%m-%d %H:%M:%S')] = {'n': bins_s.to_list(), 'total': total, - 'p': [str(i)+'%' for i in p], - 'title': '总体'} + 'p': [str(i) + '%' for i in p], + 'title': '总体'} # 兼容下载功能 download = analysis.event_view.get('download', '') if download == 1: - create_df = create_neidf(resp,columnName) - Download=Download_xlsx(create_df, '分布分析') + create_df = create_neidf(resp, columnName) + Download = Download_xlsx(create_df, '分布分析') return Download return schemas.Msg(code=0, msg='ok', data=resp) else: - #离散数字 + # 离散数字 resp = {'list': {}, 'label': [], 'start_date': res['start_date'], 'end_date': res['end_date'], 'time_particle': res['time_particle'], - 'biaotou':columnName + 'biaotou': columnName } labels = [str(i) for i in sorted(df['values'].unique())] resp['label'] = labels @@ -1343,20 +1348,20 @@ async def scatter_model( if res['time_particle'] == 'total': dt = '合计' else: - #映射对应的埋点数据 + # 映射对应的埋点数据 
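The remapping that follows swaps raw grouped ids for configured display titles. A toy with the list shape crud.select_map.get_list appears to return, inferred from how it is consumed here rather than from the crud module itself:

# Hypothetical mapping rows: one entry per mapped attribute.
re = [
    {'attr_name': 'svrindex', 'map_': [{'id': 1, 'title': '一服'}, {'id': 2, 'title': '二服'}]},
]
columnName, key = 'svrindex', 2
re_list = [i['attr_name'] for i in re]
if columnName in re_list:
    for i in re:
        if columnName == i['attr_name']:
            for datas in i['map_']:
                if key == datas['id']:
                    key = datas['title']  # raw id replaced by display title
                    break
            break
print(key)  # 二服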
re = await crud.select_map.get_list(db, game) - re_list=[i['attr_name'] for i in re] + re_list = [i['attr_name'] for i in re] if columnName in re_list: for i in re: if columnName == i['attr_name']: for datas in i['map_']: if key == datas['id']: - key=datas['title'] + key = datas['title'] break break dt = key - #dt = key.strftime('%Y-%m-%d') - #dt='合计' + # dt = key.strftime('%Y-%m-%d') + # dt='合计' labels_dict = {} for key2, tmp_df2 in tmp_df.groupby('values'): @@ -1385,7 +1390,7 @@ async def scatter_model( list_p.append(number_str) resp['list'][dt] = {'n': [labels_dict01.get(i, 0) for i in labels], 'total': total, - 'p': list_p} + 'p': list_p} else: list_p = [] for i in labels: @@ -1393,7 +1398,7 @@ async def scatter_model( number_str = str(number_int) + '%' list_p.append(number_str) resp['list'][dt] = {'n': [labels_dict.get(i, 0) for i in labels], 'total': total, - 'p': list_p} + 'p': list_p} # resp['list'][dt] = {'总体': {'n': [labels_dict.get(i, 0) for i in labels], 'total': total, # 'p': [round(labels_dict.get(i, 0) * 100 / total, 2) for i in labels]}} if where == "step_id" and event_type == "guide": @@ -1404,13 +1409,14 @@ async def scatter_model( # 兼容下载功能 download = analysis.event_view.get('download', '') if download == 1: - create_df = create_neidf(resp,columnName) - Download=Download_xlsx(create_df, '分布分析') + create_df = create_neidf(resp, columnName) + Download = Download_xlsx(create_df, '分布分析') return Download return schemas.Msg(code=0, msg='ok', data=resp) else: return schemas.Msg(code=-9, msg='没有添加分组项', data='') + @router.post("/trace_model_sql") async def trace_model_sql( request: Request, @@ -1440,25 +1446,25 @@ async def trace_model_sql( if df.empty: return schemas.Msg(code=-9, msg='无数据', data=None) chain_dict = defaultdict(dict) - event_num_dict={} - event_next_event={} + event_num_dict = {} + event_next_event = {} nodes = {'流失'} - name_list=analysis.events['event_namesdes'] - name_dict={} + name_list = analysis.events['event_namesdes'] + name_dict = {} for i in name_list: - name_dict[i['event_name']]=i['event_desc'] + name_dict[i['event_name']] = i['event_desc'] for event_names, count in zip(df['event_chain'], df['values']): - fmt_keys=[] + fmt_keys = [] chain_len = len(event_names) for i, event_name in enumerate(event_names): if i >= 10: continue next_event = event_names[i + 1] if i < chain_len - 1 else '流失' - #按对应的中文名显示 - event_namess=name_dict.get(event_name,event_name) - next_eventss=name_dict.get(next_event,next_event) + # 按对应的中文名显示 + event_namess = name_dict.get(event_name, event_name) + next_eventss = name_dict.get(next_event, next_event) key = (f'{event_namess}-{i}', f'{next_eventss}-{i + 1}') - #key = (f'{event_name}', f'{next_event}') + # key = (f'{event_name}', f'{next_event}') nodes.update(key) chain_dict[i][key] = chain_dict[i].setdefault(key, 0) + count keys = list(key) @@ -1486,9 +1492,9 @@ async def trace_model_sql( "target": keys[1], "value": val }) - node=[ item for item in nodes] + node = [item for item in nodes] node.sort() - #按固定的首尾排序 + # 按固定的首尾排序 first = [] trail = [] nodes = [] @@ -1513,11 +1519,11 @@ async def trace_model_sql( event_new_next[key] = new_key_list data = { - #'nodes': [{'name': item} for item in nodes], + # 'nodes': [{'name': item} for item in nodes], 'nodes': [{'name': item} for item in nodes], 'links': links, - 'event_num':event_num_dict, - 'event_next':event_new_next, + 'event_num': event_num_dict, + 'event_next': event_new_next, 'start_date': res['start_date'], 'end_date': res['end_date'], 'time_particle': res['time_particle'] @@ 
-1669,6 +1675,7 @@ async def user_property_model( 'title': title }) + @router.post("/seek_user") async def user_property_model( request: Request, @@ -1677,25 +1684,29 @@ async def user_property_model( ckdb: CKDrive = Depends(get_ck_db) ) -> schemas.Msg: """游戏用户搜索功能""" - #判断的内容 - data=data_in.condition - #需要判断的字段 - ziduan=data_in.user_arrt_title - #筛选条件 - tiaojian=data_in.comparator_id + # 判断的内容 + data = data_in.condition + # 需要判断的字段 + ziduan = data_in.user_arrt_title + # 筛选条件 + tiaojian = data_in.comparator_id if tiaojian == '==': tiaojian = '=' - #判断是否是时间类型 + # 判断是否是时间类型 if data_in.user_arrt_type == 'datetime': sql = f"""select `#account_id`,`{ziduan}` from {game}.`user` WHERE addHours(`{ziduan}`, 8) >= '{data_in.start_time}' - and addHours(`{ziduan}`, 8) <= '{data_in.end_time}' ORDER BY `#reg_time` LIMIT 10 OFFSET {(data_in.pages-1)*10}""" - #如果查询'#account_id',则不多余返回一个account_id + and addHours(`{ziduan}`, 8) <= '{data_in.end_time}' ORDER BY `#reg_time` LIMIT 10 OFFSET {( + data_in.pages - 1) * 10}""" + # 如果查询'#account_id',则不多余返回一个account_id elif ziduan == '#account_id': - sql=f"""select `{ziduan}`,name from {game}.`user` WHERE `{ziduan}` {tiaojian} '{data_in.condition}' ORDER BY `#reg_time` LIMIT 10 OFFSET {(data_in.pages-1)*10} """ + sql = f"""select `{ziduan}`,name from {game}.`user` WHERE `{ziduan}` {tiaojian} '{data_in.condition}' ORDER BY `#reg_time` LIMIT 10 OFFSET {( + data_in.pages - 1) * 10} """ elif data_in.user_arrt_type == 'int': - sql=f"""select `#account_id`,`{ziduan}` from {game}.`user` WHERE `{ziduan}` {tiaojian} {data_in.condition} ORDER BY `#reg_time` LIMIT 10 OFFSET {(data_in.pages-1)*10}""" + sql = f"""select `#account_id`,`{ziduan}` from {game}.`user` WHERE `{ziduan}` {tiaojian} {data_in.condition} ORDER BY `#reg_time` LIMIT 10 OFFSET {( + data_in.pages - 1) * 10}""" else: - sql=f"""select `#account_id`,`{ziduan}` from `{game}`.`user` WHERE `{ziduan}` {tiaojian} '{data}' ORDER BY `#reg_time` LIMIT 10 OFFSET {(data_in.pages-1)*10}""" + sql = f"""select `#account_id`,`{ziduan}` from `{game}`.`user` WHERE `{ziduan}` {tiaojian} '{data}' ORDER BY `#reg_time` LIMIT 10 OFFSET {( + data_in.pages - 1) * 10}""" # 查询数据 try: df = await ckdb.query_dataframe(sql) @@ -1703,23 +1714,25 @@ async def user_property_model( return schemas.Msg(code=0, msg='查询参数不匹配', data=e) # 转换成列表返回 df.fillna(0, inplace=True) - account_id=list(df['#account_id']) - new_sql=f"""select `#account_id`,`#ip`,`#distinct_id`,rmbmoney,owner_name,lv,zhanli,channel, + account_id = list(df['#account_id']) + new_sql = f"""select `#account_id`,`#ip`,`#distinct_id`,rmbmoney,owner_name,lv,zhanli,channel, channel,svrindex,maxmapid,name,`exp`,vip,jinbi,last_account_login_time,binduid from {game}.`user` where `#account_id` in ({account_id})""" - df1= await ckdb.query_dataframe(new_sql) - new_values=df1.values.tolist() + df1 = await ckdb.query_dataframe(new_sql) + new_values = df1.values.tolist() for i in range(len(new_values)): if str(new_values[i][6]) == 'nan': - new_values[i][6]=0 - res = {'refer':{ + new_values[i][6] = 0 + res = {'refer': { 'columns': df.columns.tolist(), 'values': df.values.tolist() }, - 'details_data':{ - 'new_columns':df1.columns.tolist(), - 'new_values':new_values - }} + 'details_data': { + 'new_columns': df1.columns.tolist(), + 'new_values': new_values + }} return schemas.Msg(code=0, msg='ok', data=res) + + @router.post("/seek_user_count") async def user_property_model( request: Request, @@ -1728,34 +1741,35 @@ async def user_property_model( ckdb: CKDrive = Depends(get_ck_db) ) -> schemas.Msg: 
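The seek_user queries above page with LIMIT 10 OFFSET (pages - 1) * 10, i.e. a 1-based page number mapped onto a row offset. The mapping as a tiny helper:

def page_clause(pages: int, size: int = 10) -> str:
    # Page 1 -> OFFSET 0, page 2 -> OFFSET 10, ...
    return f"LIMIT {size} OFFSET {(pages - 1) * size}"

assert page_clause(1) == "LIMIT 10 OFFSET 0"
assert page_clause(3) == "LIMIT 10 OFFSET 20"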
"""游戏用户搜索功能查询到的数量""" - #判断的内容 - data=data_in.condition - #需要判断的字段 - ziduan=data_in.user_arrt_title - #筛选条件 - tiaojian=data_in.comparator_id + # 判断的内容 + data = data_in.condition + # 需要判断的字段 + ziduan = data_in.user_arrt_title + # 筛选条件 + tiaojian = data_in.comparator_id if tiaojian == '==': tiaojian = '=' - #判断是否是时间类型 + # 判断是否是时间类型 if data_in.user_arrt_type == 'datetime': sql = f"""select `#account_id`,`{ziduan}` from {game}.`user` WHERE addHours(`{ziduan}`, 8) >= '{data_in.start_time}' and addHours(`{ziduan}`, 8) <= '{data_in.end_time}' ORDER BY `#reg_time`""" - #如果查询'#account_id',则不多余返回一个account_id + # 如果查询'#account_id',则不多余返回一个account_id elif ziduan == '#account_id': - sql=f"""select `{ziduan}`,name from {game}.`user` WHERE `{ziduan}` {tiaojian} '{data_in.condition}' ORDER BY `#reg_time` """ + sql = f"""select `{ziduan}`,name from {game}.`user` WHERE `{ziduan}` {tiaojian} '{data_in.condition}' ORDER BY `#reg_time` """ elif data_in.user_arrt_type == 'int': sql = f"""select `#account_id`,`{ziduan}` from {game}.`user` WHERE `{ziduan}` {tiaojian} {data_in.condition} ORDER BY `#reg_time`""" else: sql = f"""select `#account_id`,`{ziduan}` from `{game}`.`user` WHERE `{ziduan}` {tiaojian} '{data}' ORDER BY `#reg_time`""" - #查询数据 + # 查询数据 try: df = await ckdb.query_dataframe(sql) except Exception as e: return schemas.Msg(code=0, msg='查询参数不匹配', data=e) - #返回查询到的数量 - res=len(df) + # 返回查询到的数量 + res = len(df) return schemas.Msg(code=0, msg='ok', data=res) + @router.post("/download_user") async def user_property_model( request: Request, @@ -1764,43 +1778,44 @@ async def user_property_model( ckdb: CKDrive = Depends(get_ck_db) ): """下载查询到的所有数据""" - #判断的内容 - data=data_in.condition - #需要判断的字段 - ziduan=data_in.user_arrt_title - #筛选条件 - tiaojian=data_in.comparator_id + # 判断的内容 + data = data_in.condition + # 需要判断的字段 + ziduan = data_in.user_arrt_title + # 筛选条件 + tiaojian = data_in.comparator_id if tiaojian == '==': tiaojian = '=' - #判断是否是时间类型 + # 判断是否是时间类型 if data_in.user_arrt_type == 'datetime': sql = f"""select `#account_id`,`{ziduan}` from {game}.`user` WHERE addHours(`{ziduan}`, 8) >= '{data_in.start_time}' and addHours(`{ziduan}`, 8) <= '{data_in.end_time}' ORDER BY `#reg_time`""" - #如果查询'#account_id',则不多余返回一个account_id + # 如果查询'#account_id',则不多余返回一个account_id elif ziduan == '#account_id': - sql=f"""select `{ziduan}`,name from {game}.`user` WHERE `{ziduan}` {tiaojian} '{data_in.condition}' ORDER BY `#reg_time` """ + sql = f"""select `{ziduan}`,name from {game}.`user` WHERE `{ziduan}` {tiaojian} '{data_in.condition}' ORDER BY `#reg_time` """ elif data_in.user_arrt_type == 'int': sql = f"""select `#account_id`,`{ziduan}` from {game}.`user` WHERE `{ziduan}` {tiaojian} {data_in.condition} ORDER BY `#reg_time`""" else: sql = f"""select `#account_id`,`{ziduan}` from `{game}`.`user` WHERE `{ziduan}` {tiaojian} '{data}' ORDER BY `#reg_time`""" - #查询数据 + # 查询数据 try: df = await ckdb.query_dataframe(sql) except Exception as e: return schemas.Msg(code=0, msg='查询参数不匹配', data=e) if df.empty: - return schemas.Msg(code=-9, msg='无数据',data='') + return schemas.Msg(code=-9, msg='无数据', data='') account_id = list(df['#account_id']) new_sql = f"""select `#account_id`,`#ip`,`#distinct_id`,rmbmoney,owner_name,lv,zhanli,channel, channel,svrindex,maxmapid,name,`exp`,vip,jinbi,last_account_login_time,binduid from {game}.`user` where `#account_id` in ({account_id})""" df1 = await ckdb.query_dataframe(new_sql) - file_name=quote(f'下载的用户搜索数据.xlsx') + file_name = quote(f'下载的用户搜索数据.xlsx') mime = mimetypes.guess_type(file_name)[0] 
df_to_stream = DfToStream((df1, '下载的用户搜索数据')) with df_to_stream as d: export = d.to_stream() return StreamingResponse(export, media_type=mime, headers={'Content-Disposition': f'filename="{file_name}"'}) + @router.post("/solo_user") async def user_property_model( request: Request, @@ -1811,90 +1826,90 @@ async def user_property_model( """用户的详情""" if data_in.event_list == []: return schemas.Msg(code=-9, msg='请配置用户搜索模块事件', data=[]) - event_dict={} + event_dict = {} for i in data_in.event_list: - event_dict[i['event']]=i['event_name'] + event_dict[i['event']] = i['event_name'] # event_dict={'pay':'充值','create_account':'创建角色','login':'登录','ta_app_end':'离开游戏','guide':'新手引导','level_up':'玩家等级', # 'vip_level':'vip等级','sign':'签到','summon':'招募','ask_for_join_guild':'加入联盟','leave_guild':'离开联盟','create_guild':'创建联盟'} - sql=f"""select `#account_id`,`#ip`,`#distinct_id`,rmbmoney,owner_name,lv,zhanli,channel, + sql = f"""select `#account_id`,`#ip`,`#distinct_id`,rmbmoney,owner_name,lv,zhanli,channel, channel,svrindex,maxmapid,name,`exp`,vip,jinbi,last_account_login_time,binduid from {game}.`user` where `#account_id` = '{data_in.account_id}'""" - #获取用户基本详情 - df= await ckdb.query_dataframe(sql) - #获取用户每天事件量 - start_times=data_in.start_time.split(' ')[0] - end_times=data_in.end_time.split(' ')[0] + # 获取用户基本详情 + df = await ckdb.query_dataframe(sql) + # 获取用户每天事件量 + start_times = data_in.start_time.split(' ')[0] + end_times = data_in.end_time.split(' ')[0] event = list(event_dict.keys()) sql1 = f"""select toDate(addHours(`#event_time`, `#zone_offset`)) as date,count(`#event_name`) as v from {game}.event where `date`>='{start_times}' and `date`<='{end_times}' and `#account_id`='{data_in.account_id}' and `#event_name` in ({event}) group by date ORDER by date""" - df1=await ckdb.query_dataframe(sql1) - #时间间隔天数 - global event_values, data_list,game_details,zhanbi - if len(df1) >0: - time_interval=getEveryDay(start_times,end_times) + df1 = await ckdb.query_dataframe(sql1) + # 时间间隔天数 + global event_values, data_list, game_details, zhanbi + if len(df1) > 0: + time_interval = getEveryDay(start_times, end_times) a = list(df1['date']) - aa=[] + aa = [] for i in a: aa.append(str(i)) for i in time_interval: if i not in aa: df1.loc[len(df1.index)] = [i, 0] - df1[['date']]=df1[['date']].astype(str) - df1.sort_values('date',inplace=True) - data_list=list(df1['date']) - event_values=list(df1['v']) + df1[['date']] = df1[['date']].astype(str) + df1.sort_values('date', inplace=True) + data_list = list(df1['date']) + event_values = list(df1['v']) else: - data_list= [] #getEveryDay(start_times,end_times) - event_values=[] - #获取用户事件的详情 - sql2=f"""select * FROM {game}.event WHERE `#account_id`='{data_in.account_id}' and addHours(`#event_time`, `#zone_offset`) >='{data_in.start_time}' and + data_list = [] # getEveryDay(start_times,end_times) + event_values = [] + # 获取用户事件的详情 + sql2 = f"""select * FROM {game}.event WHERE `#account_id`='{data_in.account_id}' and addHours(`#event_time`, `#zone_offset`) >='{data_in.start_time}' and addHours(`#event_time`, `#zone_offset`) <= '{data_in.end_time}' and `#event_name` in ({event}) order by `#event_time`""" - df2=await ckdb.query_dataframe(sql2) + df2 = await ckdb.query_dataframe(sql2) if len(df2) > 0: - game_details={} - #区分天数 - days=list(df2['#event_time']) - day_set=set() + game_details = {} + # 区分天数 + days = list(df2['#event_time']) + day_set = set() for i in days: day_set.add(str(i).split(' ')[0]) - #总日期,一天的 - day_list=list(day_set) + # 总日期,一天的 + day_list = list(day_set) day_list.sort() for 
day in day_list: game_deta = [] for nu in range(len(df2)): if day in str(df2['#event_time'][nu]): - #详细时间 - game_detail={} - time_s=str(df2['#event_time'][nu]).split('+')[0] - game_detail['time']=time_s.split(' ')[1] - game_detail['event']=event_dict[df2['#event_name'][nu]] + # 详细时间 + game_detail = {} + time_s = str(df2['#event_time'][nu]).split('+')[0] + game_detail['time'] = time_s.split(' ')[1] + game_detail['event'] = event_dict[df2['#event_name'][nu]] a_list = [] - #获取df的字段名 - columns=df2.columns.values + # 获取df的字段名 + columns = df2.columns.values for col in columns: - a=str(df2[col][nu]) + a = str(df2[col][nu]) if a != 'None' and a != '' and a != 'nan' and a != '[]': - a_list.append({'title':col,'val':a}) - game_detail['xaingqing']=a_list + a_list.append({'title': col, 'val': a}) + game_detail['xaingqing'] = a_list game_deta.append(game_detail) - game_details[day]=game_deta + game_details[day] = game_deta else: game_details = {} - #event_count = await ckdb.yesterday_event_count(game) - #求事件占比 - sql3=f"""select `#event_name` as a,count(`#event_name`) as v from {game}.event + # event_count = await ckdb.yesterday_event_count(game) + # 求事件占比 + sql3 = f"""select `#event_name` as a,count(`#event_name`) as v from {game}.event where addHours(`#event_time`, `#zone_offset`)>='{data_in.start_time}' and addHours(`#event_time`, `#zone_offset`)<='{data_in.end_time}' and `#account_id`='{data_in.account_id}' and `#event_name` in ({event}) group by `#event_name`""" df3 = await ckdb.query_dataframe(sql3) if len(df3) > 0: - zhanbi=[] - sums=sum(list(df1['v'])) - numbers=0 + zhanbi = [] + sums = sum(list(df1['v'])) + numbers = 0 for i in range(len(df3)): - shuju={} - shuju['name']=event_dict[df3['a'][i]] - shuju['value']=int(df3['v'][i]) + shuju = {} + shuju['name'] = event_dict[df3['a'][i]] + shuju['value'] = int(df3['v'][i]) # if i != len(df3)-1: # number1=round(int(df3['v'][i]) / sums, 2) # number=round(number1*100,2) @@ -1906,17 +1921,19 @@ addHours(`#event_time`, `#zone_offset`) <= '{data_in.end_time}' and `#event_name else: zhanbi = [] res = { - 'details_data':{ - 'new_columns':df.columns.tolist(), - 'new_values':df.values.tolist()}, - 'event_count':{ - 'date':data_list, - 'event_values':event_values + 'details_data': { + 'new_columns': df.columns.tolist(), + 'new_values': df.values.tolist()}, + 'event_count': { + 'date': data_list, + 'event_values': event_values }, - 'details_user':game_details, - 'proportion':zhanbi + 'details_user': game_details, + 'proportion': zhanbi } return schemas.Msg(code=0, msg='ok', data=res) + + @router.get("/event_list") async def event_list( request: Request, @@ -1926,15 +1943,16 @@ async def event_list( current_user: schemas.UserDB = Depends(deps.get_current_user) ) -> schemas.Msg: """个人详情中的事件列表""" - #获取事件名 - #event_list = await ckdb.distinct(game, 'event', '#event_name') - event_list = await crud.event_list.get_list(db,game) + # 获取事件名 + # event_list = await ckdb.distinct(game, 'event', '#event_name') + event_list = await crud.event_list.get_list(db, game) if event_list == []: return schemas.Msg(code=0, msg='请配置用户搜索模块事件', data=[]) else: - res=event_list[0]['details'] + res = event_list[0]['details'] return schemas.Msg(code=0, msg='ok', data=res) + @router.post("/add_event_list") async def add_select_map( request: Request, @@ -1946,15 +1964,15 @@ async def add_select_map( """添加对应游戏事件选择映射""" dfs = pd.read_excel(file, engine='openpyxl', sheet_name=None) for attr_name, df in dfs.items(): - #将id这列转换成字符串类型 - if len(df) >0: + # 将id这列转换成字符串类型 + if len(df) > 0: ColNames = 
df.columns.tolist() event = df.to_dict('records') - details=[] + details = [] for i in event: - details_dict={} - details_dict['event']=i[ColNames[0]] - details_dict['event_name']=i[ColNames[1]] + details_dict = {} + details_dict['event'] = i[ColNames[0]] + details_dict['event_name'] = i[ColNames[1]] details.append(details_dict) data_in = schemas.Event_list(game=game, details=details) await crud.event_list.save(db, data_in)
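add_select_map above assumes each uploaded sheet's first two columns hold the event id and its display name. A toy sheet showing the records the loop builds (column headers are illustrative):

import pandas as pd

df = pd.DataFrame({'event': ['pay', 'login'], 'event_name': ['充值', '登录']})
ColNames = df.columns.tolist()
details = [
    {'event': r[ColNames[0]], 'event_name': r[ColNames[1]]}
    for r in df.to_dict('records')
]
# -> [{'event': 'pay', 'event_name': '充值'}, {'event': 'login', 'event_name': '登录'}]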