1. Add comments to the code

This commit is contained in:
李伟 2022-11-11 10:37:46 +08:00
parent 9bd21f574c
commit b6c5ce6187
10 changed files with 104 additions and 100 deletions

View File

@ -35,7 +35,7 @@ async def add_role_domain(
# domain=item.game)
#
# return schemas.Msg(code=0, msg='添加成功', data=True)
res = await crud.url_list.get_all(db)
res = await crud.url_list.get_all(db) # fetch all routes and their status for every permission level
role_id = {}
for i in res:
role_id[i['auth_id']] = i['name']
@ -60,7 +60,7 @@ async def add_role_domain(
quanxian_id=quanxian_id))
return schemas.Msg(code=0, msg='添加成功', data=True)
# possibly deprecated
@router.post("/get_permissions_for_user_in_domain")
async def get_permissions_for_user_in_domain(
request: Request,
@ -153,6 +153,7 @@ async def add_policy(
async def remove_policy(
request: Request,
data_in: schemas.Del_role,
db: AsyncIOMotorDatabase = Depends(get_database),
current_user: schemas.UserDB = Depends(deps.get_current_user)):
"""
Edit role API permissions

View File

@ -19,7 +19,7 @@ from models.x_analysis import XAnalysis
router = APIRouter()
# deprecated
@router.post("/update_event_view")
async def update_event_view(
request: Request,

View File

@ -32,6 +32,7 @@ async def edit_show_report(
db: AsyncIOMotorDatabase = Depends(get_database),
current_user: schemas.UserDB = Depends(deps.get_current_user)
) -> schemas.Msg:
"""修改看板"""
report_id = data_in.config.report_id
res = await crud.dashboard.update_one(db, {'_id': data_in.dashboard_id, 'reports.report_id': report_id},
{'$set': {f'reports.$.{k}': v for k, v in
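The update above relies on MongoDB's positional `$` operator: the array element matched by 'reports.report_id' in the filter is the one the `$set` paths address, so only that report inside the dashboard document is modified. A minimal stand-alone sketch of the pattern (database and collection names are illustrative):

from motor.motor_asyncio import AsyncIOMotorClient

async def rename_report(dashboard_id, report_id: str, new_name: str):
    db = AsyncIOMotorClient()['demo']  # illustrative connection
    # 'reports.$.name' addresses exactly the array element matched by the filter
    await db.dashboard.update_one(
        {'_id': dashboard_id, 'reports.report_id': report_id},
        {'$set': {'reports.$.name': new_name}},
    )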

View File

@ -20,7 +20,7 @@ from db.redisdb import get_redis_pool, RedisDrive
router = APIRouter()
# possibly deprecated
@router.post('/add_data_auth')
async def add_data_auth(request: Request,
data_id: schemas.DataAuthCreate,
@ -32,7 +32,7 @@ async def add_data_auth(request: Request,
await crud.data_auth.create(db, data_id, game)
return schemas.Msg(code=0, msg='ok', data=data_id)
# possibly deprecated
@router.post('/edit_data_auth')
async def edit_data_auth(request: Request,
data_id: schemas.DataAuthEdit,
@ -44,7 +44,7 @@ async def edit_data_auth(request: Request,
await crud.data_auth.edit_data_auth(db, data_id)
return schemas.Msg(code=0, msg='ok', data=data_id)
# aggregation methods each data type supports when used as an analysis metric
@router.get("/quotas_map")
async def quotas_map(
request: Request,
@ -60,6 +60,7 @@ async def filter_map(
game: str,
current_user: schemas.UserDB = Depends(deps.get_current_user)
) -> schemas.Msg:
# filter rules for the different data types in ClickHouse (ck)
return schemas.Msg(code=0, msg='ok', data=settings.CK_FILTER)
@ -154,7 +155,7 @@ async def user_property(request: Request,
'show_name': data_attr.get(k, {}).get('show_name', ''),
}
)
propertys = sorted(propertys, key=lambda x: x['show_name'])
propertys = sorted(propertys, key=lambda x: x['show_name']) # sort by show_name
return schemas.Msg(code=0, msg='ok', data=propertys)
@ -170,7 +171,7 @@ async def load_prop_quotas(request: Request,
) -> schemas.Msg:
"""事件属性 聚合条件"""
event_columns = await ck.get_columns(game, 'event')
event_columns = await ck.get_columns(game, 'event') # fetch column names and column types
data_attr = await crud.data_attr.find_many(db, {'game': game, 'cat': 'event'})
data_attr = {item['name']: item for item in data_attr}

View File

@ -136,7 +136,7 @@ async def edit_data_attr(
# await crud.select_map.save(db, data_in)
# return schemas.Msg(code=0, msg='ok', data=data_in)
# server zones are synced in gametoos, so this code was no longer needed
# server zones are synced in gametoos, but it is still needed for other event mappings
@router.post("/add_select_map")
async def add_select_map(
request: Request,

View File

@ -29,7 +29,7 @@ async def event_list(
"""事件列表"""
# 获取事件名
try:
event_list = await ckdb.distinct(game, 'event', '#event_name')
event_list = await ckdb.distinct(game, 'event', '#event_name') # fetch event names
# fetch event counts
event_count = await ckdb.yesterday_event_count(game)
event_meta = await crud.event_mana.find_many(db, {'game': game}) or {}

View File

@ -23,7 +23,7 @@ async def create(
) -> schemas.Msg:
"""创建项目"""
try:
res_project = await crud.project.create(db, data_in, current_user=request.user)
res_project = await crud.project.create(db, data_in, current_user=request.user) # create the project
await crud.project_number.createxiangmu(db, data_in)
# insert the project synchronously
# await crud.project_number.createxiangmu(db, data_in)
@ -36,7 +36,7 @@ async def create(
await crud.user_url.updata_quanxian(db, schemas.Url_quanxian(user=user_url['user'], user_id=user_url['user_id'],
game=user_url['game'],
quanxian_id=user_url['quanxian_id'],
quanxian=user_url['quanxian']))
quanxian=user_url['quanxian'])) # configure permissions
except pymongo.errors.DuplicateKeyError:
return schemas.Msg(code=-1, msg='项目名已存在', data='项目名已存在')

View File

@ -181,16 +181,16 @@ async def event_model(
request: Request,
game: str,
data_in: schemas.CkQuery,
ckdb: CKDrive = Depends(get_ck_db),
db: AsyncIOMotorDatabase = Depends(get_database),
rdb: RedisDrive = Depends(get_redis_pool),
ckdb: CKDrive = Depends(get_ck_db), # ClickHouse driver
db: AsyncIOMotorDatabase = Depends(get_database), # MongoDB driver
rdb: RedisDrive = Depends(get_redis_pool), # Redis driver
analysis: BehaviorAnalysis = Depends(BehaviorAnalysis),
current_user: schemas.UserDB = Depends(deps.get_current_user)
current_user: schemas.UserDB = Depends(deps.get_current_user) # login check
) -> schemas.Msg:
"""Event analysis"""
await analysis.init(data_where=current_user.data_where)
await analysis.init(data_where=current_user.data_where) # initialize request parameters
try:
sqls = await analysis.event_model_sql()
sqls = await analysis.event_model_sql() # build the SQL plus the query date range and date granularity
except Exception as e:
return schemas.Msg(code=-9, msg='报表配置参数异常')
res = []
@ -226,7 +226,7 @@ async def event_model(
if last_value > 0:
q['last_value'] = float(last_value)
break
if list(item.get('event_name'))[-1] == '':
if list(item.get('event_name'))[-1] == '': # ratio-type metrics all get a percent sign appended
for i in range(len(values)):
values[i] = str((values[i])) + '%'
q['values'].append(values)
@ -243,10 +243,10 @@ async def event_model(
continue
# SQL statement
sql = item['sql']
groupby = item['groupby']
groupby = item['groupby'] # group-by fields
date_range = item['date_range'] # every day in the queried date range
df = await ckdb.query_dataframe(sql) # run the SQL; df holds the result as a two-dimensional table
if item['event_name'] == '月充总额':
if item['event_name'] == '月充总额': # '充值总额' and '月充总额' (total / monthly recharge) are handled separately before returning
date_range=df['date'].tolist()
q['date_range']=[str(i).split('-')[0]+'-'+str(i).split('-')[1] for i in date_range]
else:
@ -255,7 +255,7 @@ async def event_model(
# map values to their Chinese labels for frontend display
for i in groupby:
if i == 'svrindex':
if game == 'mfmh5':
if game == 'mfmh5': # only mfmh5 is special-cased because its synced server-zone game name differs
game = 'mzmfmh5'
chinese = {}
resp = await crud.select_map.get_one(db, game, i)
@ -355,10 +355,10 @@ async def event_model(
else:
concat_data.append((i, group, 0))
df_group = pd.concat([df_group, pd.DataFrame(concat_data, columns=df_group.columns)])
df_group.sort_values('date', inplace=True)
df_group.sort_values('date', inplace=True) # sort the data by the date column in place
q['values'].append(df_group['values'].to_list())
q['sum'].append(round(float(df_group['values'].sum()), 2))
q['avg'].append(round(float(df_group['values'].mean()), 2))
q['sum'].append(round(float(df_group['values'].sum()), 2)) # sum
q['avg'].append(round(float(df_group['values'].mean()), 2)) # mean
for last_value in df['values'].values[::-1]:
if last_value > 0:
q['last_value'] = float(last_value)
@ -503,7 +503,7 @@ async def retention_model(request: Request,
except Exception as e:
return schemas.Msg(code=-9, msg='报表配置参数异常')
sql = res['sql'] # the generated SQL statement
df = await ckdb.query_dataframe(sql)
df = await ckdb.query_dataframe(sql) # run the query
if df.empty:
return schemas.Msg(code=-9, msg='无数据', data=None)
@ -512,12 +512,12 @@ async def retention_model(request: Request,
retention_n = res['retention_n'] # list of ints
filter_item_type = res['filter_item_type'] # all
filter_item = res['filter_item'] # list, e.g. 0,1,3,7,14,21,30
df.set_index('reg_date', inplace=True)
df.set_index('reg_date', inplace=True) # index by reg_date
# fill in dates that have no data
for d in set(res['date_range']) - set(df.index):
df.loc[d] = 0
df.sort_index(inplace=True)
df.sort_index(inplace=True) # sort the index in place
summary_values = {'均值': {}}
max_retention_n = 1
# retained user counts
@ -557,11 +557,11 @@ async def retention_model(request: Request,
retention_avg_dict[rn]['o_cnt0'] += rd['cnt0']
retention_avg_dict[rn]['o_cntn'] += rd[f'on{rn}']
# compute the averages
tmp['p'] = []
tmp['n'] = []
tmp['p_outflow'] = []
tmp['n_outflow'] = []
tmp['d0'] = 0
tmp['p'] = [] # retention rate (%)
tmp['n'] = [] # retained users
tmp['p_outflow'] = [] # churn rate (%)
tmp['n_outflow'] = [] # churned users
tmp['d0'] = 0 # total users (day-0 cohort)
for rt, rd in retention_avg_dict.items():
tmp['d0'] = int(df['cnt0'].sum())
n = round(rd['cntn'] * 100 / rd['cnt0'], 2)
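For context, the averaging loop above accumulates day-0 cohort sizes (cnt0) and day-n retained counts (cntn) over all registration dates, then turns them into percentages. A simplified sketch of that computation, assuming the dictionary shape used in this hunk (the sample numbers are illustrative):

# Assumed shape: retention_avg_dict[n] = {'cnt0': ..., 'cntn': ..., 'o_cnt0': ..., 'o_cntn': ...}
retention_avg_dict = {1: {'cnt0': 200, 'cntn': 80, 'o_cnt0': 200, 'o_cntn': 120}}
tmp = {'p': [], 'n': [], 'p_outflow': [], 'n_outflow': [], 'd0': 200}
for rn, rd in retention_avg_dict.items():
    tmp['p'].append(round(rd['cntn'] * 100 / rd['cnt0'], 2))              # retention rate, %
    tmp['n'].append(rd['cntn'])                                           # retained users
    tmp['p_outflow'].append(round(rd['o_cntn'] * 100 / rd['o_cnt0'], 2))  # churn rate, %
    tmp['n_outflow'].append(rd['o_cntn'])                                 # churned users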
@ -582,15 +582,15 @@ async def retention_model(request: Request,
items[key].extend(['-'] * (retention_length - len(items[key])))
resp = {
'summary_values': summary_values,
'summary_values': summary_values, # retention / churn data
# 'values': values,
'date_range': [d.strftime('%Y-%m-%d') for d in date_range],
'title': title,
'date_range': [d.strftime('%Y-%m-%d') for d in date_range], # date range
'title': title, # table header
'filter_item_type': filter_item_type,
'filter_item': filter_item,
'start_date': res['start_date'],
'end_date': res['end_date'],
'time_particle': res['time_particle']
'start_date': res['start_date'], # start date
'end_date': res['end_date'], # end date
'time_particle': res['time_particle'] # time granularity
}
return schemas.Msg(code=0, msg='ok', data=resp)
@ -668,13 +668,13 @@ async def retention_model(request: Request,
if len(groupby_list) == 1:
max_v = int(df[groupby_list[0]['columnName']].max())
min_v = int(df[groupby_list[0]['columnName']].min())
for i in groupby:
for i in groupby: # map server zones
if i == 'svrindex':
if game == 'mfmh5':
game = 'mzmfmh5'
chinese = {}
resp = await crud.select_map.get_one(db, game, i)
if not resp:
if not resp: # skip if no mapping data is configured
continue
for ii in resp:
chinese[ii['id']] = ii['title']
@ -682,11 +682,11 @@ async def retention_model(request: Request,
# apply the mapping
df.loc[df['svrindex'] == k, 'svrindex'] = v
times = df['reg_date'][0]
df.set_index(groupby, inplace=True)
df.set_index(groupby, inplace=True) # index by the group-by fields
# for d in set(res['date_range']) - set(df.index):
# df.loc[d] = 0
df.sort_index(inplace=True)
df.sort_index(inplace=True) # sort the index
summary_values = {'均值': {}}
max_retention_n = 1
# retained user counts
@ -862,7 +862,7 @@ async def retention_model(request: Request,
}
return schemas.Msg(code=0, msg='ok', data=resp)
# deprecated
async def retention_model01(request: Request,
game: str,
ckdb: CKDrive = Depends(get_ck_db),
@ -986,7 +986,7 @@ async def retention_model_export(request: Request,
export = d.to_stream()
return StreamingResponse(export, media_type=mime, headers={'Content-Disposition': f'filename="{file_name}"'})
# deprecated
@router.post("/retention_model_del", deprecated=True)
async def retention_model_del(
request: Request,
@ -1095,7 +1095,7 @@ async def funnel_model(
cond_level = res['cond_level']
groupby = res['groupby']
switch_test = analysis.event_view.get('switchTest', True)
if switch_test:
if switch_test: # taken when the UI switch is on
df = await ckdb.query_dataframe(sql)
if df.empty:
return schemas.Msg(code=-9, msg='无数据', data=None)
@ -1112,7 +1112,7 @@ async def funnel_model(
# df.set_index('date',inplace=True)
data_list = []
date_data = {}
if df.shape == (0, 0):
if df.shape == (0, 0): # shape is (rows, columns); return early when there is no data
return schemas.Msg(code=0, msg='ok', data={'list': data_list, 'level': cond_level})
tmp = {'title': '总体'}
@ -1123,7 +1123,7 @@ async def funnel_model(
for i in tmp_df.index:
tmp_df.loc[i, 'values'] = tmp_df.loc[i:]['values'].sum()
tmp['n'] = tmp_df['values'].to_list()
tmp['n'] = tmp_df['values'].to_list() # the values column as a list
tmp['p1'] = [100]
# tmp['p2'] = []
for i, v in tmp_df.loc[2:, 'values'].items():
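The loc[i:] sum just above converts per-level counts into "reached at least this level" counts: each row originally holds the users whose furthest step was that level, so summing from level i downward yields everyone who passed step i. A small sketch, assuming that layout:

import pandas as pd

# furthest step reached: 100 users stopped at step 1, 60 at step 2, 30 at step 3
tmp_df = pd.DataFrame({'values': [100, 60, 30]}, index=[1, 2, 3])
for i in tmp_df.index:
    tmp_df.loc[i, 'values'] = tmp_df.loc[i:]['values'].sum()
# tmp_df['values'] is now [190, 90, 30]: users who reached at least each step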
@ -1142,7 +1142,7 @@ async def funnel_model(
tmp_df = tmp_df.groupby('level').sum()
tmp_df.sort_index(inplace=True)
for i in tmp_df.index:
tmp_df.loc[i, 'values'] = tmp_df.loc[i:]['values'].sum()
tmp_df.loc[i, 'values'] = tmp_df.loc[i:]['values'].sum() # sum values from this level onward
tmp = dict()
@ -1161,12 +1161,12 @@ async def funnel_model(
if groupby:
# fill in missing data
concat_data = []
idx = set(df.set_index(['date'] + groupby).index)
idx = set(df.set_index(['date'] + groupby).index) # unique (date, group) index pairs
all_idx = {(*j, i) for i in range(1, len(cond_level) + 1) for j in idx}
for i in all_idx - set(df.set_index(list(('date', *groupby, 'level'))).index):
concat_data.append((*i, 0))
df = pd.concat([df, pd.DataFrame(concat_data, columns=df.columns)])
df = pd.concat([df, pd.DataFrame(concat_data, columns=df.columns)]) # merge in the filler rows
# df.sort_values(list((*groupby, 'level')), inplace=True, ascending=False)
# map values to Chinese labels for frontend display
for i in groupby:
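The fill step in this hunk builds the Cartesian product of every observed (date, group) pair with every funnel level, then appends zero rows for combinations absent from the query result, so each group has a count at each level. A sketch under the same column layout (column names illustrative):

import pandas as pd

df = pd.DataFrame({'date': ['2022-11-01', '2022-11-01'],
                   'channel': ['ios', 'ios'],
                   'level': [1, 2],
                   'values': [10, 4]})
groupby, n_levels = ['channel'], 3
idx = set(df.set_index(['date'] + groupby).index)               # unique (date, group) pairs
all_idx = {(*j, i) for i in range(1, n_levels + 1) for j in idx}
missing = all_idx - set(df.set_index(['date', *groupby, 'level']).index)
df = pd.concat([df, pd.DataFrame([(*m, 0) for m in missing], columns=df.columns)])
# the ('2022-11-01', 'ios', 3) combination is now present with values == 0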
@ -1184,7 +1184,7 @@ async def funnel_model(
df.loc[df[i] == k, i] = v
for key, tmp_df in df.groupby(groupby):
tmp = {'title': key}
tmp_df = tmp_df.groupby('level').sum()
tmp_df = tmp_df.groupby('level').sum() # group by level and sum
tmp_df.sort_index(inplace=True)
for i in tmp_df.index:
tmp_df.loc[i, 'values'] = tmp_df.loc[i:]['values'].sum()
@ -1220,14 +1220,14 @@ async def funnel_model(
_ = date_data.setdefault(key[0].strftime('%Y-%m-%d'), {})
# [key[1]] = tmp
title = (groupby or ['总体']) + cond_level
resp = {'list': data_list,
resp = {'list': data_list, # data list
'date_data': date_data,
'title': title,
'level': cond_level,
'title': title, # table header
'level': cond_level, # selected funnel steps
'switch_test': switch_test,
'start_date': res['start_date'],
'end_date': res['end_date'],
'time_particle': res['time_particle']
'start_date': res['start_date'], # start date
'end_date': res['end_date'], # end date
'time_particle': res['time_particle'] # time granularity
}
return schemas.Msg(code=0, msg='ok', data=resp)
else:
@ -1243,8 +1243,8 @@ async def funnel_model(
group_str = res['analysis']
# cast to int
df[group_str] = df[group_str].astype(int)
step_list = [str(i) for i in sorted(df[group_str].unique())]
df[group_str] = df[group_str].astype(int) # declare this column's dtype as int
step_list = [str(i) for i in sorted(df[group_str].unique())] # unique values, deduplicated and sorted
dict_k = {}
df['values'] = 1
for k, nedf in df.groupby("date"):
@ -1431,9 +1431,9 @@ async def scatter_model(
res = await analysis.scatter_model_sql()
except Exception as e:
return schemas.Msg(code=-9, msg='报表配置参数异常')
end_date = analysis.end_date
start_date = analysis.start_date
where = analysis.events[-1]['quotaname']
end_date = analysis.end_date # end date
start_date = analysis.start_date # start date
where = analysis.events[-1]['quotaname'] # field name of the query metric
sql = res['sql']
# columnName = analysis.events[-1]['label_id']
@ -1450,29 +1450,29 @@ async def scatter_model(
if 'list' in str(type(df['values'][0])):
# f=lambda x:x[0]
# df['values']=df['values'].map(f)
df = df.explode("values").reset_index(drop=True)
df = df.explode("values").reset_index(drop=True) # 取字典类型数据并重置索引
# df['values']=df['values'].astype(str)
df.fillna(0, inplace=True)
df.fillna(0, inplace=True) # 在原数据把缺失值填充为0
# 转换数据类型为int
if analysis.events[-1].get('analysis') != 'uniqExact':
if analysis.events[-1].get('analysis') != 'uniqExact': # 如果不是去重数的话声明值为int类型
df['values'] = df['values'].astype(int)
else:
df['values'] = df['values'].astype(str) # 统一声明使用去重数的时候为str
interval_type = res['interval_type']
interval_type = res['interval_type'] # 在前端的展现形式,默认/离散/自定义
analysi = res['analysis']
groupby = res['groupby']
quota_interval_arr = res['quota_interval_arr']
# 兼容合计的
if res['time_particle'] == 'total':
if res['time_particle'] == 'total': # 计算所有的数据走这里
df['date'] = '合计'
if analysi != 'number_of_days' and interval_type != 'discrete':
try:
max_v = int(df['values'].max())
max_v = int(df['values'].max()) # 数值中的最大值
except Exception as e:
return schemas.Msg(code=-9, msg='请用离散数字', data=None)
min_v = int(df['values'].min())
interval = (max_v - min_v) // 10 or 1
min_v = int(df['values'].min()) # 数值中的最小值
interval = (max_v - min_v) // 10 or 1 # 大小值之间10等分的区间数值
resp = {'list': dict(),
'start_date': res['start_date'],
'end_date': res['end_date'],
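In the hunk above, df.explode("values") flattens rows whose values cell contains a list into one row per element, which is why the index is reset and NaN (produced by empty lists) is filled with 0 afterwards. A minimal sketch of that sequence:

import pandas as pd

df = pd.DataFrame({'uid': ['a', 'b'], 'values': [[1, 2], []]})
df = df.explode('values').reset_index(drop=True)  # one row per list element
df.fillna(0, inplace=True)                        # empty lists explode to NaN -> 0
df['values'] = df['values'].astype(int)           # restore an integer dtype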
@ -1480,8 +1480,8 @@ async def scatter_model(
}
if not quota_interval_arr:
resp['label'] = [f'[{i},{i + interval})' for i in range(min_v, max_v, interval)]
bins = [i for i in range(min_v, max_v + interval, interval)]
resp['label'] = [f'[{i},{i + interval})' for i in range(min_v, max_v, interval)] # bucket labels
bins = [i for i in range(min_v, max_v + interval, interval)] # bucket edges
else:
quota_interval_arr = [-float('inf')] + quota_interval_arr + [float('inf')]
resp['label'] = []
@ -1493,16 +1493,16 @@ async def scatter_model(
# overall figures
for key, tmp_df in df.groupby('date'):
bins_s = pd.cut(tmp_df['values'], bins=bins,
right=False, include_lowest=True).value_counts()
bins_s.sort_index(inplace=True)
total = int(bins_s.sum())
if res['time_particle'] == 'total':
right=False, include_lowest=True).value_counts() # count values per bucket
bins_s.sort_index(inplace=True) # sort by bucket
total = int(bins_s.sum()) # total sample count
if res['time_particle'] == 'total': # the "total" branch
resp['list']['合计'] = dict()
p = list(round(bins_s * 100 / total, 2).to_list())
resp['list']['合计']['总体'] = {'n': bins_s.to_list(), 'total': total,
'p': [str(i) + '%' for i in p],
'title': '总体'}
else:
else: # per-date figures
p = list(round(bins_s * 100 / total, 2).to_list())
resp['list'][key.strftime('%Y-%m-%d')] = dict()
resp['list'][key.strftime('%Y-%m-%d')]['总体'] = {'n': bins_s.to_list(), 'total': total,
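Taken together, the two hunks above implement equal-width bucketing: interval is a tenth of the value span (at least 1), bins holds the bucket edges, and pd.cut counts how many values fall into each left-closed bucket. A condensed sketch with illustrative integer values:

import pandas as pd

values = pd.Series([1, 3, 7, 12, 18, 25, 39])
max_v, min_v = int(values.max()), int(values.min())
interval = (max_v - min_v) // 10 or 1                          # bucket width, at least 1
bins = [i for i in range(min_v, max_v + interval, interval)]   # bucket edges
labels = [f'[{i},{i + interval})' for i in range(min_v, max_v, interval)]
bins_s = pd.cut(values, bins=bins, right=False,
                include_lowest=True).value_counts()            # count values per bucket
bins_s.sort_index(inplace=True)
total = int(bins_s.sum())
p = [str(i) + '%' for i in round(bins_s * 100 / total, 2).to_list()]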
@ -1523,7 +1523,7 @@ async def scatter_model(
# 'title': title
# }
download = analysis.event_view.get('download', '')
if download == 1:
if download == 1: # export the data as xlsx
creat_df = create_df(resp)
Download = Download_xlsx(creat_df, '分布分析')
return Download
@ -1540,7 +1540,7 @@ async def scatter_model(
labels = [str(i) for i in sorted(df['values'].unique())]
resp['label'] = labels
shaixuan = analysis.events[0].get('analysis')
for key, tmp_df in df.groupby(['date']):
for key, tmp_df in df.groupby(['date']): # group by date
if shaixuan == 'uniqExact':
total = len(set(tmp_df['uid']))
else:
@ -1585,7 +1585,7 @@ async def scatter_model(
number_str = str(number_int) + '%'
list_p.append(number_str)
resp['list'][dt] = {'总体': {'n': [labels_dict.get(i, 0) for i in labels], 'total': total,
'p': list_p}}
'p': list_p}} # assemble the response data
# resp['list'][dt] = {'总体': {'n': [labels_dict.get(i, 0) for i in labels], 'total': total,
# 'p': [round(labels_dict.get(i, 0) * 100 / total, 2) for i in labels]}}
if where == "step_id" and event_type == "guide":
@ -1617,7 +1617,7 @@ async def scatter_model(
# 'p': round(bins_s * 100 / total, 2).to_list(),
# 'title': '总体'}
# possibly deprecated; a distribution analysis of the same type exists
@router.post("/guide_model")
async def guide_model(
request: Request,
@ -1657,9 +1657,9 @@ async def guide_model(
p_data = {}
data = {}
for dict_key, dict_data in dict_k.items():
dict_data1 = deepcopy(dict_data)
dict_data1 = deepcopy(dict_data) # deep-copy the data
dict_k1 = {int(k): v for k, v in dict_data1.items()}
sorted(dict_k1.keys())
sorted(dict_k1.keys()) # sort the keys (note: sorted() returns a new list, so this statement has no effect)
data_values = list(dict_k1.values())
p_values = [round(i / sum(data_values), 2) or 0 for i in data_values]
p_values.insert(0, dict_key)
@ -1814,8 +1814,8 @@ async def scatter_model(
df['values'] = df['values'].astype(str) # 统一声明使用去重数的时候为str
interval_type = res['interval_type']
analysi = res['analysis']
groupby = res['groupby']
true_df = df.groupby(groupby).sum()
groupby = res['groupby'] # group-by fields
true_df = df.groupby(groupby).sum() # sum per group
group_label = res['group_label']
quota_interval_arr = res['quota_interval_arr']
# 兼容合计的
@ -2070,7 +2070,7 @@ async def trace_model_sql(
event_num_dict = {}
event_next_event = {}
nodes = {'流失'}
name_list = analysis.events['event_namesdes']
name_list = analysis.events['event_namesdes'] # events included in the analysis
name_dict = {}
for i in name_list:
name_dict[i['event_name']] = i['event_desc']
@ -2126,7 +2126,7 @@ async def trace_model_sql(
trail.append(i)
else:
nodes.append(i)
first.sort(reverse=True)
first.sort(reverse=True) # sort in descending order
for i in first:
nodes.insert(0, i)
for i in trail:
@ -2341,8 +2341,8 @@ async def user_property_model(
account_id = list(df['#account_id'])
new_sql = f"""select `#account_id`,`#ip`,`#distinct_id`,rmbmoney,owner_name,lv,zhanli,channel,
channel,svrindex,maxmapid,name,`exp`,vip,jinbi,last_account_login_time,binduid from {game}.`user` where `#account_id` in ({account_id})"""
df1 = await ckdb.query_dataframe(new_sql)
new_values = df1.values.tolist()
df1 = await ckdb.query_dataframe(new_sql) # query data for the matched users
new_values = df1.values.tolist() # all values as a list of rows
for i in range(len(new_values)):
if str(new_values[i][6]) == 'nan':
new_values[i][6] = 0

View File

@ -7,6 +7,7 @@ __all__ = 'select_map',
class CRUDSelectMap(CRUDBase):
# update data (upsert)
async def save(self, db: AsyncIOMotorDatabase, data_in: schemas.SelectMap):
where = {'attr_name': data_in.attr_name, 'game': data_in.game}
return await self.update_one(db, where, {'$set': data_in.dict(skip_defaults=True)}, upsert=True)
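With upsert=True this save() is an upsert: a document matching (attr_name, game) is updated if present and inserted otherwise. A rough raw-motor equivalent of the call above (collection name and argument values are illustrative):

async def save_select_map(db, attr_name: str, game: str, data: dict):
    # update the matching document, or insert it when no match exists
    await db.select_map.update_one(
        {'attr_name': attr_name, 'game': game},  # match key
        {'$set': data},                          # fields to write
        upsert=True,
    )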
@ -20,7 +21,7 @@ class CRUDSelectMap(CRUDBase):
where = {'game': game}
res = await self.find_many(db, where, {'_id': 0})
return res
# find one document, without returning _id
async def get_select(self, db: AsyncIOMotorDatabase, data_in: schemas.SelectAttr, game: str):
where = {'game': game, **data_in.dict()}
res = await self.find_one(db, where, {'_id': 0})

View File

@ -137,7 +137,7 @@ class BehaviorAnalysis:
self.data_where = []
async def init(self, *args, **kwargs):
# taken when dashboard and report info is fetched automatically
if self.data_in.report_id:
db = get_database()
report = await crud.report.get(db, id=self.data_in.report_id)
@ -152,7 +152,7 @@ class BehaviorAnalysis:
except:
# backward compatibility with the old format
e_days, s_days = self.event_view['recentDay'].split('-')
# rebuild the date range relative to the local machine's current time
self.event_view['endTime'] = arrow.get().shift(days=-int(e_days)).strftime('%Y-%m-%d 23:59:59')
self.event_view['startTime'] = arrow.get().shift(days=-int(s_days)).strftime('%Y-%m-%d 00:00:00')
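The fallback above parses the old 'recentDay' format ('e-s': days back for the end and start of the window) and rebuilds absolute timestamps from the current time, so a saved report always covers the same rolling window. A sketch of the arithmetic, assuming that format (the sample value is illustrative):

import arrow

recent_day = '1-7'  # illustrative: window from 7 days ago to 1 day ago
e_days, s_days = recent_day.split('-')
end_time = arrow.get().shift(days=-int(e_days)).strftime('%Y-%m-%d 23:59:59')
start_time = arrow.get().shift(days=-int(s_days)).strftime('%Y-%m-%d 00:00:00')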
@ -362,7 +362,7 @@ class BehaviorAnalysis:
user_filters.append(and_(*user_filter))
return event_filters, user_filters
# build the SQL for the selected where filters
async def handler_filts(self, *filters, nu=0):
"""