1. Add comments to the code
This commit is contained in:
parent 9bd21f574c
commit b6c5ce6187
@@ -35,7 +35,7 @@ async def add_role_domain(
     # domain=item.game)
     #
     # return schemas.Msg(code=0, msg='添加成功', data=True)
-    res = await crud.url_list.get_all(db)
+    res = await crud.url_list.get_all(db)  # fetch every route and its state for all permission levels
     role_id = {}
     for i in res:
         role_id[i['auth_id']] = i['name']
@@ -60,7 +60,7 @@ async def add_role_domain(
                                              quanxian_id=quanxian_id))
     return schemas.Msg(code=0, msg='添加成功', data=True)

-
+# possibly deprecated
 @router.post("/get_permissions_for_user_in_domain")
 async def get_permissions_for_user_in_domain(
         request: Request,
@@ -153,6 +153,7 @@ async def add_policy(
 async def remove_policy(
         request: Request,
         data_in: schemas.Del_role,
+        db: AsyncIOMotorDatabase = Depends(get_database),
         current_user: schemas.UserDB = Depends(deps.get_current_user)):
     """
     Modify a role's API permissions
@@ -19,7 +19,7 @@ from models.x_analysis import XAnalysis

 router = APIRouter()

-
+# deprecated
 @router.post("/update_event_view")
 async def update_event_view(
         request: Request,
@@ -32,6 +32,7 @@ async def edit_show_report(
         db: AsyncIOMotorDatabase = Depends(get_database),
         current_user: schemas.UserDB = Depends(deps.get_current_user)
 ) -> schemas.Msg:
+    """Edit the dashboard"""
     report_id = data_in.config.report_id
     res = await crud.dashboard.update_one(db, {'_id': data_in.dashboard_id, 'reports.report_id': report_id},
                                           {'$set': {f'reports.$.{k}': v for k, v in
@@ -20,7 +20,7 @@ from db.redisdb import get_redis_pool, RedisDrive

 router = APIRouter()

-
+# possibly deprecated
 @router.post('/add_data_auth')
 async def add_data_auth(request: Request,
                         data_id: schemas.DataAuthCreate,
@@ -32,7 +32,7 @@ async def add_data_auth(request: Request,
     await crud.data_auth.create(db, data_id, game)
     return schemas.Msg(code=0, msg='ok', data=data_id)

-
+# possibly deprecated
 @router.post('/edit_data_auth')
 async def edit_data_auth(request: Request,
                          data_id: schemas.DataAuthEdit,
@@ -44,7 +44,7 @@ async def edit_data_auth(request: Request,
     await crud.data_auth.edit_data_auth(db, data_id)
     return schemas.Msg(code=0, msg='ok', data=data_id)

-
+# the calculation methods each data type supports when analyzing metrics
 @router.get("/quotas_map")
 async def quotas_map(
         request: Request,
@@ -60,6 +60,7 @@ async def filter_map(
         game: str,
         current_user: schemas.UserDB = Depends(deps.get_current_user)
 ) -> schemas.Msg:
+    # in ck, different data types follow different calculation rules
     return schemas.Msg(code=0, msg='ok', data=settings.CK_FILTER)


@@ -154,7 +155,7 @@ async def user_property(request: Request,
                 'show_name': data_attr.get(k, {}).get('show_name', ''),
             }
         )
-    propertys = sorted(propertys, key=lambda x: x['show_name'])
+    propertys = sorted(propertys, key=lambda x: x['show_name'])  # sort by show_name

     return schemas.Msg(code=0, msg='ok', data=propertys)

@@ -170,7 +171,7 @@ async def load_prop_quotas(request: Request,
 ) -> schemas.Msg:
     """Event properties: aggregation conditions"""

-    event_columns = await ck.get_columns(game, 'event')
+    event_columns = await ck.get_columns(game, 'event')  # fetch the column names and types

     data_attr = await crud.data_attr.find_many(db, {'game': game, 'cat': 'event'})
     data_attr = {item['name']: item for item in data_attr}
@@ -136,7 +136,7 @@ async def edit_data_attr(
     # await crud.select_map.save(db, data_in)
     # return schemas.Msg(code=0, msg='ok', data=data_in)

-#server sync now happens in gametoos, so this code is no longer needed
+# server sync now happens in gametoos, but this is still needed when mapping other events
 @router.post("/add_select_map")
 async def add_select_map(
         request: Request,
@@ -148,7 +148,7 @@ async def add_select_map(
     """Add a selection mapping for game server info"""
     dfs = pd.read_excel(file, engine='openpyxl', sheet_name=None)
     for attr_name, df in dfs.items():
-        #convert the id column to string
+        # convert the id column to string
         if len(df) >0:
             df['id'] = df['id'].astype(str)
         map_ = df.to_dict('records')
@@ -165,7 +165,7 @@ async def select_list(
         current_user: schemas.UserDB = Depends(deps.get_current_user)
 ) -> schemas.Msg:
     """List the attribute-value selection mappings"""
-    #when the game is 魔法门H5, replace game with its database value (the value in mdb differs from the one in ck)
+    # when the game is 魔法门H5, replace game with its database value (the value in mdb differs from the one in ck)
     if game == 'mfmh5':
         game='mzmfmh5'
     resp = await crud.select_map.get_list(db, game)
@@ -207,13 +207,13 @@ async def add_attr(
     data = await rdb.get(f'{game}_{data_in.cat}')
     data = json.loads(data)
     if data_in.state =='add':
-        #determine the type of the incoming data
+        # determine the type of the incoming data
         new_data_type=estimate_data(data_in.data_type)
-        #add the data
+        # add the data
         data[data_in.new_attribute]=new_data_type
     else:
         del data[data_in.new_attribute]
-    #convert the dict to a string
+    # convert the dict to a string
     str_data=dict_to_str(data)
     await rdb.set(f'{game}_{data_in.cat}',str_data)
     return schemas.Msg(code=0, msg='ok')
@@ -27,9 +27,9 @@ async def event_list(
         current_user: schemas.UserDB = Depends(deps.get_current_user)
 ) -> schemas.Msg:
     """Event list"""
-    #fetch the event names
+    # fetch the event names
     try:
-        event_list = await ckdb.distinct(game, 'event', '#event_name')
+        event_list = await ckdb.distinct(game, 'event', '#event_name')  # fetch the event names
         # fetch the event counts
         event_count = await ckdb.yesterday_event_count(game)
         event_meta = await crud.event_mana.find_many(db, {'game': game}) or {}
@@ -23,7 +23,7 @@ async def create(
 ) -> schemas.Msg:
     """Create a project"""
     try:
-        res_project = await crud.project.create(db, data_in, current_user=request.user)
+        res_project = await crud.project.create(db, data_in, current_user=request.user)  # create the project
         await crud.project_number.createxiangmu(db, data_in)
         # insert the project synchronously
         # await crud.project_number.createxiangmu(db, data_in)
@@ -36,7 +36,7 @@ async def create(
             await crud.user_url.updata_quanxian(db, schemas.Url_quanxian(user=user_url['user'], user_id=user_url['user_id'],
                                                                          game=user_url['game'],
                                                                          quanxian_id=user_url['quanxian_id'],
-                                                                         quanxian=user_url['quanxian']))
+                                                                         quanxian=user_url['quanxian']))  # configure the permissions
     except pymongo.errors.DuplicateKeyError:
         return schemas.Msg(code=-1, msg='项目名已存在', data='项目名已存在')

@@ -181,16 +181,16 @@ async def event_model(
         request: Request,
         game: str,
         data_in: schemas.CkQuery,
-        ckdb: CKDrive = Depends(get_ck_db),
-        db: AsyncIOMotorDatabase = Depends(get_database),
-        rdb: RedisDrive = Depends(get_redis_pool),
+        ckdb: CKDrive = Depends(get_ck_db),  # ck driver
+        db: AsyncIOMotorDatabase = Depends(get_database),  # mongodb driver
+        rdb: RedisDrive = Depends(get_redis_pool),  # redis driver
         analysis: BehaviorAnalysis = Depends(BehaviorAnalysis),
-        current_user: schemas.UserDB = Depends(deps.get_current_user)
+        current_user: schemas.UserDB = Depends(deps.get_current_user)  # login check
 ) -> schemas.Msg:
     """ Event analysis"""
-    await analysis.init(data_where=current_user.data_where)
+    await analysis.init(data_where=current_user.data_where)  # initialize the request parameters
     try:
-        sqls = await analysis.event_model_sql()
+        sqls = await analysis.event_model_sql()  # generate the sql, the queried time range and the date type
     except Exception as e:
         return schemas.Msg(code=-9, msg='报表配置参数异常')
     res = []
@@ -226,7 +226,7 @@ async def event_model(
                 if last_value > 0:
                     q['last_value'] = float(last_value)
                     break
-            if list(item.get('event_name'))[-1] == '率':
+            if list(item.get('event_name'))[-1] == '率':  # append a percent sign to every ratio-type metric
                 for i in range(len(values)):
                     values[i] = str((values[i])) + '%'
             q['values'].append(values)
@@ -243,10 +243,10 @@ async def event_model(
             continue
         # the sql statement
         sql = item['sql']
-        groupby = item['groupby']
+        groupby = item['groupby']  # the group-by fields
         date_range = item['date_range']  # each day of the queried time range
         df = await ckdb.query_dataframe(sql)  # run the sql; df is a two-dimensional table
-        if item['event_name'] == '月充总额':
+        if item['event_name'] == '月充总额':  # total recharge and monthly recharge are handled separately before returning
             date_range=df['date'].tolist()
             q['date_range']=[str(i).split('-')[0]+'-'+str(i).split('-')[1] for i in date_range]
         else:
@@ -255,7 +255,7 @@ async def event_model(
             # map values to their Chinese labels for frontend display
             for i in groupby:
                 if i == 'svrindex':
-                    if game == 'mfmh5':
+                    if game == 'mfmh5':  # only mfmh5 is special-cased; its synced-server game name differs
                         game = 'mzmfmh5'
                     chinese = {}
                     resp = await crud.select_map.get_one(db, game, i)
@@ -355,10 +355,10 @@ async def event_model(
                 else:
                     concat_data.append((i, group, 0))
             df_group = pd.concat([df_group, pd.DataFrame(concat_data, columns=df_group.columns)])
-            df_group.sort_values('date', inplace=True)
+            df_group.sort_values('date', inplace=True)  # sort the data in place by the date column
             q['values'].append(df_group['values'].to_list())
-            q['sum'].append(round(float(df_group['values'].sum()), 2))
-            q['avg'].append(round(float(df_group['values'].mean()), 2))
+            q['sum'].append(round(float(df_group['values'].sum()), 2))  # the sum
+            q['avg'].append(round(float(df_group['values'].mean()), 2))  # the average
             for last_value in df['values'].values[::-1]:
                 if last_value > 0:
                     q['last_value'] = float(last_value)
@@ -503,7 +503,7 @@ async def retention_model(request: Request,
     except Exception as e:
         return schemas.Msg(code=-9, msg='报表配置参数异常')
     sql = res['sql']  # the generated sql statement
-    df = await ckdb.query_dataframe(sql)
+    df = await ckdb.query_dataframe(sql)  # run the query
     if df.empty:
         return schemas.Msg(code=-9, msg='无数据', data=None)

@@ -512,12 +512,12 @@ async def retention_model(request: Request,
     retention_n = res['retention_n']  # list of ints
     filter_item_type = res['filter_item_type']  # all
     filter_item = res['filter_item']  # list: 0,1,3,7,14,21,30
-    df.set_index('reg_date', inplace=True)
+    df.set_index('reg_date', inplace=True)  # rebuild the index
     # fill in the dates that have no data
     for d in set(res['date_range']) - set(df.index):
         df.loc[d] = 0

-    df.sort_index(inplace=True)
+    df.sort_index(inplace=True)  # sort the data in place
     summary_values = {'均值': {}}
     max_retention_n = 1
     # retained-user counts
@@ -557,11 +557,11 @@ async def retention_model(request: Request,
             retention_avg_dict[rn]['o_cnt0'] += rd['cnt0']
             retention_avg_dict[rn]['o_cntn'] += rd[f'on{rn}']
     # compute the averages
-    tmp['p'] = []
-    tmp['n'] = []
-    tmp['p_outflow'] = []
-    tmp['n_outflow'] = []
-    tmp['d0'] = 0
+    tmp['p'] = []  # retention rate
+    tmp['n'] = []  # retained users
+    tmp['p_outflow'] = []  # churn rate
+    tmp['n_outflow'] = []  # churned users
+    tmp['d0'] = 0  # total users
     for rt, rd in retention_avg_dict.items():
         tmp['d0'] = int(df['cnt0'].sum())
         n = round(rd['cntn'] * 100 / rd['cnt0'], 2)
@@ -582,15 +582,15 @@ async def retention_model(request: Request,
         items[key].extend(['-'] * (retention_length - len(items[key])))

     resp = {
-        'summary_values': summary_values,
+        'summary_values': summary_values,  # retention/churn data
         # 'values': values,
-        'date_range': [d.strftime('%Y-%m-%d') for d in date_range],
-        'title': title,
+        'date_range': [d.strftime('%Y-%m-%d') for d in date_range],  # the time range
+        'title': title,  # the table header
         'filter_item_type': filter_item_type,
         'filter_item': filter_item,
-        'start_date': res['start_date'],
-        'end_date': res['end_date'],
-        'time_particle': res['time_particle']
+        'start_date': res['start_date'],  # start date
+        'end_date': res['end_date'],  # end date
+        'time_particle': res['time_particle']  # time granularity

     }
     return schemas.Msg(code=0, msg='ok', data=resp)
@@ -668,13 +668,13 @@ async def retention_model(request: Request,
     if len(groupby_list) == 1:
         max_v = int(df[groupby_list[0]['columnName']].max())
         min_v = int(df[groupby_list[0]['columnName']].min())
-    for i in groupby:
+    for i in groupby:  # map the server indexes
         if i == 'svrindex':
             if game == 'mfmh5':
                 game = 'mzmfmh5'
             chinese = {}
             resp = await crud.select_map.get_one(db, game, i)
-            if not resp:
+            if not resp:  # skip when no mapping data is configured
                 continue
             for ii in resp:
                 chinese[ii['id']] = ii['title']
@@ -682,11 +682,11 @@ async def retention_model(request: Request,
                 # apply the mapping
                 df.loc[df['svrindex'] == k, 'svrindex'] = v
     times = df['reg_date'][0]
-    df.set_index(groupby, inplace=True)
+    df.set_index(groupby, inplace=True)  # rebuild the index
     # for d in set(res['date_range']) - set(df.index):
     #     df.loc[d] = 0

-    df.sort_index(inplace=True)
+    df.sort_index(inplace=True)  # sort
     summary_values = {'均值': {}}
     max_retention_n = 1
     # retained-user counts
@@ -862,7 +862,7 @@ async def retention_model(request: Request,
     }
     return schemas.Msg(code=0, msg='ok', data=resp)

-
+# deprecated
 async def retention_model01(request: Request,
                             game: str,
                             ckdb: CKDrive = Depends(get_ck_db),
@@ -986,7 +986,7 @@ async def retention_model_export(request: Request,
     export = d.to_stream()
     return StreamingResponse(export, media_type=mime, headers={'Content-Disposition': f'filename="{file_name}"'})

-
+# deprecated
 @router.post("/retention_model_del", deprecated=True)
 async def retention_model_del(
         request: Request,
@@ -1095,7 +1095,7 @@ async def funnel_model(
     cond_level = res['cond_level']
     groupby = res['groupby']
     switch_test = analysis.event_view.get('switchTest', True)
-    if switch_test:
+    if switch_test:  # taken when the toggle in the UI is on
         df = await ckdb.query_dataframe(sql)
         if df.empty:
             return schemas.Msg(code=-9, msg='无数据', data=None)
@@ -1112,7 +1112,7 @@ async def funnel_model(
         # df.set_index('date',inplace=True)
         data_list = []
         date_data = {}
-        if df.shape == (0, 0):
+        if df.shape == (0, 0):  # shape is (rows, columns); check whether any data came back
             return schemas.Msg(code=0, msg='ok', data={'list': data_list, 'level': cond_level})

         tmp = {'title': '总体'}
@@ -1123,7 +1123,7 @@ async def funnel_model(
         for i in tmp_df.index:
             tmp_df.loc[i, 'values'] = tmp_df.loc[i:]['values'].sum()

-        tmp['n'] = tmp_df['values'].to_list()
+        tmp['n'] = tmp_df['values'].to_list()  # return the values column as a list
         tmp['p1'] = [100]
         # tmp['p2'] = []
         for i, v in tmp_df.loc[2:, 'values'].items():
@@ -1142,7 +1142,7 @@ async def funnel_model(
             tmp_df = tmp_df.groupby('level').sum()
             tmp_df.sort_index(inplace=True)
             for i in tmp_df.index:
-                tmp_df.loc[i, 'values'] = tmp_df.loc[i:]['values'].sum()
+                tmp_df.loc[i, 'values'] = tmp_df.loc[i:]['values'].sum()  # sum the data

             tmp = dict()

@@ -1161,12 +1161,12 @@ async def funnel_model(
         if groupby:
             # fill in the missing data
             concat_data = []
-            idx = set(df.set_index(['date'] + groupby).index)
+            idx = set(df.set_index(['date'] + groupby).index)  # dedupe the index
             all_idx = {(*j, i) for i in range(1, len(cond_level) + 1) for j in idx}
             for i in all_idx - set(df.set_index(list(('date', *groupby, 'level'))).index):
                 concat_data.append((*i, 0))

-            df = pd.concat([df, pd.DataFrame(concat_data, columns=df.columns)])
+            df = pd.concat([df, pd.DataFrame(concat_data, columns=df.columns)])  # merge the data
             # df.sort_values(list((*groupby, 'level')), inplace=True, ascending=False)
             # map values to their Chinese labels for frontend display
             for i in groupby:
@@ -1184,7 +1184,7 @@ async def funnel_model(
                         df.loc[df[i] == k, i] = v
             for key, tmp_df in df.groupby(groupby):
                 tmp = {'title': key}
-                tmp_df = tmp_df.groupby('level').sum()
+                tmp_df = tmp_df.groupby('level').sum()  # group by level and sum
                 tmp_df.sort_index(inplace=True)
                 for i in tmp_df.index:
                     tmp_df.loc[i, 'values'] = tmp_df.loc[i:]['values'].sum()
@@ -1220,14 +1220,14 @@ async def funnel_model(
                 _ = date_data.setdefault(key[0].strftime('%Y-%m-%d'), {})
                 # [key[1]] = tmp
             title = (groupby or ['总体']) + cond_level
-            resp = {'list': data_list,
+            resp = {'list': data_list,  # the data list
                     'date_data': date_data,
-                    'title': title,
-                    'level': cond_level,
+                    'title': title,  # the table header
+                    'level': cond_level,  # the selected events
                     'switch_test': switch_test,
-                    'start_date': res['start_date'],
-                    'end_date': res['end_date'],
-                    'time_particle': res['time_particle']
+                    'start_date': res['start_date'],  # start date
+                    'end_date': res['end_date'],  # end date
+                    'time_particle': res['time_particle']  # time granularity
                     }
             return schemas.Msg(code=0, msg='ok', data=resp)
         else:
@@ -1243,8 +1243,8 @@ async def funnel_model(

             group_str = res['analysis']
             # cast to int
-            df[group_str] = df[group_str].astype(int)
-            step_list = [str(i) for i in sorted(df[group_str].unique())]
+            df[group_str] = df[group_str].astype(int)  # declare this column's dtype as int
+            step_list = [str(i) for i in sorted(df[group_str].unique())]  # dedupe and sort the unique values
             dict_k = {}
             df['values'] = 1
             for k, nedf in df.groupby("date"):
@@ -1431,9 +1431,9 @@ async def scatter_model(
         res = await analysis.scatter_model_sql()
     except Exception as e:
         return schemas.Msg(code=-9, msg='报表配置参数异常')
-    end_date = analysis.end_date
-    start_date = analysis.start_date
-    where = analysis.events[-1]['quotaname']
+    end_date = analysis.end_date  # end date
+    start_date = analysis.start_date  # start date
+    where = analysis.events[-1]['quotaname']  # the field name used in the query condition
     sql = res['sql']
     # columnName = analysis.events[-1]['label_id']

@@ -1450,29 +1450,29 @@ async def scatter_model(
     if 'list' in str(type(df['values'][0])):
         # f=lambda x:x[0]
         # df['values']=df['values'].map(f)
-        df = df.explode("values").reset_index(drop=True)
+        df = df.explode("values").reset_index(drop=True)  # unpack the nested values and reset the index
     # df['values']=df['values'].astype(str)
-    df.fillna(0, inplace=True)
+    df.fillna(0, inplace=True)  # fill missing values with 0 in place
     # convert the dtype to int
-    if analysis.events[-1].get('analysis') != 'uniqExact':
+    if analysis.events[-1].get('analysis') != 'uniqExact':  # if it is not a distinct count, declare the values as int
         df['values'] = df['values'].astype(int)
     else:
         df['values'] = df['values'].astype(str)  # uniformly declare str when using distinct counts
-    interval_type = res['interval_type']
+    interval_type = res['interval_type']  # the display form on the frontend: default/discrete/custom
     analysi = res['analysis']
     groupby = res['groupby']
     quota_interval_arr = res['quota_interval_arr']
     # compatibility for the totals view
-    if res['time_particle'] == 'total':
+    if res['time_particle'] == 'total':  # taken when computing over all of the data
         df['date'] = '合计'

     if analysi != 'number_of_days' and interval_type != 'discrete':
         try:
-            max_v = int(df['values'].max())
+            max_v = int(df['values'].max())  # the largest of the values
         except Exception as e:
             return schemas.Msg(code=-9, msg='请用离散数字', data=None)
-        min_v = int(df['values'].min())
-        interval = (max_v - min_v) // 10 or 1
+        min_v = int(df['values'].min())  # the smallest of the values
+        interval = (max_v - min_v) // 10 or 1  # the bin width: the min-max span split into ten equal parts
         resp = {'list': dict(),
                 'start_date': res['start_date'],
                 'end_date': res['end_date'],
@@ -1480,8 +1480,8 @@ async def scatter_model(
                 }

         if not quota_interval_arr:
-            resp['label'] = [f'[{i},{i + interval})' for i in range(min_v, max_v, interval)]
-            bins = [i for i in range(min_v, max_v + interval, interval)]
+            resp['label'] = [f'[{i},{i + interval})' for i in range(min_v, max_v, interval)]  # the table header
+            bins = [i for i in range(min_v, max_v + interval, interval)]  # the bins
         else:
             quota_interval_arr = [-float('inf')] + quota_interval_arr + [float('inf')]
             resp['label'] = []
@@ -1493,16 +1493,16 @@ async def scatter_model(
         # this is the overall view
         for key, tmp_df in df.groupby('date'):
             bins_s = pd.cut(tmp_df['values'], bins=bins,
-                            right=False, include_lowest=True).value_counts()
-            bins_s.sort_index(inplace=True)
-            total = int(bins_s.sum())
-            if res['time_particle'] == 'total':
+                            right=False, include_lowest=True).value_counts()  # split the values into the bins
+            bins_s.sort_index(inplace=True)  # re-sort
+            total = int(bins_s.sum())  # compute the count
+            if res['time_particle'] == 'total':  # the totals data
                 resp['list']['合计'] = dict()
                 p = list(round(bins_s * 100 / total, 2).to_list())
                 resp['list']['合计']['总体'] = {'n': bins_s.to_list(), 'total': total,
                                                'p': [str(i) + '%' for i in p],
                                                'title': '总体'}
-            else:
+            else:  # compute the data split per date
                 p = list(round(bins_s * 100 / total, 2).to_list())
                 resp['list'][key.strftime('%Y-%m-%d')] = dict()
                 resp['list'][key.strftime('%Y-%m-%d')]['总体'] = {'n': bins_s.to_list(), 'total': total,
@@ -1523,7 +1523,7 @@ async def scatter_model(
         #     'title': title
         # }
         download = analysis.event_view.get('download', '')
-        if download == 1:
+        if download == 1:  # download the data
             creat_df = create_df(resp)
             Download = Download_xlsx(creat_df, '分布分析')
             return Download
@@ -1540,7 +1540,7 @@ async def scatter_model(
         labels = [str(i) for i in sorted(df['values'].unique())]
         resp['label'] = labels
         shaixuan = analysis.events[0].get('analysis')
-        for key, tmp_df in df.groupby(['date']):
+        for key, tmp_df in df.groupby(['date']):  # group by date
             if shaixuan == 'uniqExact':
                 total = len(set(tmp_df['uid']))
             else:
@@ -1585,7 +1585,7 @@ async def scatter_model(
                 number_str = str(number_int) + '%'
                 list_p.append(number_str)
             resp['list'][dt] = {'总体': {'n': [labels_dict.get(i, 0) for i in labels], 'total': total,
-                                        'p': list_p}}
+                                        'p': list_p}}  # assemble the data
             # resp['list'][dt] = {'总体': {'n': [labels_dict.get(i, 0) for i in labels], 'total': total,
             #                             'p': [round(labels_dict.get(i, 0) * 100 / total, 2) for i in labels]}}
         if where == "step_id" and event_type == "guide":
@@ -1617,7 +1617,7 @@ async def scatter_model(
     #                            'p': round(bins_s * 100 / total, 2).to_list(),
     #                            'title': '总体'}

-
+# possibly deprecated; a distribution analysis of the same type exists
 @router.post("/guide_model")
 async def guide_model(
         request: Request,
@@ -1657,9 +1657,9 @@ async def guide_model(
     p_data = {}
     data = {}
     for dict_key, dict_data in dict_k.items():
-        dict_data1 = deepcopy(dict_data)
+        dict_data1 = deepcopy(dict_data)  # deep-copy the data
         dict_k1 = {int(k): v for k, v in dict_data1.items()}
-        sorted(dict_k1.keys())
+        sorted(dict_k1.keys())  # sort by key
         data_values = list(dict_k1.values())
         p_values = [round(i / sum(data_values), 2) or 0 for i in data_values]
         p_values.insert(0, dict_key)
@@ -1814,8 +1814,8 @@ async def scatter_model(
         df['values'] = df['values'].astype(str)  # uniformly declare str when using distinct counts
     interval_type = res['interval_type']
     analysi = res['analysis']
-    groupby = res['groupby']
-    true_df = df.groupby(groupby).sum()
+    groupby = res['groupby']  # the group-by fields
+    true_df = df.groupby(groupby).sum()  # sum per group
     group_label = res['group_label']
     quota_interval_arr = res['quota_interval_arr']
     # compatibility for the totals view
@@ -2070,7 +2070,7 @@ async def trace_model_sql(
     event_num_dict = {}
     event_next_event = {}
     nodes = {'流失'}
-    name_list = analysis.events['event_namesdes']
+    name_list = analysis.events['event_namesdes']  # the events included in the analysis
     name_dict = {}
     for i in name_list:
         name_dict[i['event_name']] = i['event_desc']
@@ -2126,7 +2126,7 @@ async def trace_model_sql(
             trail.append(i)
         else:
             nodes.append(i)
-    first.sort(reverse=True)
+    first.sort(reverse=True)  # sort descending
     for i in first:
         nodes.insert(0, i)
     for i in trail:
@@ -2341,8 +2341,8 @@ async def user_property_model(
     account_id = list(df['#account_id'])
     new_sql = f"""select `#account_id`,`#ip`,`#distinct_id`,rmbmoney,owner_name,lv,zhanli,channel,
 channel,svrindex,maxmapid,name,`exp`,vip,jinbi,last_account_login_time,binduid from {game}.`user` where `#account_id` in ({account_id})"""
-    df1 = await ckdb.query_dataframe(new_sql)
-    new_values = df1.values.tolist()
+    df1 = await ckdb.query_dataframe(new_sql)  # query the data for the matching users
+    new_values = df1.values.tolist()  # return all the values as lists
     for i in range(len(new_values)):
         if str(new_values[i][6]) == 'nan':
             new_values[i][6] = 0
@@ -7,6 +7,7 @@ __all__ = 'select_map',


 class CRUDSelectMap(CRUDBase):
+    # update the data
     async def save(self, db: AsyncIOMotorDatabase, data_in: schemas.SelectMap):
         where = {'attr_name': data_in.attr_name, 'game': data_in.game}
         return await self.update_one(db, where, {'$set': data_in.dict(skip_defaults=True)}, upsert=True)
@@ -20,7 +21,7 @@ class CRUDSelectMap(CRUDBase):
         where = {'game': game}
         res = await self.find_many(db, where, {'_id': 0})
         return res
-
+    # find one document, without returning _id
     async def get_select(self, db: AsyncIOMotorDatabase, data_in: schemas.SelectAttr, game: str):
         where = {'game': game, **data_in.dict()}
         res = await self.find_one(db, where, {'_id': 0})
@@ -137,7 +137,7 @@ class BehaviorAnalysis:
         self.data_where = []

     async def init(self, *args, **kwargs):
-
+        # taken when dashboard and report info is fetched automatically
         if self.data_in.report_id:
             db = get_database()
             report = await crud.report.get(db, id=self.data_in.report_id)
@@ -152,7 +152,7 @@ class BehaviorAnalysis:
         except:
             # backward compatibility with the old format
             e_days, s_days = self.event_view['recentDay'].split('-')
-
+            # based on the local machine's time, auto-fit the current window to the time range
             self.event_view['endTime'] = arrow.get().shift(days=-int(e_days)).strftime('%Y-%m-%d 23:59:59')
             self.event_view['startTime'] = arrow.get().shift(days=-int(s_days)).strftime('%Y-%m-%d 00:00:00')

@@ -362,7 +362,7 @@ class BehaviorAnalysis:
                 user_filters.append(and_(*user_filter))

         return event_filters, user_filters
-
+    # generate the SQL for the selected where filters
     async def handler_filts(self, *filters, nu=0):
         """