Add single-event funnel

李宗振 2022-08-18 19:13:55 +08:00
parent 8eb8da852c
commit f02ea6dd18
2 changed files with 217 additions and 26 deletions

View File

@@ -1520,6 +1520,58 @@ async def scatter_model(
# 'title': '总体'}
@router.post("/guide_model")
async def guide_model(
request: Request,
game: str,
ckdb: CKDrive = Depends(get_ck_db),
db: AsyncIOMotorDatabase = Depends(get_database),
analysis: BehaviorAnalysis = Depends(BehaviorAnalysis),
current_user: schemas.UserDB = Depends(deps.get_current_user)
) -> schemas.Msg:
"""分布分析 模型"""
await analysis.init(data_where=current_user.data_where)
event_type = analysis.events[0]['eventName']
try:
res = await analysis.guide_model_sql()
    except Exception:
        return schemas.Msg(code=-9, msg='Invalid report configuration parameters')
sql = res['sql']
df = await ckdb.query_dataframe(sql)
if df.empty:
        return schemas.Msg(code=-9, msg='no data', data=None)
group_str = res['analysis']
    # Cast the grouping column to int so the funnel steps sort numerically.
    df[group_str] = df[group_str].astype(int)
    step_list = [str(i) for i in sorted(df[group_str].unique())]
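    # step_list keeps the steps as strings ('1', '2', ...) so it can double
    # as the header row of the response below.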
    dict_k = {}
    # Pivot the dataframe into {date: {step: count}}.
    for k, nedf in df.groupby("date"):
        ste_k = {}
        for kk, ste_df in nedf.groupby(group_str):
            ste_df.reset_index(drop=True, inplace=True)
            ste_k[str(kk)] = int(ste_df['values'][0])
        # Zero-fill steps that have no rows on this date.
        for ste in step_list:
            if ste not in ste_k:
                ste_k[ste] = 0
        dict_k[str(k)] = ste_k
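    # dict_k now maps each date to a complete {step: count} dict, e.g.
    # {'2022-08-01': {'1': 1200, '2': 800, '3': 0}} (counts hypothetical).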
    data = {}
    for dict_key, dict_data in dict_k.items():
        # Order the counts by step number and prepend the date, producing
        # one row per date: [date, step1_count, step2_count, ...].
        step_counts = {int(k): v for k, v in dict_data.items()}
        data_values = [step_counts[k] for k in sorted(step_counts)]
        data_values.insert(0, dict_key)
        data[dict_key] = data_values
    step_list.insert(0, '日期')  # '日期' = 'Date': header cell for the date column
res_msg = {
'level': step_list,
'list': data
}
return schemas.Msg(code=0, msg='ok', data=res_msg)
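The response pairs a header row (`level`) with one list per date; a minimal sketch of the payload shape for a hypothetical three-step funnel (all counts invented):

res_msg = {
    'level': ['日期', '1', '2', '3'],
    'list': {
        '2022-08-01': ['2022-08-01', 1200, 800, 0],
        '2022-08-02': ['2022-08-02', 950, 610, 120],
    },
}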
@router.post("/scatter_model_details")
async def scatter_model(
request: Request,

View File

@@ -873,7 +873,8 @@ ORDER BY level
if analysis in ['number_of_days', 'number_of_hours']:
values_col = func.count(func.distinct(e_account_id_col)).label('values')
if analysis in ['times', 'number_of_days', 'number_of_hours', 'sum', 'avg', 'median', 'max', 'min', 'distinct_count']:
if analysis in ['times', 'number_of_days', 'number_of_hours', 'sum', 'avg', 'median', 'max', 'min',
'distinct_count']:
if self.time_particle == 'total':
qry = sa.select(*self.groupby, values_col) \
.where(and_(*where)) \
@@ -940,6 +941,144 @@ ORDER BY level
'end_date': self.end_date[:10],
}
    async def guide_model_sql(self):
        # Build the SQL for the single-event funnel: per-step event counts.
event = self.events[0]
event_name = event['eventName']
analysis = event['analysis']
if analysis in ['list_distinct', "set_distinct", "ele_distinct"]:
analysis = 'max'
e_account_id_col = getattr(self.event_tbl.c, '#account_id').label('uid')
u_account_id_col = getattr(self.user_tbl.c, '#account_id')
event_name_col = getattr(self.event_tbl.c, '#event_name')
event_time_col = getattr(self.event_tbl.c, '#event_time').label('date')
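        # Bucket the zone-adjusted event time at the report's time granularity.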
event_date_col = settings.TIME_GRAIN_EXPRESSIONS[self.time_particle](event_time_col, self.zone_time)
quota_interval_arr = event.get('quotaIntervalArr')
time = self.data_in.time
        # `where` is assigned in every branch below, so no global is needed.
        # A drill-down from the distribution grouping narrows the time range;
        # every other case takes the else branch.
        if time is not None and time != '合计':  # '合计' = 'overall'
            timeParticleSize = self.event_view.get('timeParticleSize')  # weekly / monthly / total buckets
            if timeParticleSize == 'P1W':  # weekly buckets
                start_date, end_date = get_week(time)
                if start_date < strptime(self.start_date):  # first, possibly partial, week
                    where = [
                        func.addHours(event_time_col, self.zone_time) >= self.start_date,
                        func.addHours(event_time_col, self.zone_time) <= end_date,
                    ]
                elif end_date < strptime(self.end_date):  # a full week in the middle
                    where = [
                        func.addHours(event_time_col, self.zone_time) >= start_date,
                        func.addHours(event_time_col, self.zone_time) <= end_date,
                    ]
                else:  # last, possibly partial, week
                    where = [
                        func.addHours(event_time_col, self.zone_time) >= start_date,
                        func.addHours(event_time_col, self.zone_time) <= self.end_date,
                    ]
            elif timeParticleSize == 'P1M':  # monthly buckets
                start_date, end_date = start_end_month(time)
                if strptime(self.start_date) > strptime1(time):  # first, possibly partial, month
                    where = [
                        func.addHours(event_time_col, self.zone_time) >= self.start_date,
                        func.addHours(event_time_col, self.zone_time) <= end_date,
                    ]
                else:
                    where = [
                        func.addHours(event_time_col, self.zone_time) >= start_date,
                        func.addHours(event_time_col, self.zone_time) <= self.end_date,
                    ]
            else:  # other granularities: keep the full report range
                where = [
                    func.addHours(event_time_col, self.zone_time) >= self.start_date,
                    func.addHours(event_time_col, self.zone_time) <= self.end_date,
                ]
        else:
            where = [
                func.addHours(event_time_col, self.zone_time) >= self.start_date,
                func.addHours(event_time_col, self.zone_time) <= self.end_date,
            ]
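        # Whichever branch ran, `where` now holds the inclusive time window:
        # the drill-down bucket clipped to the report range, or the full
        # report range itself.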
if event_name != '*':
where.append(event_name_col == event_name)
        event_filter, user_filter = await self.handler_filts(
            (event['filts'], event.get('relation', 'and')),
            (self.global_filters, self.global_relation),
            self.ext_filters,
        )
if user_filter:
where.append(e_account_id_col.in_(sa.select(u_account_id_col).where(*user_filter)))
where.extend(event_filter)
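        # Metric column: a plain event count by default; day/hour analyses
        # count distinct accounts instead.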
values_col = func.count().label('values')
if analysis in ['number_of_days', 'number_of_hours']:
values_col = func.count(func.distinct(e_account_id_col)).label('values')
        if analysis:  # treat `analysis` as the funnel-step column to splice in
if self.time_particle == 'total':
qry = sa.select(*self.groupby, analysis, values_col) \
.where(and_(*where)) \
.group_by(*self.groupby, analysis, e_account_id_col)
else:
qry = sa.select(event_date_col, *self.groupby, values_col) \
.where(and_(*where)) \
.group_by(event_date_col, *self.groupby, e_account_id_col)
            sql = str(qry.compile(compile_kwargs={"literal_binds": True}))
            print(sql)
            # Splice the step column into the compiled SQL: select it, group
            # by it, and drop rows where it is NULL.
            sqla = sql.replace('SELECT', f'SELECT {analysis}, ', 1)
            sqlb = sqla.replace('GROUP BY', f'GROUP BY {analysis}, ', 1)
            sqlc = sqlb.replace('WHERE', f'WHERE {analysis} is not null AND ', 1)
            print(sqlc)
return {
'sql': sqlc,
'group_label': self.group_label,
'interval_type': event['intervalType'],
'analysis': analysis,
'quota_interval_arr': quota_interval_arr,
'groupby': [i.key for i in self.groupby],
'time_particle': self.time_particle,
'start_date': self.start_date[:10],
'end_date': self.end_date[:10],
}
elif event.get('quota'):
event_attr_col = getattr(self.event_tbl.c, event['quota'])
if self.time_particle == 'total':
                if analysis == 'uniqExact':
                    # distinct count, overall total
qry = sa.select(e_account_id_col,
event_attr_col.label('values')) \
.where(and_(*where)) \
.group_by(*self.groupby, e_account_id_col, event_attr_col)
else:
qry = sa.select(e_account_id_col,
settings.CK_FUNC[analysis](event_attr_col).label('values')) \
.where(and_(*where)) \
.group_by(*self.groupby, e_account_id_col)
else:
                if analysis == 'uniqExact':
                    # distinct count per time bucket
qry = sa.select(event_date_col, e_account_id_col,
event_attr_col.label('values')) \
.where(and_(*where)) \
.group_by(event_date_col, e_account_id_col, event_attr_col)
else:
qry = sa.select(event_date_col, e_account_id_col,
settings.CK_FUNC[analysis](event_attr_col).label('values')) \
.where(and_(*where)) \
.group_by(event_date_col, e_account_id_col)
sql = str(qry.compile(compile_kwargs={"literal_binds": True}))
            columnName = event.get('label_id', '')
            if columnName != '':
                # Select the label column too, and append it to the trailing
                # GROUP BY list.
                sql = sql.replace('SELECT', f'SELECT {columnName},', 1)
                sql += f',{columnName}'
print(sql)
return {
'sql': sql,
'group_label': self.group_label,
'interval_type': event['intervalType'],
'analysis': analysis,
'quota_interval_arr': quota_interval_arr,
'groupby': [i.key for i in self.groupby],
'time_particle': self.time_particle,
'start_date': self.start_date[:10],
'end_date': self.end_date[:10],
}
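A minimal standalone sketch of what the three string replacements in the `if analysis:` branch do to a compiled query (the table, column, and literal values here are hypothetical):

analysis = 'step'  # hypothetical funnel-step column
sql = ("SELECT toDate(`#event_time`) AS date, count(*) AS values "
       "FROM event.event WHERE `#event_name` = 'guide' GROUP BY date")
sqla = sql.replace('SELECT', f'SELECT {analysis}, ', 1)
sqlb = sqla.replace('GROUP BY', f'GROUP BY {analysis}, ', 1)
sqlc = sqlb.replace('WHERE', f'WHERE {analysis} is not null AND ', 1)
# sqlc == "SELECT step, toDate(`#event_time`) AS date, count(*) AS values "
#         "FROM event.event WHERE step is not null AND "
#         "`#event_name` = 'guide' GROUP BY step, date"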
    async def trace_model_sql(self):
        # Build the SQL for path (trace) analysis.
session_interval = self.event_view.get('session_interval')