1. Improve the grouping options in distribution analysis

2. Show the Chinese display names of the corresponding tracked-event data in distribution analysis
3. Add an event label feature
李伟 2022-05-12 16:30:48 +08:00
parent 7f96405a8c
commit 95c03737eb
6 changed files with 57 additions and 15 deletions

View File

@@ -117,12 +117,13 @@ async def my_event(request: Request,
# my_data_auth = await crud.data_auth.get(db, ObjectId(data_auth_id))
# my_data_auth = my_data_auth['data']
event_show_name = await crud.event_mana.get_all_show_name(db, game)
event_show_name, event_show_label_id = await crud.event_mana.get_all_show_name(db, game)
event_list.append({'id': 'event', 'title': '全部事件', 'category': []})
for item in my_data_auth:
event_list[-1]['category'].append({
'event_name': item,
'event_desc': event_show_name.get(item, item)
'event_desc': event_show_name.get(item, item),
'event_label': event_show_label_id.get(item, '')
})
event_list[-1]['category'].append({'event_name': '*', 'event_desc': '任意事件'})
event_list.sort()
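
For context, here is a minimal standalone sketch of how the pair returned by the updated get_all_show_name is consumed above; the helper name build_event_category and the sample data are illustrative only, not code from the repository. Note that because the method now returns a tuple, every caller has to unpack two values.

# Illustrative sketch only: mirrors the loop above with made-up inputs.
def build_event_category(my_data_auth, event_show_name, event_show_label_id):
    """Build category entries: fall back to the raw event name when no
    display name exists, and to '' when no label has been assigned."""
    category = []
    for item in my_data_auth:
        category.append({
            'event_name': item,
            'event_desc': event_show_name.get(item, item),
            'event_label': event_show_label_id.get(item, ''),
        })
    category.append({'event_name': '*', 'event_desc': '任意事件'})
    return category


if __name__ == '__main__':
    show_name = {'login': '登录', 'pay': '充值'}   # event_name -> display name
    label_id = {'login': 'core', 'pay': ''}        # event_name -> label id (invented)
    print(build_event_category(['login', 'pay', 'logout'], show_name, label_id))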

View File

@@ -45,7 +45,8 @@ async def event_list(
'show_name': event_meta.get(name, {}).get('show_name', ''),
'is_show': event_meta.get(name, {}).get('is_show', True),
'desc': event_meta.get(name, {}).get('desc', ''),
'event_count': event_count.get(name, {}).get('v')
'event_count': event_count.get(name, {}).get('v'),
'label_id': event_meta.get(name, {}).get('label_id', '')
}
)
return schemas.Msg(code=0, msg='ok', data=res)

View File

@@ -1004,13 +1004,15 @@ async def scatter_model(
start_date=analysis.start_date
where=analysis.events[-1]['quotaname']
sql = res['sql']
#columnName = analysis.events[-1]['label_id']
# Query the level distribution of daily registered players whose acquisition-channel owner is kuaiyou3
# sql_list=sql.split("GROUP BY")
# sql01 = """and xiangsu.event.owner_name='kuaiyou3'GROUP BY"""""
# new_sql=sql_list[0]+sql01+sql_list[1]
# if columnName != '':
# sql = sql.replace('SELECT', f'SELECT {columnName},', 1)
# sql += f',{columnName}'
df = await ckdb.query_dataframe(sql)
if df.empty:
return schemas.Msg(code=-9, msg='无数据', data=None)
@@ -1184,8 +1186,11 @@ async def scatter_model(
sql=res['sql']
columnName = analysis.event_view['groupBy'][-1]['columnName']
if analysis.event_view['groupBy'] != []:
sql=sql.replace(f'toDate(addHours({game}.event."#event_time", 8)) AS date', f'max(`{columnName}`) as va', 1)
sql=sql.replace(f'toDate(addHours({game}.event."#event_time", 8)),','',1)
if columnName != '':
sql = sql.replace(f'toDate(addHours({game}.event."#event_time", 8)) AS date', f'{columnName} as va',
1)
sql = sql.replace(f'toDate(addHours({game}.event."#event_time", 8))', columnName, 1)
df = await ckdb.query_dataframe(sql)
if df.empty:
return schemas.Msg(code=-9, msg='无数据', data=None)
@@ -1205,6 +1210,7 @@ async def scatter_model(
df['date'] = '合计'
if analysi != 'number_of_days' and interval_type != 'discrete':
# Default intervals
max_v = int(df['values'].max())
min_v = int(df['values'].min())
interval = (max_v - min_v) // 10 or 1
@@ -1214,7 +1220,11 @@ async def scatter_model(
'time_particle': res['time_particle'],
'biaotou':columnName
}
if 'float' in str(df.dtypes['va']):
df['va'] = df['va'].astype(int)
if 'list' in str(type(df['va'][0])):
f = lambda x: x[0]
df['va'] = df['va'].map(f)
if not quota_interval_arr:
resp['label'] = [f'[{i},{i + interval})' for i in range(min_v, max_v, interval)]
bins = [i for i in range(min_v, max_v + interval, interval)]
@@ -1225,16 +1235,16 @@ async def scatter_model(
for i, v in enumerate(quota_interval_arr[1:]):
resp['label'].append(f'[{quota_interval_arr[i]},{v})')
bins.append(v)
if 'float' in str(df.dtypes['va']):
df['va'] = df['va'].astype(int)
if 'list' in str(type(df['va'][0])):
f = lambda x: x[0]
df['va'] = df['va'].map(f)
# if 'float' in str(df.dtypes['va']):
# df['va'] = df['va'].astype(int)
# if 'list' in str(type(df['va'][0])):
# f = lambda x: x[0]
# df['va'] = df['va'].map(f)
# This is the grouped branch
for key, tmp_df in df.groupby('va'):
bins_s = pd.cut(tmp_df['values'], bins=bins,
right=False).value_counts()
right=True, include_lowest=True).value_counts()
bins_s.sort_index(inplace=True)
total = int(bins_s.sum())
if res['time_particle'] == 'total':
@@ -1248,6 +1258,17 @@ async def scatter_model(
for i in range(len(p)):
if str(p[i]) == 'nan':
p[i] = 0
# Map the group key to the display value of the corresponding tracked data
re = await crud.select_map.get_list(db, game)
re_list = [i['attr_name'] for i in re]
if columnName in re_list:
for i in re:
if columnName == i['attr_name']:
for datas in i['map_']:
if key == datas['id']:
key = datas['title']
break
break
if 'time' not in columnName:
resp['list'][key] = dict()
resp['list'][key] = {'n': bins_s.to_list(), 'total': total,
@@ -1266,6 +1287,7 @@ async def scatter_model(
return Download
return schemas.Msg(code=0, msg='ok', data=resp)
else:
# Discrete numbers
resp = {'list': {}, 'label': [],
'start_date': res['start_date'],
'end_date': res['end_date'],
@@ -1282,6 +1304,17 @@ async def scatter_model(
if res['time_particle'] == 'total':
dt = '合计'
else:
# Map the group key to the display value of the corresponding tracked data
re = await crud.select_map.get_list(db, game)
re_list = [i['attr_name'] for i in re]
if columnName in re_list:
for i in re:
if columnName == i['attr_name']:
for datas in i['map_']:
if key == datas['id']:
key = datas['title']
break
break
dt = key
#dt = key.strftime('%Y-%m-%d')
#dt='合计'
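
Two behaviors in the hunks above are worth calling out: pandas.cut is now asked for right-closed buckets that include the lowest edge, and raw group keys are remapped to the display titles configured in select_map documents. The sketch below reproduces both with invented data outside the FastAPI/ClickHouse context; only pandas is assumed, and the select_map document shape is inferred from the usage above.

# Illustrative sketch only (standalone, invented data).
import pandas as pd

values = pd.Series([1, 3, 5, 5, 9, 12])
min_v, max_v = int(values.min()), int(values.max())
interval = (max_v - min_v) // 10 or 1
bins = list(range(min_v, max_v + interval, interval))

# right=True with include_lowest=True keeps both min_v (first bucket) and
# max_v (last bucket) inside the buckets; the previous right=False dropped max_v.
bins_s = pd.cut(values, bins=bins, right=True, include_lowest=True).value_counts()
bins_s = bins_s.sort_index()
print(bins_s.to_list(), int(bins_s.sum()))

# Remap a raw group key (e.g. a channel id) to the human-readable title held in
# a select_map-style document; the document below is made up for illustration.
select_map = [{'attr_name': 'owner_name',
               'map_': [{'id': 'kuaiyou3', 'title': '快游3'}]}]
columnName, key = 'owner_name', 'kuaiyou3'
for doc in select_map:
    if doc['attr_name'] == columnName:
        for entry in doc['map_']:
            if entry['id'] == key:
                key = entry['title']
                break
        break
print(key)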

View File

@@ -22,9 +22,11 @@ class CRUDEventMap(CRUDBase):
async def get_all_show_name(self, db: AsyncIOMotorDatabase, game: str):
cursor = self.find(db, {'game': game})
res = {}
res1 = {}
async for item in self.to_list(cursor):
res[item['event_name']] = item['show_name']
return res
res1[item['event_name']] = item.get('label_id', '')
return res, res1
async def create_index(self, db: AsyncIOMotorDatabase):
await db[self.coll_name].create_index(
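
A standalone sketch of the two-dictionary accumulation above, using a plain list instead of a Motor cursor; the sample documents are invented. The .get('label_id', '') fallback matters because event_map documents written before this commit carry no label_id field.

# Illustrative sketch only: how get_all_show_name now builds two maps,
# event_name -> show_name and event_name -> label_id.
docs = [
    {'event_name': 'login', 'show_name': '登录', 'label_id': 'core'},
    {'event_name': 'pay', 'show_name': '充值'},  # legacy document without label_id
]

res, res1 = {}, {}
for item in docs:
    res[item['event_name']] = item['show_name']
    res1[item['event_name']] = item.get('label_id', '')  # '' for legacy documents

print(res)   # {'login': '登录', 'pay': '充值'}
print(res1)  # {'login': 'core', 'pay': ''}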

View File

@@ -679,6 +679,10 @@ ORDER BY level
.where(and_(*where)) \
.group_by(event_date_col,e_account_id_col)
sql = str(qry.compile(compile_kwargs={"literal_binds": True}))
columnName = event['label_id']
if columnName != '':
sql = sql.replace('SELECT', f'SELECT {columnName},', 1)
sql += f',{columnName}'
print(sql)
return {
'sql': sql,

View File

@@ -6,3 +6,4 @@ class EventMateEdit(BaseModel):
show_name: str
is_show: bool
desc: str
label_id: str
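
A minimal sketch of the extended schema as it appears in this hunk (the class may declare further fields above the ones shown); the example payload values are invented. Because label_id is added without a default, clients calling the edit endpoint must now send it.

# Illustrative sketch only: the fields visible in this hunk, plus a sample payload.
from pydantic import BaseModel


class EventMateEdit(BaseModel):
    show_name: str
    is_show: bool
    desc: str
    label_id: str  # new in this commit; no default, so callers must supply it


payload = {'show_name': '登录', 'is_show': True, 'desc': 'player login event', 'label_id': 'core'}
print(EventMateEdit(**payload))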