commit 28ccf54d65 (parent 779369ecf6)
@@ -178,7 +178,7 @@ async def load_prop_quotas(request: Request,
             'id': item['name'],
             'data_type': data_type,
             'title': title,
-            'category': settings.CK_OPERATOR.get(data_type) or []
+            # 'category': settings.CK_OPERATOR.get(data_type) or []
         }
         event_props.append(event_prop)
     if data_in.model == 'scatter':
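A side note on the expression commented out above: settings.CK_OPERATOR.get(data_type) or [] falls back to an empty list both when data_type is missing from the mapping and when the stored value is falsy, which a plain .get(data_type, []) default would not do. A minimal illustration (the mapping contents below are invented; only the CK_OPERATOR name appears in this commit):

    # Hypothetical contents; settings.CK_OPERATOR is not shown in this diff.
    CK_OPERATOR = {'string': ['=', '!='], 'datetime': None}

    CK_OPERATOR.get('int', [])          # []   : key missing, default used
    CK_OPERATOR.get('datetime', [])     # None : key present, default ignored
    CK_OPERATOR.get('datetime') or []   # []   : falsy value coerced as well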
@@ -188,21 +188,18 @@ async def load_prop_quotas(request: Request,
                 "data_type": None,
                 "analysis": "times",
                 "title": "次数",
-                "category": []
             },
             {
                 "id": "*",
                 "data_type": None,
                 "analysis": "number_of_days",
                 "title": "天数",
-                "category": []
             },
             {
                 "id": "*",
                 "data_type": None,
                 "analysis": "number_of_hours",
                 "title": "小时数",
-                "category": []
             },
         ]
     else:
@@ -212,21 +209,18 @@ async def load_prop_quotas(request: Request,
                 "data_type": None,
                 "analysis": "total_count",
                 "title": "总次数",
-                "category": []
             },
             {
                 "id": "*",
                 "analysis": "touch_user_count",
                 "data_type": None,
                 "title": "触发用户数",
-                "category": []
             },
             {
                 "id": "*",
                 "analysis": "touch_user_avg",
                 "data_type": None,
                 "title": "人均次数",
-                "category": []
             },
         ]
 
@@ -263,7 +257,6 @@ async def load_filter_props(request: Request,
             'id': item['name'],
             'data_type': data_type,
             'title': title,
-            'category': settings.CK_FILTER.get(data_type) or []
         }
         event_props.append(event_prop)
 
@@ -277,7 +270,6 @@ async def load_filter_props(request: Request,
             'id': item['name'],
             'data_type': data_type,
             'title': title,
-            'category': settings.CK_FILTER.get(data_type) or []
         }
         user_props.append(user_prop)
 
@@ -136,8 +136,8 @@ async def event_model(
             # df_group.reset_index(drop=True, inplace=True)
             q['groups'].append(groupby)
             q['values'].append(df['values'].to_list())
-            q['sum'].append(round(float(df['values'].sum()),2))
-            q['avg'].append(round(float(df['values'].mean()),2))
+            q['sum'].append(round(float(df['values'].sum()), 2))
+            q['avg'].append(round(float(df['values'].mean()), 2))
             for last_value in df['values'].values[::-1]:
                 if last_value > 0:
                     q['last_value'] = float(last_value)
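Why these sums pass through float() before round(): pandas reductions return numpy scalars (numpy.float64), which some JSON encoders used by response serializers refuse; casting first keeps q JSON-safe. A quick standalone check:

    import pandas as pd

    df = pd.DataFrame({'values': [1.234, 2.345]})
    s = df['values'].sum()     # numpy.float64, not a built-in float
    round(float(s), 2)         # 3.58 -> plain Python float, JSON-serializable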
@@ -165,8 +165,8 @@ async def event_model(
             df_group = pd.concat([df_group, pd.DataFrame(concat_data, columns=df_group.columns)])
             df_group.sort_values('date', inplace=True)
             q['values'].append(df_group['values'].to_list())
-            q['sum'].append(round(float(df_group['values'].sum()),2))
-            q['avg'].append(round(float(df_group['values'].mean()),2))
+            q['sum'].append(round(float(df_group['values'].sum()), 2))
+            q['avg'].append(round(float(df_group['values'].mean()), 2))
             for last_value in df['values'].values[::-1]:
                 if last_value > 0:
                     q['last_value'] = float(last_value)
@@ -188,8 +188,8 @@ async def event_model(
                 if last_value > 0:
                     q['last_value'] = float(last_value)
                     break
-        q['sum'].append(round(float(df['values'].sum()),2))
-        q['avg'].append(round(float(df['values'].mean()),2))
+        q['sum'].append(round(float(df['values'].sum()), 2))
+        q['avg'].append(round(float(df['values'].mean()), 2))
         if item['time_particle'] in ('P1D', 'P1W'):
             q['date_range'] = [d.strftime('%Y-%m-%d') for d in q['date_range']]
         elif item['time_particle'] in ('P1M',):
@@ -198,6 +198,15 @@ async def event_model(
         q['date_range'] = [d.strftime('%Y-%m-%d %H:%M:%S') for d in q['date_range']]
         # q['eventNameDisplay']=item['event_name_display']
         res.append(q)
+    # 按总和排序
+    for item in res:
+        sort_key = np.argsort(np.array(item['sum']))[::-1]
+        if item.get('groups'):
+            item['groups'] = np.array(item['groups'])[sort_key].tolist()
+        item['values'] = np.array(item['values'])[sort_key].tolist()
+        item['sum'] = np.array(item['sum'])[sort_key].tolist()
+        item['avg'] = np.array(item['avg'])[sort_key].tolist()
+
     return schemas.Msg(code=0, msg='ok', data=res)
 
 
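For readers skimming the block added above (the 按总和排序 comment reads "sort by total"): np.argsort returns the indices that would sort the sums ascending, and [::-1] reverses them, so every parallel list in a result entry (groups, values, sum, avg) is reordered with the largest total first. A standalone sketch with invented sample data:

    import numpy as np

    # Hypothetical result entry; real values come from the ClickHouse query.
    item = {
        'groups': [['cn'], ['us'], ['jp']],
        'values': [[1, 2], [5, 5], [2, 2]],
        'sum': [3.0, 10.0, 4.0],
        'avg': [1.5, 5.0, 2.0],
    }

    sort_key = np.argsort(np.array(item['sum']))[::-1]  # -> [1, 2, 0]
    for key in ('groups', 'values', 'sum', 'avg'):
        item[key] = np.array(item[key])[sort_key].tolist()

    print(item['sum'])     # [10.0, 4.0, 3.0], descending totals
    print(item['groups'])  # [['us'], ['jp'], ['cn']]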
@@ -267,13 +276,34 @@ async def retention_model(request: Request,
         'days': days,
         'date_range': [d.strftime('%Y-%m-%d') for d in date_range][:unit_num + 1],
         'title': title,
-        'filter_item_type':filter_item_type,
-        'filter_item':filter_item,
+        'filter_item_type': filter_item_type,
+        'filter_item': filter_item,
+
     }
     return schemas.Msg(code=0, msg='ok', data=resp)
 
 
+@router.post("/retention_model_export")
+async def retention_model_export(request: Request,
+                                 game: str,
+                                 ckdb: CKDrive = Depends(get_ck_db),
+                                 analysis: BehaviorAnalysis = Depends(BehaviorAnalysis),
+                                 current_user: schemas.UserDB = Depends(deps.get_current_user)
+                                 ):
+    """ 留存分析模型 数据导出"""
+    await analysis.init()
+    data = analysis.retention_model_sql2()
+    file_name = quote(f'留存分析.xlsx')
+    mime = mimetypes.guess_type(file_name)[0]
+
+    sql = data['sql']
+    df = await ckdb.query_dataframe(sql)
+    df_to_stream = DfToStream((df, '留存分析'))
+    with df_to_stream as d:
+        export = d.to_stream()
+    return StreamingResponse(export, media_type=mime, headers={'Content-Disposition': f'filename="{file_name}"'})
+
+
 @router.post("/retention_model_del", deprecated=True)
 async def retention_model_del(
         request: Request,
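DfToStream is imported from utils and its body is not part of this diff; the usage above (and in the ltv export below) implies it accepts (DataFrame, sheet name) tuples, works as a context manager, and returns a byte stream that StreamingResponse can serve. A minimal compatible sketch, assumed rather than the project's actual implementation:

    from io import BytesIO
    import pandas as pd

    class DfToStream:
        """Assumed shape: write (df, sheet_name) pairs into an in-memory xlsx."""

        def __init__(self, *sheets):
            self.sheets = sheets        # each entry: (DataFrame, sheet name)
            self.buffer = BytesIO()

        def __enter__(self):
            # engine choice is an assumption; requires the xlsxwriter package
            self.writer = pd.ExcelWriter(self.buffer, engine='xlsxwriter')
            return self

        def __exit__(self, exc_type, exc, tb):
            self.writer.close()         # flush the workbook into the buffer
            self.buffer.seek(0)         # rewind so the response can read it

        def to_stream(self):
            for df, sheet_name in self.sheets:
                df.to_excel(self.writer, sheet_name=sheet_name, index=False)
            return self.buffer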
@@ -1,9 +1,12 @@
+import mimetypes
 from collections import defaultdict
+from urllib.parse import quote
 
 import pandas as pd
 import numpy as np
 from fastapi import APIRouter, Depends, Request
 from motor.motor_asyncio import AsyncIOMotorDatabase
+from starlette.responses import StreamingResponse
 
 import crud, schemas
 from common import *
@@ -16,6 +19,7 @@ from db.redisdb import get_redis_pool, RedisDrive
 from models.behavior_analysis import BehaviorAnalysis
 from models.user_analysis import UserAnalysis
 from models.x_analysis import XAnalysis
+from utils import DfToStream
 
 router = APIRouter()
 
@@ -60,3 +64,24 @@ async def ltv_model_sql(
     }
 
     return schemas.Msg(code=0, msg='ok', data=data)
+
+
+@router.post("/ltv_model_export")
+async def ltv_model_export(request: Request,
+                           game: str,
+                           ckdb: CKDrive = Depends(get_ck_db),
+                           analysis: XAnalysis = Depends(XAnalysis),
+                           current_user: schemas.UserDB = Depends(deps.get_current_user)
+                           ):
+    """ ltv分析 数据导出"""
+    await analysis.init()
+    data = analysis.ltv_model_sql()
+    file_name = quote(f'lvt.xlsx')
+    mime = mimetypes.guess_type(file_name)[0]
+
+    sql = data['sql']
+    df = await ckdb.query_dataframe(sql)
+    df_to_stream = DfToStream((df, 'ltv'))
+    with df_to_stream as d:
+        export = d.to_stream()
+    return StreamingResponse(export, media_type=mime, headers={'Content-Disposition': f'filename="{file_name}"'})
@@ -354,7 +354,7 @@ class BehaviorAnalysis:
             qry = qry.order_by(sa.Column('date'))
         else:
             qry = qry.order_by(sa.Column('values').desc())
-        qry = qry.limit(1000)
+        qry = qry.limit(10000)
 
         sql = str(qry.compile(compile_kwargs={"literal_binds": True}))
         print(sql)
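Context for the limit bump: qry is a SQLAlchemy construct, so .limit(10000) only changes the rendered LIMIT clause, and compiling with compile_kwargs={"literal_binds": True} inlines the value into the SQL string instead of leaving a bind parameter. A toy reproduction (table and columns invented; SQLAlchemy 1.4+ select syntax):

    import sqlalchemy as sa

    # Hypothetical table mirroring the query this class builds.
    event = sa.table('event', sa.column('date'), sa.column('values'))
    qry = (sa.select(event.c['date'], event.c['values'])
           .order_by(sa.column('values').desc())
           .limit(10000))
    sql = str(qry.compile(compile_kwargs={"literal_binds": True}))
    print(sql)  # ... ORDER BY values DESC LIMIT 10000 (quoting is dialect-dependent)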
@@ -592,8 +592,8 @@ ORDER BY values desc"""
         }
 
     def retention_model_sql2(self):
-        filter_item_type = self.event_view.get('filter-item-type')
-        filter_item = self.event_view.get('filter-item')
+        filter_item_type = self.event_view.get('filter_item_type')
+        filter_item = self.event_view.get('filter_item')
         event_name_a = self.events[0]['eventName']
         event_name_b = self.events[1]['eventName']
 
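The rename above lines this method up with the retention_model endpoint earlier in this commit, which returns filter_item_type and filter_item with underscores; assuming the saved view payload also uses the underscore keys, the old hyphenated lookup silently yielded None:

    # event_view contents invented for illustration.
    event_view = {'filter_item_type': 'all', 'filter_item': []}

    event_view.get('filter-item-type')  # None : old key never matched
    event_view.get('filter_item_type')  # 'all'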