1.家园玩法分析部分加上百分比显示数据
2.新增事件属性添加,修改,删除功能 3.新增用户属性添加,修改,删除功能 4.优化新手引导滞留,等级分布,活跃关卡数据不显示问题
This commit is contained in:
parent
af7091adcc
commit
365a55e528
@ -286,7 +286,12 @@ async def account_owner_list(request: Request,
|
||||
resp[-1]['owner_list'] = ','.join(item.get('ftv', []))
|
||||
break
|
||||
return schemas.Msg(code=0, msg='ok', data=resp)
|
||||
|
||||
# @router.post("/git_owner")
|
||||
# async def git_owner(request: Request,
|
||||
# game: str,
|
||||
# db: AsyncIOMotorDatabase = Depends(get_database),
|
||||
# current_user: schemas.UserDB = Depends(deps.get_current_user)) -> schemas.Msg:
|
||||
# user=await crud.user
|
||||
|
||||
@router.post("/update_account_owner")
|
||||
async def account_owner_list(request: Request,
|
||||
|
@ -12,6 +12,7 @@ from core.config import settings
|
||||
from db import get_database
|
||||
from db.ckdb import CKDrive, get_ck_db
|
||||
from db.redisdb import get_redis_pool
|
||||
from utils import estimate_data,dict_to_str
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
@ -27,7 +28,7 @@ async def read_data_attr(
|
||||
rdb: Redis = Depends(get_redis_pool),
|
||||
current_user: schemas.UserDB = Depends(deps.get_current_user)
|
||||
) -> schemas.Msg:
|
||||
"""事件属性列表"""
|
||||
"""事件属性列表或用户属性列表"""
|
||||
data = await rdb.get(f'{game}_{cat}')
|
||||
data = json.loads(data)
|
||||
res = []
|
||||
@ -130,3 +131,27 @@ async def select_attr(
|
||||
resp = await crud.select_map.get_select(db, data_in, game)
|
||||
code = 0 if resp else -9
|
||||
return schemas.Msg(code=code, msg='ok', data=resp)
|
||||
|
||||
@router.post("/add_attr")
async def add_attr(
        request: Request,
        game: str,
        data_in: schemas.Add_attr,
        db: AsyncIOMotorDatabase = Depends(get_database),
        rdb: Redis = Depends(get_redis_pool),
        current_user: schemas.UserDB = Depends(deps.get_current_user)
) -> schemas.Msg:
    """Add or remove an event attribute or a user attribute.

    The attribute map for the game is cached in Redis under the key
    ``{game}_{cat}`` as a JSON string. When ``data_in.state == 'add'`` the
    attribute is inserted with a ClickHouse type derived from
    ``data_in.data_type``; otherwise the attribute is removed. The updated
    map is serialized back to Redis.
    """
    raw = await rdb.get(f'{game}_{data_in.cat}')
    # Guard against a missing cache entry: json.loads(None) would raise
    # TypeError. Start from an empty map instead.
    data = json.loads(raw) if raw else {}
    if data_in.state == 'add':
        # Map the declared input type to its ClickHouse column type.
        new_data_type = estimate_data(data_in.data_type)
        # Insert (or overwrite) the attribute.
        data[data_in.new_attribute] = new_data_type
    else:
        # Remove the attribute; pop with a default avoids a KeyError
        # (and thus a 500) when the attribute is already absent.
        data.pop(data_in.new_attribute, None)
    # Serialize the map back to a string for Redis storage.
    str_data = dict_to_str(data)
    await rdb.set(f'{game}_{data_in.cat}', str_data)
    return schemas.Msg(code=0, msg='ok')
|
@ -27,7 +27,9 @@ async def event_list(
|
||||
current_user: schemas.UserDB = Depends(deps.get_current_user)
|
||||
) -> schemas.Msg:
|
||||
"""事件列表"""
|
||||
#获取事件名
|
||||
event_list = await ckdb.distinct(game, 'event', '#event_name')
|
||||
#获取事件量
|
||||
event_count = await ckdb.yesterday_event_count(game)
|
||||
event_meta = await crud.event_mana.find_many(db, {'game':game}) or {}
|
||||
if event_meta:
|
||||
|
@ -172,14 +172,19 @@ async def event_model(
|
||||
if item.get('combination_event'):
|
||||
combination_event = CombinationEvent(res, item.get('combination_event'), item['format'])
|
||||
values, sum_, avg = combination_event.parse()
|
||||
q['values'].append(values)
|
||||
q['sum'].append(sum_)
|
||||
|
||||
# q['values'].append(values)
|
||||
#q['sum'].append(sum_)
|
||||
q['avg'].append(avg)
|
||||
q['date_range'] = item['date_range']
|
||||
for last_value in values[::-1]:
|
||||
if last_value > 0:
|
||||
q['last_value'] = float(last_value)
|
||||
break
|
||||
for i in range(len(values)):
|
||||
values[i]=str(values[i])+'%'
|
||||
q['values'].append(values)
|
||||
q['sum'].append(str(sum_)+'%')
|
||||
res.append(q)
|
||||
continue
|
||||
#sql语句
|
||||
@ -400,10 +405,7 @@ async def retention_model(request: Request,
|
||||
}
|
||||
return schemas.Msg(code=0, msg='ok', data=resp)
|
||||
|
||||
#计算流失率
|
||||
# retention_model
|
||||
# retention_model_loss
|
||||
#@router.post("/retention_model")
|
||||
|
||||
async def retention_model01(request: Request,
|
||||
game: str,
|
||||
ckdb: CKDrive = Depends(get_ck_db),
|
||||
@ -904,6 +906,7 @@ async def scatter_model(
|
||||
) -> schemas.Msg:
|
||||
"""分布分析 模型"""
|
||||
await analysis.init(data_where=current_user.data_where)
|
||||
event_type = analysis.events[0]['eventName']
|
||||
res = await analysis.scatter_model_sql()
|
||||
sql = res['sql']
|
||||
|
||||
@ -1001,31 +1004,37 @@ async def scatter_model(
|
||||
label = str(key2)
|
||||
n = len(tmp_df2)
|
||||
labels_dict[label] = n
|
||||
|
||||
if event_type == 'pay':
|
||||
#如需要2之后所有之和,则执行下面代码,返回值为字典的labels_dict01
|
||||
labels_dict01={}
|
||||
v=-1
|
||||
for i in labels:
|
||||
v +=1
|
||||
if int(i) == 1:
|
||||
labels_dict01["1"]=labels_dict['1']
|
||||
else:
|
||||
# for number in labels_dict.keys():
|
||||
# if number >=i:
|
||||
values=list(labels_dict.values())
|
||||
n=sum(values[v:])
|
||||
labels_dict01[i]=n
|
||||
labels_dict01={}
|
||||
v=-1
|
||||
for i in labels:
|
||||
v +=1
|
||||
if int(i) == 1:
|
||||
labels_dict01["1"]=labels_dict["1"]
|
||||
else:
|
||||
# for number in labels_dict.keys():
|
||||
# if number >=i:
|
||||
values=list(labels_dict.values())
|
||||
n=sum(values[v:])
|
||||
labels_dict01[i]=n
|
||||
#传入百分比数据
|
||||
list_p=[]
|
||||
for i in labels:
|
||||
number_int=round(labels_dict01.get(i, 0) * 100 / total, 2)
|
||||
number_str=str(number_int)+'%'
|
||||
list_p.append(number_str)
|
||||
list_p=[]
|
||||
for i in labels:
|
||||
number_int=round(labels_dict01.get(i, 0) * 100 / total, 2)
|
||||
number_str=str(number_int)+'%'
|
||||
list_p.append(number_str)
|
||||
|
||||
resp['list'][dt] = {'总体': {'n': [labels_dict01.get(i, 0) for i in labels], 'total': total,
|
||||
'p': list_p}}
|
||||
# resp['list'][dt] = {'总体': {'n': [labels_dict.get(i, 0) for i in labels], 'total': total,
|
||||
# 'p': [round(labels_dict.get(i, 0) * 100 / total, 2) for i in labels]}}
|
||||
resp['list'][dt] = {'总体': {'n': [labels_dict01.get(i, 0) for i in labels], 'total': total,
|
||||
'p': list_p}}
|
||||
else:
|
||||
list_p=[]
|
||||
for i in labels:
|
||||
number_int=round(labels_dict.get(i, 0) * 100 / total, 2)
|
||||
number_str=str(number_int)+'%'
|
||||
list_p.append(number_str)
|
||||
resp['list'][dt] = {'总体': {'n': [labels_dict.get(i, 0) for i in labels], 'total': total,
|
||||
'p': [round(labels_dict.get(i, 0) * 100 / total, 2) for i in labels]}}
|
||||
return schemas.Msg(code=0, msg='ok', data=resp)
|
||||
|
||||
# bins_s = pd.cut(tmp_df['values'], bins=bins,
|
||||
@ -1177,4 +1186,4 @@ async def user_property_model(
|
||||
return schemas.Msg(code=0, msg='ok', data={
|
||||
'value': data,
|
||||
'title': title
|
||||
})
|
||||
})
|
@ -4,6 +4,7 @@ from collections import defaultdict
|
||||
import time
|
||||
from urllib.parse import quote
|
||||
import re
|
||||
from clickhouse_driver import Client
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from fastapi import APIRouter, Depends, Request
|
||||
@ -264,4 +265,4 @@ async def ltv_model_export(request: Request,
|
||||
df_to_stream = DfToStream((df, 'ltv'))
|
||||
with df_to_stream as d:
|
||||
export = d.to_stream()
|
||||
return StreamingResponse(export, media_type=mime, headers={'Content-Disposition': f'filename="{file_name}"'})
|
||||
return StreamingResponse(export, media_type=mime, headers={'Content-Disposition': f'filename="{file_name}"'})
|
@ -31,6 +31,13 @@ class Settings(BaseSettings):
|
||||
'db': 1,
|
||||
'decode_responses': 'utf-8',
|
||||
}
|
||||
#本地Redis,测试用
|
||||
# REDIS_CONF = {
|
||||
# 'host': '127.0.0.1',
|
||||
# 'port': 6379,
|
||||
# 'db': 1,
|
||||
# 'decode_responses': 'utf-8',
|
||||
# }
|
||||
|
||||
CK_CONFIG = {'host': '139.159.159.3',
|
||||
'port': 9654,
|
||||
@ -389,7 +396,13 @@ class Debug(Settings):
|
||||
MDB_DB: str = 'xdata'
|
||||
|
||||
DATABASE_URI = f'mongodb://{MDB_USER}:{MDB_PASSWORD}@{MDB_HOST}:{MDB_PORT}/admin'
|
||||
|
||||
#本地MongoDB的库测试
|
||||
# class Debug(Settings):
|
||||
# MDB_HOST: str = '127.0.0.1'
|
||||
# MDB_PORT: int = 27017
|
||||
# MDB_DB: str = 'xdata'
|
||||
#
|
||||
# DATABASE_URI = f'mongodb://{MDB_HOST}:{MDB_PORT}/admin'
|
||||
|
||||
class Produce(Settings):
|
||||
MDB_HOST: str = '127.0.0.1'
|
||||
|
@ -72,7 +72,6 @@ class CKDrive:
|
||||
df = await self.query_dataframe(sql)
|
||||
return df.T.to_dict().values()
|
||||
|
||||
|
||||
ckdb = CKDrive()
|
||||
|
||||
|
||||
|
@ -124,8 +124,8 @@ class XAnalysis:
|
||||
|
||||
return event_filters
|
||||
|
||||
def ltv_model_sql(self):
|
||||
|
||||
def ltv_model_sql(self):
|
||||
days = (arrow.get(self.event_view['endTime']).date() - arrow.get(self.event_view['startTime']).date()).days
|
||||
quota = self.event_view['quota']
|
||||
select_ltv = []
|
||||
@ -171,7 +171,6 @@ class XAnalysis:
|
||||
qry = sa.select().where(*where_account)
|
||||
sql = str(qry.compile(compile_kwargs={"literal_binds": True}))
|
||||
where_account_str = sql.split('WHERE ')[1]
|
||||
|
||||
sql = f"""SELECT reg.date as date,
|
||||
cnt1,
|
||||
{select_ltv_str},
|
||||
@ -201,4 +200,4 @@ class XAnalysis:
|
||||
'end_date': self.event_view['endTime'][:10],
|
||||
'date_range': self.date_range,
|
||||
'ltv_n': ltv_n
|
||||
}
|
||||
}
|
@ -6,3 +6,9 @@ class DataAttrEdit(BaseModel):
|
||||
show_name: str
|
||||
is_show: bool
|
||||
cat: str
|
||||
|
||||
class Add_attr(BaseModel):
    """Request body for adding or removing an event/user attribute."""
    # Cache category; combined with the game name into the Redis key
    # f'{game}_{cat}' that holds the attribute map.
    cat: str
    # Name of the attribute to add or remove.
    new_attribute: str
    # Operation selector: 'add' inserts the attribute; any other value
    # deletes it (see the add_attr endpoint).
    state: str
    # Declared input type ('int', 'ip', ...) mapped to a ClickHouse
    # column type by utils.estimate_data.
    data_type: str
|
@ -12,3 +12,26 @@ def get_bijiao(bijiao):
|
||||
return "IN"
|
||||
elif bijiao == '!=' or bijiao == 'not like' or bijiao == 'is null':
|
||||
return 'NOT LIKE'
|
||||
|
||||
|
||||
# 判断传入的数据类型
|
||||
def estimate_data(data_type):
    """Map a declared input type name to a ClickHouse column type string.

    Unrecognized names fall back to a nullable string column.
    """
    # NOTE(review): mapping 'ip' to a DateTime column looks suspicious —
    # possibly a typo for a 'datetime' type name; confirm intended behavior.
    type_map = {
        'int': "Nullable(Int64)",
        'ip': "Nullable(DateTime('UTC'))",
    }
    return type_map.get(data_type, "Nullable(String)")
|
||||
#将字典变成字符串
|
||||
def dict_to_str(dic):
    """Serialize a dict to a compact JSON string with all values as strings.

    Fixes two defects in the original hand-rolled serializer:
    the single-entry case never emitted the closing brace (the first-entry
    branch also matched the last entry), and an empty dict produced ""
    instead of "{}". Output for the multi-entry case is byte-identical to
    the original: compact separators, every key and value quoted.
    """
    import json
    # Coerce keys and values to str to match the original "%s" formatting.
    normalized = {str(k): str(v) for k, v in dic.items()}
    # separators=(',', ':') reproduces the original's compact layout;
    # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable.
    return json.dumps(normalized, ensure_ascii=False, separators=(',', ':'))
|
Loading…
Reference in New Issue
Block a user