From b461ec87f451ccc6303440353af636ff4cb89232 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E4=BC=9F?= <250213850@qq.com>
Date: Fri, 24 Dec 2021 10:00:54 +0800
Subject: [PATCH] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=98=BE=E7=A4=BA11=E6=9C=88?=
 =?UTF-8?q?23=E5=8F=B7=E4=B9=8B=E5=89=8D=E7=9A=84=E6=95=B0=E6=8D=AE?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 api/api_v1/endpoints/xquery.py | 39 ++++++++++++++++++++++++++++++++--
 1 file changed, 37 insertions(+), 2 deletions(-)

diff --git a/api/api_v1/endpoints/xquery.py b/api/api_v1/endpoints/xquery.py
index 023d8a9..3b8cb46 100644
--- a/api/api_v1/endpoints/xquery.py
+++ b/api/api_v1/endpoints/xquery.py
@@ -3,7 +3,7 @@ import mimetypes
 from collections import defaultdict
 import time
 from urllib.parse import quote
-
+import re
 import pandas as pd
 import numpy as np
 from fastapi import APIRouter, Depends, Request
@@ -75,7 +75,42 @@ async def ltv_model_sql(
     split_="""AND 1 """
     news_sql = split_sql[0] + split_+new_sql + split_sql[1] + split_+new_sql+ split_sql[2]+split_+split_sql[3]
     df = await ckdb.query_dataframe(news_sql)
-
+    # 判断11月23号之前的数据
+    list_data_range=analysis.date_range
+    liststr_data_range=[]
+    for i in list_data_range:
+        liststr_data_range.append(str(i))
+    quota = analysis.event_view['quota']
+    #判断是设备LTV则执行下面代码,如是角色实充LTV则不执行
+    if quota == '#distinct_id':
+        if '2021-11-22' in liststr_data_range or '2021-11-22' >=liststr_data_range[-1]:
+            #取搜索最后为11.23号之前的数据
+            if '2021-11-22' >=liststr_data_range[-1]:
+                news_sql=""""""
+                split_sql=sql.split('AND is_new_device = 1')
+                new_sql=split_sql[0]+split_sql[1]+split_sql[2]
+                news_sql+=new_sql
+                df_twenty_three=await ckdb.query_dataframe(news_sql)
+            #取包含有11.23号之前和23号之后的那一段
+            else:
+                start_date=str(list_data_range[0])
+                end_date='2021-11-22'
+                news_sql = """"""
+                split_sql = sql.split('AND is_new_device = 1')
+                for i in split_sql:
+                    news_sql += i
+                #用正则表达式切时间
+                zhengze_time=r'\d{4}-\d{1,2}-\d{1,2}'
+                zhengze_sql=re.split(zhengze_time,news_sql)
+                zz_new_sql=zhengze_sql[0]+start_date+zhengze_sql[1]+end_date+zhengze_sql[2]+start_date+zhengze_sql[3]+end_date+zhengze_sql[4]
+                zz_news_sql=""""""
+                zz_news_sql+=zz_new_sql
+                df_twenty_three = await ckdb.query_dataframe(zz_news_sql)
+            #上下合并两组数据,忽略以前的索引下标
+            df= pd.concat([df,df_twenty_three], axis=0, ignore_index=True)
+            df.sort_values('date', inplace=True)
+            #去重
+            #df.drop_duplicates(inplace=True)
     quota = res['quota'] #字段名
     ltv_n = res['ltv_n']
     #df = await ckdb.query_dataframe(sql)