#1.23
sqlplus "/ as sysdba"
set lines 200 pages 1000
col state for a18
col username for a11
col machine for a15
col event for a30 trunc
col sql_text for a45
col spid for a8
col sql_id for a13
col logon_time for a19
col seconds for 99999
col sid for 9999
col blk_sid for 9999
col blk_inst for 99
col program for a23 trunc
select * from (select sid, username,event,sql_id,final_blocking_instance blk_inst,
final_blocking_session blk_sid, program,to_char(logon_time, 'yy-mm-dd hh24:mi:ss')
logon_time, seconds_in_wait seconds,state,wait_time from v$session where (wait_class <> 'Idle')
or state <> 'WAITING' order by seconds_in_wait desc, event)
where sid <> (select sid from v$mystat where rownum < 2) and rownum < 201;
col sql_text for a80
select sql_text from v$sqltext where sql_id='&sql_id' order by piece;
set linesize 200 pagesize 1000
col column_name for a30 trunc
select * from table(dbms_xplan.display_cursor('&sql_id',null,'advanced'));
set long 1000000
set longchunksize 1000000
set lin 300
set pages 1000
set trim on
set trimspool on
set echo off
set feedback off
select dbms_sqltune.report_sql_monitor(sql_id=>'&sqlid',type=>'text',report_level=>'all') mon_rpt from dual;
select * from table(dbms_xplan.display_awr('&sqlid'));
alter session set statistics_level=all;
select count(0) from t1;
select * from table(dbms_xplan.display_cursor(null,null,'all allstats last outline'));
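-- an equivalent per-statement approach (a sketch; it reuses the same demo table t1): the gather_plan_statistics
-- hint collects row-source statistics for just this cursor without raising statistics_level for the whole session
select /*+ gather_plan_statistics */ count(0) from t1;
select * from table(dbms_xplan.display_cursor(null,null,'allstats last'));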
--find the session for an os pid
set lines 170 pages 1000
col username for a11
col machine for a15
col program for a32
col event for a30
select sid,serial#,username,to_char(logon_time,'yyyy-mm-dd hh24:mi:ss')logon_time,program,machine,event,sql_id from v$session where paddr=(select addr from v$process where spid=&pid);
--non-idle wait events
set line 150 pages 100
col event for a40
col p1text for a15
col p2text for a15
col p3text for a20
select * from (select sid, event, p1text, p1, p2text,p2, p3text, p3, seconds_in_wait seconds from v$session_wait where wait_class <> 'Idle' and sid <>(select sid from v$mystat where rownum<2) order by seconds_in_wait desc,event) where rownum<201;
--blocked sessions (waiter/holder tree)
set lin 200 pages 1000
col username for a10
col program for a40
col event for a30
col waiting_session for a20
with tkf_block_info as
(select a.inst_id || '_' || a.sid waiting_session,
a.username, a.program, a.event, a.sql_id, a.last_call_et,
decode(a.blocking_instance || '_' || a.blocking_session,
'_', null, a.blocking_instance || '_' || a.blocking_session) holding_session
from gv$session a,
(select inst_id, sid
from gv$session
where blocking_session is not null
union
select blocking_instance, blocking_session
from gv$session
where blocking_session is not null) b
where a.inst_id = b.inst_id
and a.sid = b.sid)
select lpad(' ', 3 * (level - 1)) || waiting_session waiting_session,
username, program, event, sql_id, last_call_et
from tkf_block_info
connect by prior waiting_session = holding_session
start with holding_session is null;
select 'blocker(' || lb.sid || ':' || sb.username || ')-sql:' || qb.sql_text blockers,
'waiter (' || lw.sid || ':' || sw.username || ')-sql:' || qw.sql_text waiters
from v$lock lb, v$lock lw, v$session sb, v$session sw, v$sql qb, v$sql qw
where lb.sid = sb.sid
and lw.sid = sw.sid
and sb.prev_sql_addr = qb.address
and sw.sql_address = qw.address
and lb.id1 = lw.id1
and sw.lockwait is not null
and sb.lockwait is null
and lb.block = 1;
--details of a blocking session
set lines 170 pages 1000
col username for a15
col machine for a15
col program for a35
col event for a30
col sql_text for a45
col spid for a8
select sid,spid,a.username,a.program,machine,event,sql_id,to_char(logon_time,'yyyy-mm-dd hh24:mi:ss')logon_time from v$session a,v$process c where c.addr=a.paddr and sid=&session_id;
col sql_text for a80
select sql_text from v$sqltext where sql_id='&sql_id' order by piece;
--sql text from awr history
select dbms_lob.substr(sql_text,2000,1) txt from dba_hist_sqltext where sql_id='&sql_id';
-- statement resource usage
set lines 180 pages 1000
col sql_text for a50
col cpu_time for 99999999999999
col elapsed_time for 999999999999
select distinct a.sql_id, elapsed_time,cpu_time,executions,disk_reads,buffer_gets,fetches,substr(sql_text,1,50)sql_text,round(elapsed_time/1000000/decode(executions,0,1,executions)) second_per from v$sqlarea a,v$session b where a.sql_id=b.sql_id and b.status='ACTIVE' order by second_per desc;
-- kill sid
set lin 200 pages 100
col sql_text for a55
col ses_stat for a8
col username for a10
select 'alter system kill session '''||a.sid||','||a.serial#||''' immediate;' sql_text
from v$session a where 1=1 and username='SSCS' and (event='SQL*Net message to client' or event ='latch free' );
select 'alter system kill session '''||a.sid||','||a.serial#||''' immediate;' sql_text
,a.username,b.start_time,b.start_scnb,b.xidusn,b.used_urec,b.used_ublk,a.status ses_stat,a.sql_id
from v$session a ,v$transaction b where a.saddr=b.ses_addr
and username='SSCS' and event='SQL*Net message from client' ;
-- a.sid=2872;
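-- a statement produced by the generators above looks like the following (sid 2872 comes from the commented
-- filter; the serial# 12345 is a hypothetical placeholder, take the real value from v$session first)
-- alter system kill session '2872,12345' immediate;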
--statements that should use bind variables
set lines 150
set pages 100
col txt for a65
select substr(sql_text, 1, 60) txt, count(0)
from v$sqlarea
group by substr(sql_text, 1, 60)
having count(substr(sql_text, 1, 60)) > 50
order by 2 desc;
select dbms_sqltune.report_sql_detail(sql_id=>'&sqlid',report_level=>'all',type=>'text') mon_rpt from dual;
set timing on
begin
dbms_stats.gather_table_stats(
ownname =>'&owner',
tabname => '&table_name',
no_invalidate => false,
estimate_percent => 3,
degree => 16,
cascade => true
);
end;
/
-- no_invalidate => false forces the dependent cursors to be invalidated, so the next execution re-parses with the new statistics; doing this for many tables at once can cause a hard-parse storm
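-- a minimal sketch of the gentler alternative (same &owner/&table_name substitution variables assumed):
-- dbms_stats.auto_invalidate lets oracle spread the cursor invalidations out over time instead of forcing
-- them all at once, which avoids the hard-parse storm described above
begin
dbms_stats.gather_table_stats(
ownname => '&owner',
tabname => '&table_name',
no_invalidate => dbms_stats.auto_invalidate,
estimate_percent => 3,
degree => 16,
cascade => true
);
end;
/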
sqlplus "/as sysdba"
set lines 200 pages 100
col txt for a65
select a.sql_id,a.cnt,a.pctload,b.sql_text txt from (select * from (
select sql_id,count(0) cnt,round(count(0)/sum(count(0)) over(),4)*100 pctload
from v$active_session_history a
where a.sample_time>sysdate-5/60/24
and sql_id is not null
group by sql_id
order by count(0) desc)
where rownum<11) a left join (select distinct sql_text,sql_id from v$sqltext where piece=0) b on a.sql_id=b.sql_id order by 2 desc ,1;
filter blank lines and comments
grep -Ev "^$|[#;]" aaa.txt
top 10 processes by cpu usage (linux)
ps aux --sort=-%cpu|grep -m 11 -v `whoami`
--processes using the most cpu
ps -ef|sort -rn +7 |head -n 18
top processes by cpu usage (aix)
ps aux |head -1 ; ps aux|sort -rn +2|head -20
top processes by memory usage (linux)
ps aux|head -1;ps aux|grep -v PID|sort -rn -k 4|head
top 20 processes by memory usage (aix)
ps aux |head -1 ; ps aux|sort -rn +4 |head -20
top 10 processes by real memory usage (svmon)
svmon -uP -t 10|grep -p Pid|grep '^.*[0-9]'|grep -v Pid
top 10 processes by paging space usage (svmon)
svmon -gP -t 10|grep -p Pid|grep '^.*[0-9]'
--10g and above: statements not using bind variables (by force_matching_signature)
col sql_text for a80
set pages 1000
set lines 150
col force_matching_signature for 999999999999999999999
select * from (
with c as
(select force_matching_signature, count(*) cnt
from v$sqlarea
where force_matching_signature != 0
group by force_matching_signature
having count(*) >20),
sq as
(select sql_text,
force_matching_signature,
row_number() over(partition by force_matching_signature order by sql_id desc) p
from v$sqlarea s
where force_matching_signature in
(select force_matching_signature from c))
select sq.sql_text, sq.force_matching_signature, c.cnt "unshared count"
from c, sq
where sq.force_matching_signature = c.force_matching_signature
and sq.p = 1
order by c.cnt desc
) where rownum<11;
--captured bind variable values
col name for a10
col datatype_string for a20
set lines 150 pages 100
col value_string for a40
select * from (
select name,datatype_string,value_string,last_captured
from dba_hist_sqlbind where sql_id='&sqlid' order by snap_id desc,last_captured desc ,1
) where rownum<51;
alter session set events '10046 trace name context forever,level 12';
alter session set events '10046 trace name context off';
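-- a sketch for tracing another session instead of your own (sid 1234 and serial# 56789 are placeholders,
-- take them from v$session); waits=>true, binds=>true gives the same detail as 10046 level 12
exec dbms_monitor.session_trace_enable(session_id => 1234, serial_num => 56789, waits => true, binds => true);
exec dbms_monitor.session_trace_disable(session_id => 1234, serial_num => 56789);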
--recent long-running operations
col opname for a25
col target for a20
col units for a10
col message for a60
col elapsed_seconds for 999999
col start_time for a20
set lines 180
set pages 1000
select opname,sofar,units,elapsed_seconds, to_char(start_time,'yyyy-mm-dd hh24:mi:ss') start_time,message from v$session_longops
where start_time>=to_date('2017-02-14 10:00:00','yyyy-mm-dd hh24:mi:ss') and start_time<=to_date('2017-02-14 15:00:00','yyyy-mm-dd hh24:mi:ss')
and rownum<51 order by start_time;
--monitor the progress of each rman backup piece
set lin 120 pages 100
col sid for 999999
col username for a5
col opname for a30
col username for a20
col message for a40
col target for a5
col units for a10
select sid,target,username,opname, -- message,
sofar, totalwork, elapsed_seconds,
round(sofar / totalwork * 100, 2) percent
from v$session_longops
where totalwork != 0 and sofar <> totalwork order by target;
select sid,target,username,opname, sofar, totalwork, elapsed_seconds ,percent,round(elapsed_seconds / (percent/100)/60 ) remain_min
from (select sid,target,username,opname, sofar, totalwork, elapsed_seconds,
round(sofar / totalwork * 100, 2) percent from v$session_longops
where totalwork != 0 and sofar <> totalwork order by target) ;
--segment space usage
set pages 100 lin 120
col segment_name for a32
col segment_type for a20
col owner for a20
select * from (
select owner,segment_name, segment_type, round(sum(bytes) / 1024 / 1024/1024) g from dba_segments
-- where tablespace_name = 'SYSTEM'
group by owner,segment_name, segment_type order by g desc) where rownum<51;
--statistics gathering history (last_analyzed per table)
col owner for a20
select * from (
select owner,table_name,num_rows,blocks,avg_row_len,
to_char(last_analyzed,'yyyy-mm-dd hh24:mi:ss') last_analyzed
from dba_tables where last_analyzed is not null order by last_analyzed desc)
where rownum<501;
--tables whose statistics gathering took a long time
create table t1 as
select * from (
select owner,table_name,num_rows,blocks,avg_row_len,last_analyzed
from dba_tables where last_analyzed is not null order by last_analyzed desc)
where last_analyzed>sysdate-6;
select *
from (select owner,table_name, num_rows,al, round((bl - al) * 24) hours
from (select a.table_name,
a.owner,
b.rn2,
a.num_rows,
to_date(a.last_analyzed, 'yyyy-mm-dd hh24:mi:ss') al,
to_date(b.last_analyzed, 'yyyy-mm-dd hh24:mi:ss') bl
from (select t1.*, rownum rn1 from t1 order by last_analyzed) a,
(select t1.*, rownum rn2 from t1 order by last_analyzed) b
where a.rn1 - 1 = b.rn2)
where (bl - al) > 2 / 24
order by hours desc)
where rownum < 21;
exec dbms_system.set_sql_trace_in_session(sid,serial#,false);
exec dbms_workload_repository.create_snapshot;
analyze table t1 compute statistics;
create or replace synonym yxqf.hb_smsg_qf for smsmain.hb_smsg_qf;
hp-ux create an lv: lvcreate -n data_p_32g_01 -i 2 -I 128 -L 32768 /dev/vgdata02
ps -ef|grep local=no|grep -v grep|awk '{print $2}'|xargs -i kill -9 {}
ipcs -m |grep oracle|awk '{print $2}'|xargs ipcrm shm
nohup find ./adump -name "*.aud" -mtime +5 -exec rm -f {} \; &
find ./audit -name "*.aud" -mtime +5 -exec rm -f {} \;
find . -name "*.trc" -exec du -sm {} \; | sort -rn|head
after a host reboot: find /u01 -type f -name "*.trc" -size +500M -exec du -sm {} \;|sort -rn| head -30
mklv -y pms3000 -T O -w n -s n -r n -t'raw' pmsdbvg 80
chown oracle:oinstall /dev/rpms3000  (run on both nodes)
create materialized view
alter any materialized view
drop any materialized view
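-- a minimal sketch of granting the three privileges listed above (the grantee scott is a placeholder)
grant create materialized view, alter any materialized view, drop any materialized view to scott;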
alter user sde quota unlimited on sde; 6.112
alter user scott quota unlimited on users;
alter user scott quota 100m on sysaux;
select distinct username,tablespace,contents,sql_id ,segtype from v$sort_usage order by 4;
select distinct username,blocks ,sql_id ,segtype,session_addr from v$sort_usage ;
select s.username,s.event,s.prev_sql_id,u.tablespace,u.contents,u.segtype,u.extents,u.blocks,round(((u.blocks*p.value)/1024/1024),2) mb
from v$session s ,v$sort_usage u,v$parameter p
where s.saddr=u.session_addr
and upper(p.name)='DB_BLOCK_SIZE'
order by mb desc;
--temporary tablespace usage by session
select * from (
select se.username,se.sid,se.status,se.sql_hash_value,
se.prev_sql_id,su.tablespace,su.segtype,su.contents,round(su.blocks*8/1024,2)mb
from v$session se,v$sort_usage su
where se.saddr=su.session_addr
order by mb desc) where rownum<40;
select * from dba_hist_active_sess_history h where h.sample_time >= to_date('2013-05-13 15:05:00', 'yyyy-mm-dd hh24:mi:ss') and h.sample_time <= to_date('2013-05-13 15:11:00', 'yyyy-mm-dd hh24:mi:ss')
and h.instance_number=1;
col tablespace_name for a20
select tablespace_name,file_id,round(bytes_used/1024/1024,2) bytes_used_mb,
round(blocks_used*8/1024,2) blocks_used_m,bytes_free/1024/1024 free_mb from v$temp_space_header;
--memory used by each session
col name for a25
col username for a12
col program for a25
col event for a38
select * from (
select s.sid, name, trunc(value / 1024/1024) m,s2.username,s2.program,s2.event,s2.sql_id
from v$sesstat s, v$statname n,v$session s2
where s.statistic# = n.statistic# and s.sid=s2.sid
and name like '%memory%'
order by m desc
) where rownum<51;
--hidden parameters
set lines 150
col name for a40
col value for a20
col descript for a70
select a.ksppinm name,b.ksppstvl value,a.ksppdesc descript
from x$ksppi a,x$ksppcv b where a.indx=b.indx and a.ksppinm like '%&par%';
export NLS_DATE_FORMAT='yyyy-mm-dd hh24miss'
export NLS_LANG="SIMPLIFIED CHINESE_CHINA.AL32UTF8"
export NLS_LANG=AMERICAN_AMERICA.ZHS16GBK
archive log switch frequency:
select sequence#,
to_char(first_time, 'yyyy-mm-dd hh24:mi:ss') dt,
round((first_time - lag(first_time) over(order by first_time)) * 24 * 60,
2) minutes
from v$log_history
where first_time > sysdate - 1
order by first_time;
---
grant the privileges needed to create awr reports:
grant execute on dbms_workload_repository to xxx;
grant select on sys.v_$instance to xxx;
grant select on sys.v_$database to xxx;
grant select on sys.dba_hist_database_instance to xxx;
grant select on sys.dba_hist_snapshot to xxx;
--v$session does not show recursive sessions, so when ora-00018 (maximum number of sessions exceeded) is raised, check the real session count
select ksuudnam,count(0) from x$ksuse where bitand(ksspaflg,1)<>0 group by ksuudnam order by 1;
select username,count(0) from v$session group by username order by 1;
--flash recovery area usage
select * from v$flash_recovery_area_usage;
collect a heap dump to see how memory is being allocated.
action plan/solution
=========================
1. trace file generated with:
alter system set events '10235 trace name context forever, level 65536';
alter system set events '4031 trace name errorstack level 3;name heapdump level 536870914';
2. after trace generation, the events can be disabled with:
alter system set events '4031 trace name errorstack off;name heapdump off';
alter system set events '10235 trace name context off';
=========================
or use the following approach:
conn / as sysdba
oradebug setmypid
oradebug unlimit
oradebug dump heapdump 536870914
oradebug tracefile_name
oradebug close_trace
-----------method 1----------------
when the database hangs, run as sysdba on one instance:
1. run hang analysis
sqlplus -prelim / as sysdba
oradebug setmypid
oradebug unlimit
oradebug -g all hanganalyze 3
wait 30 seconds
oradebug -g all hanganalyze 3
oradebug tracefile_name
exit
2. open another session and capture the system state
sqlplus -prelim / as sysdba
oradebug setmypid
oradebug unlimit
oradebug -g all dump systemstate 10
wait 30 seconds
oradebug -g all dump systemstate 10
oradebug tracefile_name
exit
-----------method 2----------
capture the system state (10g)
sqlplus -prelim / as sysdba
oradebug setmypid
oradebug unlimit
oradebug -g all dump systemstate 266
wait 30 seconds
oradebug -g all dump systemstate 266
exit
if the resulting diag file is too large or the dump itself hangs,
run on each instance:
sqlplus -prelim / as sysdba
oradebug setmypid
oradebug unlimit
oradebug hanganalyze 3
wait 30 seconds
oradebug hanganalyze 3
oradebug dump systemstate 258
oradebug tracefile_name
exit
11g and above
oradebug setospid
oradebug unlimit
oradebug -g all hanganalyze 3
wait 30 seconds
oradebug -g all hanganalyze 3
oradebug -g all dump systemstate 258
exit
---------------------------
ultraedit regular expression: search for ^{ORA-^}^{Error^}
prompt *********check database flashback ***************
prompt
select flashback_on from v\$database;
prompt
col name format a50;
col value format a20;
select name,value from v\$parameter where name like 'db_recovery_file_dest%';
vi replacement
sed -i "s/^M//" file.sh   (enter the ^M as ctrl-v ctrl-m)
replace and save
:
1,$s/rem ... /--/g
1,$s/"users"/ "bzh"/g
1,$s/connect /--/g
1,$s/rem / /g
s.sql
ultraedit: replace blank lines
ctrl+r
%[ ^t]++^p
--sga resize thrashing
col component for a30
col started_time for a22
col end_time for a22
set lin 200 pages 100
select component,
oper_type,
round(a.initial_size/1024/1024) initial_size_mb,
round(final_size/1024/1024) final_size_mb,
round((final_size-initial_size)/1024/1024) resize_mb,
to_char(start_time, 'yyyy-mm-dd hh24:mi:ss') started_time,
to_char(end_time, 'yyyy-mm-dd hh24:mi:ss') end_time
from v$sga_resize_ops a
where start_time >= to_date('2019-10-01 00:00', 'yyyy-mm-dd hh24:mi')
and start_time <= to_date('2019-10-10 23:59', 'yyyy-mm-dd hh24:mi')
order by started_time;
select component, oper_type, to_char(start_time, 'yyyy-mm-dd hh24') started_time, count(0) cnt
from v$sga_resize_ops s
where start_time >= to_date('2019-10-01 00:00', 'yyyy-mm-dd hh24:mi')
and start_time <= to_date('2019-10-10 23:59', 'yyyy-mm-dd hh24:mi')
and component='shared pool'
group by component, oper_type,to_char(start_time, 'yyyy-mm-dd hh24')
order by started_time;
--redo log switches per hour
set echo off
set feedback on
set heading on
set linesize 180
set pagesize 2000
set timing off
set trimspool on
set verify off
clear columns breaks computes
column day format a8 heading 'd/t'
column h00 format 999b heading '00'
column h01 format 999b heading '01'
column h02 format 999b heading '02'
column h03 format 999b heading '03'
column h04 format 999b heading '04'
column h05 format 999b heading '05'
column h06 format 999b heading '06'
column h07 format 999b heading '07'
column h08 format 999b heading '08'
column h09 format 999b heading '09'
column h10 format 999b heading '10'
column h11 format 999b heading '11'
column h12 format 999b heading '12'
column h13 format 999b heading '13'
column h14 format 999b heading '14'
column h15 format 999b heading '15'
column h16 format 999b heading '16'
column h17 format 999b heading '17'
column h18 format 999b heading '18'
column h19 format 999b heading '19'
column h20 format 999b heading '20'
column h21 format 999b heading '21'
column h22 format 999b heading '22'
column h23 format 999b heading '23'
column total format 999,999,999 heading 'total'
break on report
compute sum label 'total' avg label 'avg' max label 'max' min label 'min' of total on report
select
substr(to_char(first_time, 'mm/dd/rr hh:mi:ss'),1,5) day
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'00',1,0)) h00
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'01',1,0)) h01
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'02',1,0)) h02
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'03',1,0)) h03
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'04',1,0)) h04
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'05',1,0)) h05
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'06',1,0)) h06
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'07',1,0)) h07
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'08',1,0)) h08
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'09',1,0)) h09
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'10',1,0)) h10
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'11',1,0)) h11
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'12',1,0)) h12
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'13',1,0)) h13
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'14',1,0)) h14
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'15',1,0)) h15
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'16',1,0)) h16
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'17',1,0)) h17
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'18',1,0)) h18
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'19',1,0)) h19
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'20',1,0)) h20
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'21',1,0)) h21
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'22',1,0)) h22
, sum(decode(substr(to_char(first_time, 'mm/dd/rr hh24:mi:ss'),10,2),'23',1,0)) h23
, count(*) total
from
v$log_history a
where first_time>sysdate-30 --and thread#=1
group by substr(to_char(first_time, 'mm/dd/rr hh:mi:ss'),1,5)
order by substr(to_char(first_time, 'mm/dd/rr hh:mi:ss'),1,5)
/
set echo off
set feedback on
set heading on
set linesize 180
set pagesize 2000
set timing off
set trimspool on
set verify off
clear columns breaks computes
break on instance_name on thread#
column instance_name format a13 heading 'instance name'
column thread# format 999 heading 'thread#'
column group# format 999 heading 'group#'
column member format a60 heading 'member'
column type format a10 heading 'type'
column status format a15 heading 'status'
column bytes format 999,999 heading 'logsize mb'
column archived format a10 heading 'archived'
select
i.instance_name
, i.thread#
, f.group#
, f.member
, f.type
, l.status
, l.bytes/1024/1024 bytes
, l.archived
from
gv$logfile f
, gv$log l
, gv$instance i
where
f.group# = l.group#
and l.thread# = i.thread#
and i.inst_id = f.inst_id
and f.inst_id = l.inst_id
order by
i.instance_name
, f.group#
, f.member
/
--check partition sizes to spot data that was missed during purging
set timing on
col segment_name for a32
col pt for a10
set lines 120 pages 1000
select * from (
select segment_name,substr(partition_name,1,9) pt,round(sum(bytes)/1024/1024/1024) g
from dba_segments where partition_name is not null
group by segment_name,substr(partition_name,1,9)
order by 1,2,3
)where rownum<501;
--tablespace sizes
set pages 100
col tablespace_name for a20
select d.tablespace_name "tablespace_name",
to_char(nvl(a.bytes / 1024 / 1024, 0), '99,999,990.90') "total_m",
to_char(nvl(a.bytes - nvl(f.bytes, 0), 0) / 1024 / 1024,
'99999999.99') "used_m",
to_char(nvl(nvl(f.bytes, 0), 0) / 1024 / 1024, '99999999.99') "free_m",
to_char(nvl((a.bytes - nvl(f.bytes, 0)) / a.bytes * 100, 0),
'990.00') "used %"
from dba_tablespaces d,
(select tablespace_name, sum(bytes) bytes
from dba_data_files
group by tablespace_name) a,
(select tablespace_name, sum(bytes) bytes
from dba_free_space
group by tablespace_name) f
where d.tablespace_name = a.tablespace_name(+)
and d.tablespace_name = f.tablespace_name(+)
and not
(d.extent_management like 'LOCAL' and d.contents like 'TEMPORARY')
union all
select d.tablespace_name "tablespace_name",
to_char(nvl(a.bytes / 1024 / 1024, 0), '99,999,990.90') total_m,
to_char(nvl(t.bytes, 0) / 1024 / 1024, '99999999.99') "used_m",
to_char(nvl(a.bytes - t.bytes, 0) / 1024 / 1024, '99999999.99') "free_m",
to_char(nvl(t.bytes / a.bytes * 100, 0), '990.00') "used %"
from dba_tablespaces d,
(select tablespace_name, sum(bytes) bytes
from dba_temp_files
group by tablespace_name) a,
(select tablespace_name, sum(bytes_cached) bytes
from v$temp_extent_pool
group by tablespace_name) t
where d.tablespace_name = a.tablespace_name(+)
and d.tablespace_name = t.tablespace_name(+)
and d.extent_management like 'LOCAL'
and d.contents like 'TEMPORARY'
order by 5 desc
/
--tablespace usage (short version)
col tablespace_name for a20
select a.tablespace_name,
round(a.bytes / 1024 / 1024) "sum mb",
round((a.bytes - b.bytes) / 1024 / 1024) "used mb",
round(b.bytes / 1024 / 1024) "free mb",
round(((a.bytes - b.bytes) / a.bytes) * 100, 2) "percent_used"
from (select tablespace_name, sum(bytes) bytes
from dba_data_files
group by tablespace_name) a,
(select tablespace_name, sum(bytes) bytes, max(bytes) largest
from dba_free_space
group by tablespace_name) b
where a.tablespace_name = b.tablespace_name
order by ((a.bytes - b.bytes) / a.bytes) desc;
--datafile usage
select b.file_id ,
b.tablespace_name ,
b.file_name ,
round(b.bytes / 1024 / 1024) size_m,
round(c.max_extents / 1024 / 1024) max_extents_m,
round(b.bytes / 1024 / 1024 + c.max_extents / 1024 / 1024) total_m,
trunc((b.bytes - sum(nvl(a.bytes, 0))) / 1024 / 1024) used_m,
trunc(sum(nvl(a.bytes, 0)) / 1024 / 1024) free_m,
trunc(100-sum(nvl(a.bytes, 0)) / (b.bytes) * 100, 2) used_percent
from dba_free_space a, dba_data_files b, dba_tablespaces c
where a.file_id = b.file_id
and b.tablespace_name = c.tablespace_name
group by b.tablespace_name,
b.file_name,
b.file_id,
b.bytes,
c.max_extents,
b.bytes / 1024 / 1024 + c.max_extents / 1024 / 1024
order by b.file_id;
--tablespace sizes (full version)
set timing on
set lines 150 pages 100
col tablespace_name for a20
col type for a10
col stat for a8
col "ext mgmt" for a8
col "seg mgmt" for a8
col autoextend for a4
select /*+ first_rows */
d.tablespace_name,
nvl(a.bytes / 1024 / 1024, 0) "size mb",
round(nvl(a.bytes - nvl(f.bytes, 0), 0) / 1024 / 1024,2) "used mb",
round(nvl((a.bytes - nvl(f.bytes, 0)) / a.bytes * 100, 0),2) "used %",
a.autoext "autoextend",
round(nvl(f.bytes, 0) / 1024 / 1024,2) "free mb",
d.status "stat",
a.count "files",
d.contents "type",
d.extent_management "ext mgmt",
d.segment_space_management "seg mgmt"
from sys.dba_tablespaces d,
(select tablespace_name,
sum(bytes) bytes,
count(file_id) count,
decode(sum(decode(autoextensible, 'NO', 0, 1)),
0,
'no',
'yes') autoext
from dba_data_files
group by tablespace_name) a,
(select tablespace_name, sum(bytes) bytes
from dba_free_space
group by tablespace_name) f
where d.tablespace_name = a.tablespace_name(+)
and d.tablespace_name = f.tablespace_name(+)
and not d.contents = 'UNDO'
and not (d.extent_management = 'LOCAL' and d.contents = 'TEMPORARY')
and d.tablespace_name like '%%'
union all
select d.tablespace_name,
nvl(a.bytes / 1024 / 1024, 0),
round(nvl(t.bytes, 0) / 1024 / 1024,2),
round(nvl(t.bytes / a.bytes * 100, 0),2),
a.autoext,
round((nvl(a.bytes, 0) / 1024 / 1024 - nvl(t.bytes, 0) / 1024 / 1024),2),
d.status,
a.count,
d.contents,
d.extent_management,
d.segment_space_management
from sys.dba_tablespaces d,
(select tablespace_name,
sum(bytes) bytes,
count(file_id) count,
decode(sum(decode(autoextensible, 'NO', 0, 1)),
0,
'no',
'yes') autoext
from dba_temp_files
group by tablespace_name) a,
(select ss.tablespace_name,
sum((ss.used_blocks * ts.blocksize)) bytes
from gv$sort_segment ss, sys.ts$ ts
where ss.tablespace_name = ts.name
group by ss.tablespace_name) t
where d.tablespace_name = a.tablespace_name(+)
and d.tablespace_name = t.tablespace_name(+)
and d.extent_management = 'LOCAL'
and d.contents = 'TEMPORARY'
and d.tablespace_name like '%%'
union all
select d.tablespace_name,
nvl(a.bytes / 1024 / 1024, 0),
round(nvl(u.bytes, 0) / 1024 / 1024,2),
round(nvl(u.bytes / a.bytes * 100, 0),2),
a.autoext,
round(nvl(a.bytes - nvl(u.bytes, 0), 0) / 1024 / 1024,2),
d.status,
a.count,
d.contents,
d.extent_management,
d.segment_space_management
from sys.dba_tablespaces d,
(select tablespace_name,
sum(bytes) bytes,
count(file_id) count,
decode(sum(decode(autoextensible, 'NO', 0, 1)),
0,
'no',
'yes') autoext
from dba_data_files
group by tablespace_name) a,
(select tablespace_name, sum(bytes) bytes
from (select tablespace_name, sum(bytes) bytes, status
from dba_undo_extents
where status = 'ACTIVE'
group by tablespace_name, status
union all
select tablespace_name, sum(bytes) bytes, status
from dba_undo_extents
where status = 'UNEXPIRED'
group by tablespace_name, status)
group by tablespace_name) u
where d.tablespace_name = a.tablespace_name(+)
and d.tablespace_name = u.tablespace_name(+)
and d.contents = 'UNDO'
--and d.tablespace_name like '%%'
order by 4 desc
/
col property_name for a30
col property_value for a50
col description for a60
set lines 150 pages 100
select property_name ,property_value,description from database_properties where property_name like '%CHARACTERSET%' order by 1;
--indexes of a table
set pages 1000 lin 150
col owner for a10
col table_owner for a10
col index_type for a25
col table_name for a30
col index_name for a30
col degree for a10
select owner,index_name,index_type,status,blevel,num_rows,degree,last_analyzed from dba_indexes where table_name=upper('&tblname');
--table statistics
col owner for a20
select owner,num_rows,blocks,avg_row_len,to_char(last_analyzed,'yyyy-mm-dd hh24:mi:ss') last_analyzed
from dba_tables where table_name=upper('&table_name');
--index columns
col column_name for a20
col index_owner for a15
col index_name for a30
select index_owner,index_name,column_name,column_position,descend from dba_ind_columns where table_name=upper('&tblname') order by 1,2,4;
--column statistics
select column_name,num_distinct,num_nulls,histogram,to_char(last_analyzed,'mmdd hh24:mi')analyzed from dba_tab_col_statistics where table_name=upper('&tbl') order by 1;
--most recently loaded statements
set lines 150 pages 100;
col sql_text for a100
col last_load_time for a20
select * from (select sql_id,sql_text,last_load_time from v$sql order by last_load_time desc) where rownum<6;
--top sql (ash, last 5 minutes)
select * from (
select sql_id,count(0) cnt,round(count(0)/sum(count(0)) over(),4)*100 pctload
from v$active_session_history a
where a.sample_time>sysdate-5/60/24
and sql_id is not null
group by sql_id
order by count(0) desc)
where rownum<11;
--top sessions (ash, last 5 minutes)
col program for a30
select *
from (select session_id, user_id, program, count(0) cnt,
round(count(0) / sum(count(0)) over(), 4) * 100 pctload
from v$active_session_history a
where a.sample_time > sysdate - 5 / 60 / 24
and sql_id is not null
group by session_id, user_id, program
order by count(0) desc)
where rownum < 11;
--historical blocking information for a session (ash)
col program for a38
col event for a30
col module for a32
set lines 150 pages 100
select session_id,
/*
session_serial#,
user_id,sql_id,
*/
program,module,event,h.blocking_session,h.blocking_inst_id,h.blocking_session_serial#
from dba_hist_active_sess_history h
where h.sample_time >= to_date('2016-08-24 16:16:00', 'yyyy-mm-dd hh24:mi:ss')
and h.sample_time <= to_date('2016-08-24 16:36:00', 'yyyy-mm-dd hh24:mi:ss')
and session_id=3346 and instance_number=1
and rownum<51;
col name for a15
select name,trunc((total_mb-free_mb)/1024) used_g,trunc(total_mb/1024) total_g, trunc(free_mb/1024) free_g,round((total_mb-free_mb)/total_mb*100,2) used_percent
from v$asm_diskgroup order by used_percent desc ;
col path for a30
set lines 150
select group_number,total_mb,free_mb,name,path ,state from v$asm_disk;
alter system flush buffer_cache;
alter system flush shared_pool;
set pages 100
break on report
compute sum label 'total' avg label 'avg' max label 'max' min label 'min' of g on report
select * from (select to_char(completion_time,'yyyy-mm-dd') dt,round(sum(blocks *block_size)/1024/1024/1024) g,count(0) cnt from
v$archived_log group by to_char(completion_time,'yyyy-mm-dd')) where g>10 order by 1;
col version for a15
col comments for a40
set lin 150 pages 100
select version,id,bundle_series,comments from dba_registry_history;
sqlplus / as sysdba
col owner for a12
col directory_name for a25
col directory_path for a80
set lin 190 pages 100
select * from dba_directories order by 1,3;
sqlplus / as sysdba
col owner for a10
col db_link for a20
col username for a15
col host for a40
set line 150
set pages 100
select * from dba_db_links order by db_link;
-- check cluster load balance
with sys_time as (
select inst_id,sum(case stat_name when 'DB time' then value end) db_time,
sum(case when stat_name in ('DB CPU','background cpu time') then value end) cpu_time
from gv$sys_time_model group by inst_id)
select instance_name,
round(db_time/1000000,2) db_time_secs,
round(db_time*100/sum(db_time) over(),2) db_time_pct,
round(cpu_time/1000000,2) cpu_time_secs,
round(cpu_time*100/sum(cpu_time) over(),2) cpu_time_pct
from sys_time
join gv$instance using (inst_id);
dbca -silent -createDatabase -templateName General_Purpose.dbc -gdbName aicpuat -sid aicpuat -characterSet ZHS16GBK -sysPassword db#zyfd2018 -systemPassword db#zyfd2018 -totalMemory 2048 -redoLogFileSize 200 -datafileDestination /oradata -createListener LISTENER:1521
dbca -silent -deleteDatabase -sourceDB aicpuat
select usn,xacts,round(rssize/1024/1024) rssize_m , round(hwmsize/1024/1024) hwmsize_m,shrinks
from v$rollstat order by rssize;
select tablespace_name,status, round(sum(bytes)/1024/1024) bytes_m
from dba_undo_extents group by tablespace_name,status order by 1,2;
--pga usage
set lines 200;
set pages 200;
column name format a25;
column pname format a12;
col spid for a15
column "usedmb" format a10;
set numwidth 6;
select s.sid, s.serial#,p.pid, p.spid,p.pname, sn.name, round(ss.value/(1024 *1024))||'mb' "usedmb"
from v$sesstat ss, v$statname sn, v$session s, v$process p
where s.paddr = p.addr
and sn.statistic# = ss.statistic#
and s.sid = ss.sid
and sn.name in ('session pga memory' , 'session pga memory max')
and p.pname like 'DIA%'
order by ss.value desc;
select thread#,max(sequence#) from v$archived_log where applied='YES' group by thread# order by 1;
select * from v$archive_gap;
col status for a15
select process,status,sequence#,thread# from v$managed_standby;
alter database recover managed standby database disconnect from session;
alter database recover managed standby database using current logfile disconnect from session;
alter database recover managed standby database cancel;
recover standby database; --manual recovery; or with rman: recover database
#primary
select thread#,max(sequence#) from v$archived_log where resetlogs_change#=(select resetlogs_change# from v$database) group by thread# order by thread#;
#standby
select thread#,max(sequence#) from v$archived_log where applied='YES' group by thread#;
on the standby, check v$managed_standby:
col status for a12
set pages 100
select process,status,thread#,sequence# from v$managed_standby;
--force failover: make the standby the primary
alter database recover managed standby database finish force;
alter database commit to switchover to primary;
--normal switchover (quick and dirty)
-- on the primary
select switchover_status from v$database;
-- should show TO STANDBY
alter database commit to switchover to physical standby;
shutdown immediate
startup mount
alter database recover managed standby database disconnect from session;
alter database open;
-- on the standby
select switchover_status from v$database;
-- should show TO PRIMARY
alter database commit to switchover to primary with session shutdown;
shutdown immediate
startup
--login beijing
sqlplus / as sysdba
startup nomount
exit
rman target sys/db#zyfd2018@tns_orcldg auxiliary sys/db#zyfd2018@tns_orcl
duplicate target database for standby from active database dorecover nofilenamecheck;
set lin 120 pages 100
col name for a8
col db_unique_name for a16
col database_role for a20
col switchover_status for a20
col open_mode for a22
select name,db_unique_name,database_role,switchover_status,open_mode from gv$database;
set pagesize 9999 line 9999
col ts_name format a30
col pdbname format a15
col ts_name format a20
col logging format a10
select con_id,
pdbname,
ts#,
ts_name,
ts_size_m,
free_size_m,
used_size_m,
used_per,
max_size_g,
used_per_max,
block_size,
logging,
ts_df_count
from (with wt1 as (select ts.con_id,
(select np.name
from v$containers np
where np.con_id = ts.con_id) pdbname,
(select a.ts#
from v$tablespace a
where a.name = upper(ts.tablespace_name)
and a.con_id = ts.con_id) ts#,
ts.tablespace_name,
df.all_bytes,
decode(df.type,
'd',
nvl(fs.freesiz, 0),
't',
df.all_bytes - nvl(fs.freesiz, 0)) freesiz,
df.maxsiz,
ts.block_size,
ts.logging,
ts.force_logging,
ts.contents,
ts.extent_management,
ts.segment_space_management,
ts.retention,
ts.def_tab_compression,
df.ts_df_count
from cdb_tablespaces ts,
(select d.con_id,
'd' type,
tablespace_name,
count(*) ts_df_count,
sum(bytes) all_bytes,
sum(decode(maxbytes, 0, bytes, maxbytes)) maxsiz
from cdb_data_files d
group by d.con_id,
tablespace_name
union all
select d.con_id,
't',
tablespace_name,
count(*) ts_df_count,
sum(bytes) all_bytes,
sum(decode(maxbytes, 0, bytes, maxbytes))
from cdb_temp_files d
group by d.con_id,
tablespace_name) df,
(select d.con_id,
tablespace_name,
sum(bytes) freesiz
from cdb_free_space d
group by d.con_id,
tablespace_name
union all
select d.con_id,
tablespace_name,
sum(d.block_size * a.blocks) bytes
from gv$sort_usage a,
cdb_tablespaces d
where a.tablespace = d.tablespace_name
and a.con_id = d.con_id
group by d.con_id,
tablespace_name) fs
where ts.tablespace_name = df.tablespace_name
and ts.con_id = df.con_id
and ts.tablespace_name = fs.tablespace_name(+)
and ts.con_id = fs.con_id(+))
select t.con_id,
(case
when t.pdbname = lag(t.pdbname, 1)
over(partition by t.pdbname order by ts#) then
null
else
t.pdbname
end) pdbname,
ts#,
t.tablespace_name ts_name,
round(t.all_bytes / 1024 / 1024) ts_size_m,
round(t.freesiz / 1024 / 1024) free_size_m,
round((t.all_bytes - t.freesiz) / 1024 / 1024) used_size_m,
round((t.all_bytes - t.freesiz) * 100 / t.all_bytes, 3) used_per,
round(maxsiz / 1024 / 1024 / 1024, 3) max_size_g,
round(decode(maxsiz,
0,
to_number(null),
(t.all_bytes - freesiz)) * 100 / maxsiz,
3) used_per_max,
round(t.block_size) block_size,
t.logging,
t.ts_df_count
from wt1 t
union all
select distinct t.con_id,
'' pdbname,
to_number('') ts#,
'all ts:' ts_name,
round(sum(t.all_bytes) / 1024 / 1024, 3) ts_size_m,
round(sum(t.freesiz) / 1024 / 1024) free_size_m,
round(sum(t.all_bytes - t.freesiz) / 1024 / 1024) used_size_m,
round(sum(t.all_bytes - t.freesiz) * 100 / sum(t.all_bytes),
3) used_per,
round(sum(maxsiz) / 1024 / 1024 / 1024) max_size,
to_number('') "used,% of max size",
to_number('') block_size,
'' logging,
to_number('') ts_df_count
from wt1 t
group by rollup(con_id,pdbname)
)
order by con_id,ts#;
declare
my_task_name varchar2(30);
my_sqltext clob;
begin
my_task_name := dbms_sqltune.create_tuning_task(
sql_id => '&sqlid',
scope => 'comprehensive',
time_limit => 600,
task_name => 'whn_sql_test2',
description => 'task to tune a query on a specified table');
end;
/
exec dbms_sqltune.execute_tuning_task('whn_sql_test2');
--select task_name,status from user_advisor_tasks where task_name='whn_sql_test2';
set long 99999
set pages 1000 lin 180
select dbms_sqltune.report_tuning_task( 'whn_sql_test2') from dual;
exec dbms_sqltune.drop_tuning_task('whn_sql_test2');
database load history by time model
select *
from (select a.snap_id,
a.instance_number,
b.begin_interval_time + 0 begin_time,
b.end_interval_time + 0 end_time,
round(value - lag(value, 1, '0')
over(order by a.instance_number, a.snap_id)) "db time"
from (select b.snap_id,
instance_number,
sum(value) / 1000000 / 60 value
from dba_hist_sys_time_model b
where b.dbid = (select dbid from v$database)
and upper(b.stat_name) in upper('db time')
group by b.snap_id, instance_number) a,
dba_hist_snapshot b
where a.snap_id = b.snap_id
and b.dbid = (select dbid from v$database)
and b.instance_number = a.instance_number)
where to_char(begin_time, 'yyyy-mm-dd') >=to_char(sysdate - 7, 'yyyy-mm-dd')
order by begin_time desc;
--historical executions of one statement (ash)
select (select username from dba_users b where b.user_id = a.user_id) username,
user_id, sql_id, session_id, session_serial# serial#, sql_exec_id, sql_exec_start, count(0),
to_char(max(sample_time), 'yyyymmdd hh24:mi:ss') max_sample_time,
(to_date(to_char(max(sample_time), 'yyyymmdd hh24:mi:ss'),
'yyyymmdd hh24:mi:ss') - sql_exec_start) * 24 * 3600 exec_seconds
from dba_hist_active_sess_history a
where sql_id = '&sqlid'
and to_char(sample_time, 'yyyymmdd hh24:mi') between '20200101 19:00' and '20200101 21:00'
group by user_id, sql_id, session_id, session_serial#, sql_exec_id, sql_exec_start
order by sql_exec_start;
--space used by recycle bin objects
with rec_bytes as
(select (sum(space)*(select value from v$parameter where name='db_block_size'))/1024/1024/1024 bytes_in_gb,owner,ts_name
from dba_recyclebin group by owner,ts_name order by 1 desc)
select owner "user",
ts_name "tablespace name",
round(bytes_in_gb,1) "space consumption(gb)",
(select sum(a.bytes) / 1024/1024/1024
from dba_data_files a
where a.tablespace_name = ts_name) "size of tablespace(gb)",
ceil((bytes_in_gb /
(select sum(a.bytes) / 1024/1024/1024
from dba_data_files a
where a.tablespace_name = ts_name)) * 100) "percent usage(%)"
from rec_bytes
where bytes_in_gb > 0.1
order by 3 desc;
--list rman backups
select a.recid "backup set",
a.set_stamp,
decode(b.incremental_level,
'',
decode(backup_type, 'L', 'archivelog', 'full'),
1,
'incr level 1',
0,
'incr level 0',
b.incremental_level) "type lv",
b.controlfile_included "ctl included",
decode(a.status,
'A',
'available',
'D',
'deleted',
'X',
'expired',
'error') "status",
a.device_type "device type",
a.start_time "start time",
a.completion_time "completion time",
a.elapsed_seconds "elapsed seconds",
a.bytes / 1024 / 1024 / 1024 "size(g)",
a.compressed,
a.tag "tag",
a.handle "path"
from gv$backup_piece a, gv$backup_set b
where a.set_stamp = b.set_stamp
and a.deleted = 'NO'
order by a.completion_time desc;
create or replace function display_raw (rawval raw, type varchar2)
return varchar2
is
cn number;
cv varchar2(32);
cd date;
cnv nvarchar2(32);
cr rowid;
cc char(32);
begin
if (type = 'NUMBER') then
dbms_stats.convert_raw_value(rawval, cn);
return to_char(cn);
elsif (type = 'VARCHAR2') then
dbms_stats.convert_raw_value(rawval, cv);
return to_char(cv);
elsif (type = 'DATE') then
dbms_stats.convert_raw_value(rawval, cd);
return to_char(cd);
elsif (type = 'NVARCHAR2') then
dbms_stats.convert_raw_value(rawval, cnv);
return to_char(cnv);
elsif (type = 'ROWID') then
dbms_stats.convert_raw_value(rawval, cr);
return to_char(cr);
elsif (type = 'CHAR') then
dbms_stats.convert_raw_value(rawval, cc);
return to_char(cc);
else
return 'unknown datatype';
end if;
end;
/
select
a.column_name,
a.num_distinct,
display_raw(a.low_value,b.data_type) as low_val,
display_raw(a.high_value,b.data_type) as high_val,
b.data_type
from
dba_tab_col_statistics a, dba_tab_cols b
where
a.owner='SYS' and
a.table_name='T1' and
a.table_name=b.table_name and
a.column_name=b.column_name and
a.column_name = 'CDATE' --id
and b.owner=a.owner
order by 1, 2;
--monitor archive dest status on the primary
set lin 200 pages 100
col dest_name for a25
col error for a20 trunc
col gap_status for a12
select dest_name,status,error,target,affirm,compression,applied_scn from v$archive_dest where target='STANDBY';
select dest_name,status,database_mode,recovery_mode,error,gap_status from v$archive_dest_status;
--dg apply gap
col open_mode for a10
col protection_mode for a20
col database_role for a18
col switchover_status for a17
col thread# for 99
col name for a10
col diff for 9999
set lin 200
select a.thread#,c.name,c.open_mode,c.protection_mode,c.database_role,c.switchover_status,a.applog,b.nowlog, a.applog- b.nowlog diff from (select thread#, max(sequence#) as applog from v$archived_log where applied='YES' and resetlogs_change#=(select resetlogs_change# from v$database) group by thread#) a,(select thread#, max(sequence#) as nowlog from v$log group by thread#) b,v$database c where a.thread#=b.thread#;
set lin 200 pages 100
col process for a11
col pid for 99999999
col status for a15
col client_process for a11
col client_pid for a11
col group# for a5
col sequence# for 99999999
col block# for 99999999
col blocks for 99999999
col delay_mins for 99999999
col thread# for 99
select process,pid,status,client_process,client_pid,group#,thread#,sequence#,block#,blocks,delay_mins from v$managed_standby;
--sql execution history (awr)
set lines 200
set pages 1000
col shijian for a12
col execu_d for 999999
col bg_d for 9999999999
col dr_d for 9999999999
col et_d for 99999999
col ct_d for 99999999
col io_time for 999999
col clus_time for 999999
col ap_time for 999999
col cc_time for 999999
col et_onetime for 999999
select to_char(b.end_interval_time, 'yyyymmddhh24') shijian,
plan_hash_value,
sum(a.executions_delta) execu_d,
sum(a.buffer_gets_delta) bg_d,
sum(a.disk_reads_delta) dr_d,
sum(a.elapsed_time_delta / 1000000) et_d,
sum(a.cpu_time_delta / 1000000) ct_d,
sum(iowait_delta / 1000000) io_time,
sum(clwait_delta / 1000000) clus_time,
sum(apwait_delta / 1000000) ap_time,
sum(ccwait_delta / 1000000) cc_time,
decode(sum(a.executions_delta),
0,
sum(a.buffer_gets_delta),
round(sum(a.buffer_gets_delta) / sum(a.executions_delta), 0)) get_onetime,
decode(sum(a.executions_delta),
0,
sum(a.rows_processed_delta),
round(sum(a.rows_processed_delta) / sum(a.executions_delta), 0)) rows_onetime,
decode(sum(a.executions_delta),
0,
sum(a.elapsed_time_delta / 1000),
round(sum(a.elapsed_time_delta / 1000) /
sum(a.executions_delta),
0)) et_ms_once
from dba_hist_sqlstat a, dba_hist_snapshot b
where a.snap_id = b.snap_id
and a.instance_number = b.instance_number
and a.sql_id = '&sql_id'
group by to_char(b.end_interval_time, 'yyyymmddhh24'),plan_hash_value
order by 1,2;
undefine sql_id;