Daily Work Scripts

Capture an Oracle session count into a shell variable; the sed strips leading/trailing blanks from the sqlplus output:

cnt=$(sqlplus -S /nolog <<EOF | sed -e $'s/^[ \t]*//' -e $'s/[ \t]*$//'
conn / as sysdba
set heading off feedback off
set pagesize 0
set linesize 50
select count(username) from v\$session;
exit;
EOF
)

:12,25s/^/--/ --> Comment out lines 12 to 25 of a SQL file in vi (prefix each line with --)
=============== Filter every nth line using Sed ==========
sed -n '1~3p' test.txt > s1.txt   (lines 1,4,7,...)
sed -n '2~3p' test.txt > s2.txt   (lines 2,5,8,...)
sed -n '3~3p' test.txt > s3.txt   (lines 3,6,9,...)
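The general GNU sed form is 'first~step' (print every step-th line starting at line first); a quick sanity check, assuming GNU sed and the seq utility:

seq 1 9 > test.txt
sed -n '1~3p' test.txt   # prints 1 4 7
sed -n '2~3p' test.txt   # prints 2 5 8
sed -n '3~3p' test.txt   # prints 3 6 9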

https://www.hhutzler.de/blog/acfs-a-closer-look/ –> ACFS to be unmounted when doing patching

/usr/bin/prstat -n 1 -Z 1 1 2> /dev/null | grep -v "PID" | grep -v "Total:" --> Solaris memory usage at OS level

/sbin/acfsutil registry

/bin/umount /emagdir

/sbin/acfsutil info fs

/bin/mount /emagdir

mount -t acfs /dev/asm/emagdir-393 /emagdir

/bin/mount

/usr/sbin/swap -l | awk '{TOT+=$4} {FREE+=$5} END {print "total: "TOT" used: "TOT-FREE}'; /usr/sbin/prtconf | grep Memory; vmstat 1 3; kstat zfs:0:arcstats:size | grep size | tr -s ' ' | tr -s '\t' '^'

prstat -Z

=LEFT(TRIM(A1),FIND("_",TRIM(A1))-1)
find . -name '00000' ! -iname 'history*' -mtime +2 -exec rm -f {} \;
echo $TMOUT
600
[cbdik7bq@meylvvmsdp01a ~]$ unset TMOUT
-bash: unset: TMOUT: cannot unset: readonly variable

UNNAMED file in standby after adding new file to primary – ORA-01111, ORA-01110, ORA-01157

https://dbpilot.net/2018/generating-multiple-awr-reporst-in-one-step/ —> Multiple awr reports within range…
https://flashdba.com/database/useful-scripts/awr-generator/
https://blog.yannickjaquier.com/oracle/script-generate-series-awr-reports.html
https://dbastreet.com/?p=322

du -k /var/opt/fds/config/ere/*/saved | awk 'BEGIN{sum=0}{sum=sum+$1}END{print sum}' | awk '{print ($1/1024)}'

ps -ef|grep -i tns|grep -v grep|awk '{ORS=" && "} {print "lsnrctl status "$10}'

crontab -l|cut -d">" -f1|awk '/^/{print $6}' --> List only the cron entries (command field)
cat ss.lst|cut -d">" -f1|awk '/^/{print $6}'|awk -F"/" '{print $NF}' --> Gives the file names from the crontab
cat ss.lst|cut -d">" -f1|awk '/^/{print $6}'|sed 's#/[^/]*$##' --> Gives the directory path excluding file names; pipe through sort | uniq to get distinct values

sed -i -e 's/\r$//' --> Removes trailing carriage returns (DOS ^M line endings) from each line of a file

sed -n '/debug all;/,$p' msglog > test.log --> Copy everything from the line matching "debug all;" to the end of msglog into test.log
sed -n '/Apr 9 01:04:21/,$p' msglog > RMAN_Debug_1.log
awk '/^2020-03-25 00:00./,/^2020-03-25 00:02./' enterprisedb-2020-03-25.log > test.lst --> Print lines between two patterns
cat pgpool.conf | grep -Ev '^#|^$|\s+#' --> Ignores blank lines and lines starting with or containing #
echo <files> | xargs rm --> Used to remove files when the arg list is too long; e.g. find . -name '*.aud' | xargs rm -f

sdiff -s in Linux for file comparison (Solaris is also there)

sed '/^$/d' finalcron.lst.tst > ss.lst ===> Remove empty lines from a file
crontab -l|cut -d" " -f6 > sunil.list ==> Gives only the scripts from the crontab, but if there are extra spaces between the day-of-week field and the script name, the cut field shifts and returns different values.

Convert the ps etime column (dd-hh:mm:ss) into seconds and flag processes that have been running for more than 300 seconds:

[bilprodb4|BSCS_PROD_NEW] $ ps -eo comm,pid,etime|grep xargs| nawk -F "(:)|(-)" 'BEGIN{a[4]=1;a[3]=60;a[2]=3600;a[1]=86400;s=0};{for (i=NF;i>=1;i--) s=s+a[i]*$i}END{ if (s > 300) print s}'
153600
[bilprodb4|BSCS_PROD_NEW] $ ps -eo comm,pid,etime|grep xargs| nawk -F "(:)|(-)" 'BEGIN{a[4]=1;a[3]=60;a[2]=3600;a[1]=86400;s=0};{for (i=NF;i>=1;i--) s=s+a[i]*$i}END{ if (s > 300) print "Hello"}'
Hello

cp /home/prddown/_setup/oracle/instantclient_11_2/network/admin/tnsnames.ora /home/prddown/_setup/oracle/instantclient_11_2/network/admin/tnsnames.ora_bkp

perl -i -p -e 's/10.175.51.22/172.20.210.86/g;s/SERVICE_NAME = EDMPR/SERVICE_NAME=edmprdr/g' /home/prddown/_setup/oracle/instantclient_11_2/network/admin/tnsnames.ora

http://www.bijoos.com/ora7/oracle_unix.htm
http://www.tldp.org/LDP/abs/html/abs-guide.html#EX30 –> for shell scripting

T:\Sunil\Putty_LOgs\&H-&Y&M&D-&T.log
1410065407

\Client\E$\Sunil\Putty_LOgs\&H-&Y&M&D-&T.log

findstr /L /I /N /M DBA_SCHEDULER.SET_ATTRIBUTE *
findstr /L /I /N /M /C:”ADD HEARTBEATTABLE” *

Notepad++ Line break after every 10 lines:
Find what: ((?:[^\r\n]+(\R)){10})
Replace with: $1$2

vi editor delete n lines from current line
ndd

Default PS1:
bash-4.4$ echo $PS1
\s-\v\$

echo $PS1
[\d \t \u@\h:\w ]
PS1 - sets the primary prompt string.
PS2 - sets the secondary (continuation) prompt, ">" by default; when a command is entered half-way and Enter is pressed, this prompt appears so the command can be completed. Change it by modifying this variable.
PS3 - mostly used in ksh; defines the prompt displayed by the 'select' command, which builds a menu, so the prompt shown for choosing an option comes from this variable.
PS4 - when a shell script is debugged with set -x, each command is printed before its result with a "+" prefix; that prefix string can be changed by defining PS4.
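A minimal sketch pulling the four together (the values are illustrative, not defaults):

PS1='[\u@\h \W]\$ '      # primary prompt
PS2='more> '             # shown when a command continues onto the next line
PS3='Pick a database: '  # prompt used by the bash/ksh "select" menu builtin
PS4='+${LINENO}: '       # prefix printed for each traced line under set -x
select db in PROD TEST QUIT; do echo "you chose $db"; break; done
set -x; date; set +x     # traced lines now start with +<line number>: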

ls -ltr | awk '$6 == "May" && $7 == 8 {print "mv "$9" /may8_trc/."}'

find . -type f | xargs ls -l | grep -c "12:40" --> Gives the count of files generated at 12:40

Solaris:
/usr/xpg4/bin/grep -Ev ^# db_install.rsp > install.rsp
sed -e '/^[ \t]*$/d' install.rsp > install.rsp.orig

sed -e 's/ADD/DROP/g;s/FOREIGN.*$//' drop_constraints.log > drop_constraints.log.3
    (s/ADD/DROP/g replaces ADD with DROP; s/FOREIGN.*$// removes the rest of each line starting at FOREIGN)

:%s/.*Hello/Hello/ --> Removes everything up to the last occurrence of "Hello" on every line

sed -i "8r b.sh" 10.95.187.152.log --> Insert the contents of b.sh after line 8
Solaris replace string in multiple files ==============
for i in *.sql
do
j=$i
sed 's/ismdba/dbmonitor/g' < $i > $i.1
mv $i.1 $j
done

*** As single line ***
for i in *.sql; do j=$i; sed 's/ISMDBA/DBMONITOR/g' < $i > $i.1; mv $i.1 $j; done
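On Linux the same replace can be done in place with GNU sed's -i option (a sketch, assuming GNU sed; the .bak suffix keeps a backup copy of each file):

for i in *.sql; do sed -i.bak 's/ismdba/dbmonitor/g' "$i"; done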

Linux:

awk 'c-->0;$0~s{if(b)for(c=b+1;c>1;c--)print r[(NR-c+1)%b];print;c=a}b{r[NR%b]=$0}' b=3 a=5 s="conflict with recovery" enterprisedb-2020-08-12.log > cis22_db_Aug12.log ==> Print 3 lines before and 5 lines after each line matching the pattern

Solaris:
nawk 'c-->0;$0~s{if(b)for(c=b+1;c>1;c--)print r[(NR-c+1)%b];print;c=a}b{r[NR%b]=$0}' b=3 a=5 s="abcd" a.txt

==== Print previous,current and next line for matching pattern ==========
sed -n -e '/Dead/{x;1!p;g;$!N;p;D;}' -e h alert_ncprod.log.1
Sat Jun 01 16:06:37 2019
ORA-00060: Deadlock detected. See Note 60.1 at My Oracle Support for Troubleshooting ORA-60 Errors. More info in file /oracle/app/diag/rdbms/ncprod/ncprod/trace/ncprod_ora_14680.trc.
Sat Jun 01 16:33:24 2019
Sun Jun 02 12:42:11 2019
ORA-00060: Deadlock detected. See Note 60.1 at My Oracle Support for Troubleshooting ORA-60 Errors. More info in file /oracle/app/diag/rdbms/ncprod/ncprod/trace/ncprod_ora_14784.trc.
Sun Jun 02 12:56:22 2019
Sun Jun 02 16:11:59 2019

sed -n -e '/Dead/{x;1!p;g;p;}' -e h alert_ncprod.log.1 --> print the matching line and the previous line

nawk '/SVR4/{print; nr[NR+4]; next}; NR in nr' alert_BSCSPR.log.1 > test.log ===> Prints each matching line and the 4th line after it

To get script path and log file path..

Code:
crontab -l | awk '{for (i=1;i<=NF;i++) { if ($i ~ /^\//) {print $i }}}'

To get names

Code:
crontab -l | grep -o -i -E '[^/].sh|[^/].log'

http://blog.hakzone.info/posts-and-articles/editors/understanding-regex-with-notepad/comment-page-1/
https://npp-user-manual.org/docs/searching/#regular-expressions
https://alvinalexander.com/unix/edu/examples/find.shtml

find . -name "p16619894*.zip" 2>&1 | grep -v 'Permission denied'
find . -name alert_BSCSPR.log 2> /dev/null

findstr /S /N /M rocfm *

findstr /L /I /N /M CheckConflictAgainstOHWithDetail *

find . -size +10k -exec ls -ls {} + | sort -n

or in reverse order add an -r :

find . -size +200M -exec ls -ls {} + | sort -nr

finally, your title says find biggest file in directory. You can do that by then piping the code to tail

find . -size +10k -exec ls -ls {} + | sort -n | tail -1 would find you the largest file in the directory and its sub directories.

note you could also sort files by size by using -S, and negate the need for sort. but to find the largest file you would need to use head so

find . -size +10k -exec ls -lS {} + | head -1

=CHAR(RANDBETWEEN(65,90))&CHAR(RANDBETWEEN(97,122))&CHAR(RANDBETWEEN(97,122))&CHAR(RANDBETWEEN(65,90))&RANDBETWEEN(1000,9999)&CHAR(RANDBETWEEN(42,43))

opatch lsinventory|egrep "13417321|18604144|18966843|19835133|19949371|20476776|20887355|21864513|21904072|22496904|22731026|22737974|25136212|25906117|27734982"
Patch 19949371 : applied on Thu Jun 11 20:55:34 GST 2015
19949371
Patch 18604144 : applied on Thu Jun 11 20:50:45 GST 2015
18604144

find . -print | cpio -pdmv
https://www.commandlinefu.com/commands/using/cpio

ASMCMD> lspwusr
Username sysdba sysoper sysasm
SYS TRUE TRUE TRUE
ASMSNMP TRUE FALSE FALSE
ASM_CDC FALSE FALSE TRUE
ASMCMD> orapwusr --add --privilege sysasm asm_ods
Enter password: ***
ASMCMD> lspwusr
Username sysdba sysoper sysasm
SYS TRUE TRUE TRUE
ASMSNMP TRUE FALSE FALSE
ASM_CDC FALSE FALSE TRUE
ASM_ODS FALSE FALSE TRUE

select INSTANCE_NAME,HOST_NAME,STARTUP_TIME from dba_hist_database_instance; History of instance startup based on awr

================= Kill CRM long running sessions =====================

select 'alter system kill session ' || '''' || sid || ',' || serial# || ',@' || inst_id || ''' immediate;' from gv$session where last_call_et/60/60 > 1 and username not in ('SYS') and status='ACTIVE' and type<>'BACKGROUND';

https://blog.toadworld.com/how-does-oracle-reuse-expired-and-unexpired-undo-extents

The following formula calculates the peak undo blocks generated per second:

SQL> SELECT undoblks/((end_time-begin_time)*86400) "Peak Undo Block Generation" FROM v$undostat WHERE undoblks=(SELECT MAX(undoblks) FROM v$undostat);

Column END_TIME and BEGIN_TIME are DATE data types. When DATE data types are subtracted, the resulting value is the # of days between both dates. To convert days to seconds, you multiply by 86400, the number of seconds in a day (24 hours * 60 minutes * 60 seconds).
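For example, if BEGIN_TIME and END_TIME are 10 minutes apart, END_TIME-BEGIN_TIME is 10/1440 of a day; multiplying by 86400 gives 600 seconds, so UNDOBLKS/600 is the per-second rate for that interval.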

The following query calculates the number of bytes needed to handle a peak undo activity:

SELECT (UR * (UPS * DBS)) AS "Bytes"
FROM (SELECT value AS UR FROM v$parameter WHERE name = 'undo_retention'),
(SELECT undoblks/((end_time-begin_time)*86400) AS UPS
FROM v$undostat
WHERE undoblks = (SELECT MAX(undoblks) FROM v$undostat)),
(SELECT block_size AS DBS
FROM dba_tablespaces
WHERE tablespace_name = (SELECT UPPER(value) FROM v$parameter WHERE name = 'undo_tablespace'));

SQL> SELECT (UR * (UPS * DBS)) AS "Bytes"
FROM (select max(tuned_undoretention) AS UR from v$undostat),
(SELECT undoblks/((end_time-begin_time)*86400) AS UPS
FROM v$undostat
WHERE undoblks = (SELECT MAX(undoblks) FROM v$undostat)),
(SELECT block_size AS DBS
FROM dba_tablespaces
WHERE tablespace_name = (SELECT UPPER(value) FROM v$parameter WHERE name = 'undo_tablespace'));

set pagesize 25
set linesize 120

select inst_id,
to_char(begin_time,'MM/DD/YYYY HH24:MI') begin_time,
UNXPSTEALCNT "# Unexpired|Stolen",
EXPSTEALCNT "# Expired|Reused",
SSOLDERRCNT "ORA-1555|Error",
NOSPACEERRCNT "Out-Of-space|Error",
MAXQUERYLEN "Max Query|Length"
from gv$undostat
where begin_time between
to_date('01/07/2019 00:00:00','MM/DD/YYYY HH24:MI:SS')
and
to_date('01/08/2019 09:00:00','MM/DD/YYYY HH24:MI:SS')
order by inst_id, begin_time;

============ Linux Memory Usage =============
ps aux | sort -rn -k 5,6
ps aux --sort=-vsz,-rss | head -5 --> Gives the top memory-consuming processes (- is descending, + is ascending)
ps -eo size,pid,user,command --sort -size | awk '{ hr=$1/1024 ; printf("%13.2f Mb ",hr) } { for ( x=4 ; x<=NF ; x++ ) { printf("%s ",$x) } print "" }'
ps aux | awk '{print $2, $4, $11}' | sort -k2r | head -n 15
* Total Memory Used on Server
ps -eo size,pid,user,command --sort -size | awk '{ hr=$1/1024 ; printf("%13.2f Mb ",hr) } { for ( x=4 ; x<=NF ; x++ ) { printf("%s ",$x) } print "" }' | awk '{total=total + $1} END {print total}'

======================== IO Stats Solaris========
iostat -xtc 1 20
iostat -xtcpn 1 20
https://www.thegeekdiary.com/12-iostat-examples-for-solaris-performance-troubleshooting/

http://www.unixarena.com/2012/07/performance-issues-on-solaris-page1.html/
From the above output, we need to look at the below columns to identify the I/O bottle necks.

1. asvc_t - average service time of active transactions, in milliseconds. If the average service time exceeds 25 ms, that disk has a problem and needs attention.

2. wsvc_t - average service time in the wait queue, in milliseconds. If it crosses 10 ms, check with the SAN team. For the rest of the fields, see the iostat man page.

If you want to create a real disk I/O bottleneck on your test environment, you can use "find / > /dev/null 2>&1 &". Make sure to kill the process after your test.

https://pcp.io/docs/howto.diskperf.html

exec dbms_workload_repository.create_snapshot;
================================================= DBA_JOBS Status ====================
set pagesize 100
set linesize 120
ttitle –
center ‘Submitted DBMS Jobs’ skip 2

col job format 99999 heading ‘job#’
col subu format a10 heading ‘Submitter’ trunc
col lsd format a5 heading ‘Last|Ok|Date’
col lst format a5 heading ‘Last|Ok|Time’
col nrd format a5 heading ‘Next|Run|Date’
col nrt format a5 heading ‘Next|Run|Time’
col fail format 999 heading ‘Errs’
col ok format a2 heading ‘Ok’

select
job,
log_user subu,
what proc,
to_char(last_date,’MM/DD’) lsd,
substr(last_sec,1,5) lst,
to_char(next_date,’MM/DD’) nrd,
substr(next_sec,1,5) nrt,
failures fail,
decode(broken,’Y’,’N’,’Y’) ok
from
sys.dba_jobs;

==========================================================
tar cvf - . | gzip -c > /tmp/ORACLE_HOME_backup/oracle_software_$(hostname)_$(date +%Y%m%d).tar.gz
================= Locked Objects in EAI =================
COLUMN owner FORMAT A20
COLUMN username FORMAT A20
COLUMN object_owner FORMAT A20
COLUMN object_name FORMAT A30
COLUMN locked_mode FORMAT A15

SELECT lo.session_id AS sid,
s.serial#,
NVL(lo.oracle_username, '(oracle)') AS username,
o.owner AS object_owner,
o.object_name,
Decode(lo.locked_mode, 0, 'None',
1, 'Null (NULL)',
2, 'Row-S (SS)',
3, 'Row-X (SX)',
4, 'Share (S)',
5, 'S/Row-X (SSX)',
6, 'Exclusive (X)',
lo.locked_mode) locked_mode,
lo.os_user_name
FROM v$locked_object lo
JOIN dba_objects o ON o.object_id = lo.object_id
JOIN v$session s ON lo.session_id = s.sid
where o.object_name = upper('&object_name')

ORDER BY 1, 2, 3, 4;

===================== Primary Key Column Referenced in other tables ============
This will give list of tables and columns that are referenced by Primary key in other tables where primary table is CFG_WEBSERVICES
SELECT fk.owner, fk.table_name, col.column_name
FROM dba_constraints pk
JOIN dba_constraints fk
ON pk.constraint_name = fk.r_constraint_name
AND fk.constraint_type = 'R'
JOIN dba_cons_columns col
ON fk.constraint_name = col.constraint_name
WHERE pk.owner = 'DSL_LOG' AND pk.table_name = 'CFG_WEBSERVICES' AND pk.constraint_type = 'P';

================== Row Lock Details =============
select s.p1raw,o.owner,o.object_name,
dbms_rowid.rowid_create(1,o.data_object_id,f.relative_fno,s.row_wait_block#,s.row_wait_row#) row_id
from v$session s
join dba_objects o on s.row_wait_obj#=o.object_id
join dba_segments m on o.owner=m.owner and o.object_name=m.segment_name
join dba_data_files f on s.row_wait_file#=f.file_id and m.tablespace_name=f.tablespace_name

where s.event like 'enq: TX%';

SELECT free.tablespace_name TABLESPACE,
ROUND(files.bytes / 1073741824, 2) gb_total,
ROUND((files.bytes - free.bytes) / 1073741824, 2) gb_used,
ROUND(free.bytes / files.bytes * 100) || '%' "%FREE"
FROM
(
SELECT tablespace_name, SUM(bytes) bytes FROM dba_free_space
GROUP BY tablespace_name
) free,
(
SELECT tablespace_name, SUM(bytes) bytes FROM dba_data_files
GROUP BY tablespace_name
) files
WHERE
free.tablespace_name = files.tablespace_name;

select * from ( select POOL, NAME, BYTES, BYTES/1048576 as MBytes from v$sgastat where pool='shared pool' order by BYTES desc ) where rownum <= 25;

================ Count of sessions history in database =============
SQL> select instance_number,count(distinct session_id) from dba_hist_active_sess_history where to_char(sample_time,'DD/MM/YY HH24:MI')='09/11/18 17:40' group by instance_number;

INSTANCE_NUMBER COUNT(DISTINCTSESSION_ID)


          1                       842
          2                       838

======================================================================

====== NLS Language and expdp/impdp======

http://neeraj-dba.blogspot.com/2011/04/oracle-nlslang-setting-for.html

PROMPT_COMMAND='msg=$(history 1 | { read x y; echo $y; }); user=$(whoami); loginuser=$(who am i); logger -t user_cmd "${msg} by ${user} from [$loginuser]"'
ORACLE_HOME=/opt/oracle/product/12cR1/db

[bilprodb4|BSCS_PROD_NEW] $ cat lnsrctl_stop_start.sh
lsnrctl stop LISTENER && lsnrctl start LISTENER
lsnrctl stop LISTENER_BSCSPR && lsnrctl start LISTENER_BSCSPR
lsnrctl stop LISTENER_BSCSPR28 && lsnrctl start LISTENER_BSCSPR28
lsnrctl stop LISTENER_BSCSPR29 && lsnrctl start LISTENER_BSCSPR29
lsnrctl stop LISTENER_BSCSPR30 && lsnrctl start LISTENER_BSCSPR30
lsnrctl stop LISTENER_BSCSPR31 && lsnrctl start LISTENER_BSCSPR31

&& specifies to run second command only if first completes successfully
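A quick illustration of && versus || with hypothetical commands (|| runs the second command only when the first fails, which is what the OS Watcher cron entries further down rely on):

lsnrctl status LISTENER && echo "listener is up"      # runs only if the status command exits 0
lsnrctl status LISTENER || echo "listener is down"    # runs only if the status command fails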

====

SQL> select volume_name,volume_device from v$asm_volume;

VOLUME_NAME VOLUME_DEVICE


DATASTORE /dev/asm/datastore-134
UTILITY12 /dev/asm/utility12-134
UTILITY11 /dev/asm/utility11-134

[grid@meypmblv-oda11 ~]$ crsctl status resource ora.drivers.acfs -init
NAME=ora.drivers.acfs
TYPE=ora.drivers.acfs.type
TARGET=ONLINE
STATE=ONLINE on meypmblv-oda11

[grid@meypmblv-oda11 ~]$ lsmod | grep ora
oracleacfs 3481019 22
oracleadvm 624636 44
oracleoks 513567 2 oracleacfs,oracleadvm

Max process:
=====================
select username,machine,program,count(*),sql_id,status from v$session where username is not null and username <> 'SYS' group by username,machine,program,sql_id,status order by 4;

SELECT SYS_CONTEXT('USERENV','CURRENT_SCHEMA') from DUAL;

SELECT RECOVERY_MODE FROM V$ARCHIVE_DEST_STATUS WHERE RECOVERY_MODE!='IDLE';
MANAGED REAL TIME APPLY

https://blog.yannickjaquier.com/oracle/segment-advisor.html — Segment advisor

https://blog.pythian.com/mining-the-awr-to-identify-performance-trends/
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ File I/O Stats $$$$$$$$$$$$$$$$$$$$$$$$$$$$$
http://ksun-oracle.blogspot.com/2015/04/oracle-112040-awr-tablespace-io-stats.html
http://lefterhs.blogspot.com/2012/10/rac-awr-file-io-stats-report.html
http://ermanarslan.blogspot.com/2014/05/awr-analysis-part-1.html
http://www.rampant-books.com/art_high_av_rd.htm
https://osamaoracle.com/2012/06/28/understand-awr-report/
http://dbmentors.blogspot.com/2011/10/how-to-check-if-io-of-database-is-slow.html –> Nice
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$

SQL> oradebug setpid 23624
ORA-00070: command setpid is not valid
SQL> oradebug setmypid 23624
ORA-00073: command SETMYPID takes between 0 and 0 argument(s)
SQL> oradebug setospid 23624
Oracle pid: 164, Unix process pid: 23624, image: oracle@meycsglvdb01 (TNS V1-V3)
SQL> oradebug EVENT 10046 trace name context forever, level 12
Statement processed.
SQL> oradebug TRACEFILE_NAME
/oracle/app/diag/rdbms/epprod/epprod1/trace/epprod1_ora_23624.trc

ORADEBUG EVENT 10046 TRACE NAME CONTEXT OFF;

EXEC DBMS_SYSTEM.set_ev(si=>8242, se=>65446, ev=>10046, le=>12, nm=>'');

EXEC DBMS_SYSTEM.set_ev(si=>123, se=>1234, ev=>10046, le=>0, nm=>'');

############ Solaris Print lines based on condition

nawk '{if ( $8 >= 20 ) { print }}' raprodb2_iostat_18.06.24.1400.dat

#
############## OS WATCHER

0 * * * * ps -ef | grep oswbb | grep -v grep || cd /oracle/app/tools/oswbb;nohup ./startOSWbb.sh 60 240 gzip /oracle/app/tools/oswbb/archive/crmprod3 &
http://www.br8dba.com/oswatcher/
http://www.dbaexpert.com/blog/the-better-way-to-configure-oswatcher/

0 * * * * ps -ef | grep oswbb | grep -v grep || cd /oracle/ra1s/tools/oswbb;nohup ./startOSWbb.sh 60 240 gzip /oracle/ra2s/tools/oswbb/archive/orara1s &

https://www.codecrete.net/UnwrapIt/ ===> Unwrap a pl/sql code

select owner,job_name,SESSION_ID,SLAVE_OS_PROCESS_ID,RUNNING_INSTANCE,ELAPSED_TIME from dba_scheduler_running_jobs;

select log_id,job_name,to_char(ACTUAL_START_DATE,'DD/MM/YY HH24:MI') "Schedule Start",status,to_char(RUN_DURATION,'HH24:MI') "Duration",
session_id from dba_scheduler_job_run_details where job_name like '%OPT%' and log_date >= sysdate-20 order by log_date;

SELECT ARCHIVED_THREAD#, ARCHIVED_SEQ#, APPLIED_THREAD#, APPLIED_SEQ# FROM V$ARCHIVE_DEST_STATUS;
set lines 200
select inst_id,round(WAIT_TIME_MILLI/1000,2) wait_secs, last_update_time when, wait_count "How_many_times||since startup"
from GV$EVENT_HISTOGRAM where event like 'RFS write%';

--and round(WAIT_TIME_MILLI/1000,2) > 2 order by 2 desc
--It is useful to order by 3 as well to see the latest bucket first

select CLIENT_PROCESS,THREAD#,SEQUENCE#,status from v$managed_standby;

select message from v$dataguard_status;

SELECT r.thread#,MAX(R.SEQUENCE#) LAST_SEQ_RECD, MAX(L.SEQUENCE#) LAST_SEQ_SENT FROM
gV$ARCHIVED_LOG R, V$LOG L WHERE
R.DEST_ID=2 AND L.ARCHIVED='YES' group by r.thread#;

  1. grant create database link to FOO;
  2. alter user FOO grant connect through SOMEDBA;
  3. connect SOMEDBA[FOO]@mydb
  4. create database link ...
  5. connect SOMEDBA@mydb
  6. revoke create database link from FOO;

select
d.inst_id, -- INST_ID NUMBER
d.indx, -- EVENT# NUMBER
d.kslednam, -- EVENT VARCHAR2(64)
s.kslsesmaxdur, -- WAIT_TIME_MILLI NUMBER
s.kslsesval, -- WAIT_COUNT NUMBER
decode( -- LAST_UPDATE_TIME VARCHAR2(64)
s.kslsesval, 0, NULL, s.kslsestimestamp)
from
x$kslseshist s,
x$ksled d
where
s.kslsesenum = d.indx

http://oracledbascriptsfromajith.blogspot.in/2012/10/find-details-of-troublesome-sql-that.html
Solaris Swap usage:
/sbin/swap -l -h
/sbin/swap -s -h
total: 2.9G allocated + 1.1G reserved = 4.0G used, 7.6G available

for i in G M K
do
du -ah | grep [0-9]$i | sort -nr -k 1
done | head -n 10

ls -l | sort +4rn | more --> List files ordered by size
ls -s | sort -nr | more
ls -lh | grep M | sort +4rn | head -40

$ ls -l | sort +4rn | more

Note that sort +4rn is the legacy sort syntax: it skips the first four fields and sorts numerically, in reverse, on the fifth field, which in ls -l output is the file size.
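Where the legacy +4 syntax is not accepted, the POSIX -k form should do the same thing (a sketch; field 5 of ls -l is the size):

ls -l | sort -k5,5rn | head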

set pagesize 0 ==> Helps when capturing sqlplus output into a shell variable and displaying it on a single line.

=========== Use aggregate function in where clause=======
select segment_name,sum(bytes)/1024/1024/1024 “Size” from dba_segments group by segment_name having sum(bytes)/1024/1024/1024 between 1 and 2;
========== Sqloutput into shell variable==========

#!/bin/ksh

ORACLE_HOME=/oracle1/product/11.2.0.4/db
ORACLE_SID=tibtst
PATH=$PATH:$ORACLE_HOME/bin
export ORACLE_HOME ORACLE_SID PATH
count=0
export count
count=$(sqlplus -S /nolog <<EOF
conn / as sysdba
set heading off feedback off
select sum(bytes)/1024/1024/1024 from dba_segments where owner='SYS';
exit;
EOF
)
if [ "${count}" -le 10 ]
then
echo "Size is less than 10"
echo "Size of SYS :" ${count}
else
echo "Size is greater than 10 :" ${count}
fi

===============================================

COL SQL_TEXT format a45

select se.sql_id, substr(sql_text,1,200) sql_text
from v$sql sq, v$session se
where se.username='DWHDB' and upper(sql_text) like '%DUCCD%'
--and sql_text not like '%/* MYCOMMENT1 */%'
;

Please provide output of below query.

select (a.x+b.y+c.z)/(1024*1024*1024) DB_SIZE from
(select sum(bytes) x from dba_data_files)a,
(select sum(bytes)y from v$tempfile)b,
(select sum(bytes)z from v$log)c;

============= Avoid ^H characters while pressing backspace ===========
http://tldp.org/HOWTO/Keyboard-and-Console-HOWTO-5.html
stty erase
stty erase '^H'
================ Excel extract file name alone from path ==============
=MID(B1,FIND("?",SUBSTITUTE(B1,"/","?",LEN(B1)-LEN(SUBSTITUTE(B1,"/",""))))+1,LEN(B1))

Get only the path

=LEFT(B2,FIND("?",SUBSTITUTE(B2,"/","?",LEN(B2)-LEN(SUBSTITUTE(B2,"/","")))))

Linux: Create file of a given size
Sometimes you need a file of some size (possibly for testing purposes). On Linux, you can use dd to create one.

Let’s say you want a 23 MB file called test.file. You would then run this:

dd if=/dev/zero of=test.file bs=1048576 count=23

The block size (bs) is set to 1 MB (1024^2 bytes) here; writing 23 such chunks makes the file 23 MB.
Adjust to your needs.
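A quick way to confirm the result (sizes shown by -h are rounded):

dd if=/dev/zero of=test.file bs=1048576 count=23
ls -lh test.file    # expect roughly 23M
du -h test.file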

Linux: Quickly create large files for testing
To create a 10 GB file:

fallocate -l 10G huge_file.dat

======================== set lines and delete lines from vi editor ============
set nu

:6,47d

cd $ORACLE_HOME/lib
ln -s /usr/lib/libnsrora.so libobk.so

When RMAN throws error RMAN-19554 error allocating device device type sbt_tape device name

http://nixys.fr/blog/?p=1665 –> Bash command history editor

http://mkkoracleapps.blogspot.in/2012/11/how-to-interpret-accountstatus-column.html

===============================crsctl formatted output: nawk to be used in solaris… awk in linux========================================================
crsctl status res |grep -v "^$"|nawk -F "=" 'BEGIN {print " "} {printf("%s",NR%4 ? $2"|" : $2"\n")}'|\
sed -e 's/ *, /,/g' -e 's/, /,/g'|nawk -F "|" 'BEGIN { printf "%-40s%-35s%-20s%-50s\n",\
"Resource Name","Resource Type","Target ","State" }{ split ($3,trg,",") split ($4,st,",")}\
{for (i in trg) {printf "%-40s%-35s%-20s%-50s\n",$1,$2,trg[i],st[i]}}'

crsctl status res |grep -v "^$"|awk -F "=" 'BEGIN {print " "} {printf("%s",NR%4 ? $2"|" : $2"\n")}'|\
sed -e 's/ *, /,/g' -e 's/, /,/g'|awk -F "|" 'BEGIN { printf "%-40s%-35s%-20s%-50s\n",\
"Resource Name","Resource Type","Target ","State" }{ split ($3,trg,",") split ($4,st,",")}\
{for (i in trg) {printf "%-40s%-35s%-20s%-50s\n",$1,$2,trg[i],st[i]}}'

Please refer: CRS 10gR2/ 11gR1/ 11gR2 Diagnostic Collection Guide ( Doc ID 330358.1 ) —> To Collect diagnostic info when tfactl is not working
run from root user
set ORACLE_HOME= for example: set ORACLE_HOME=D:\app\11.2.0\grid
set PATH=%PATH%;%ORACLE_HOME%\perl\bin
perl %ORACLE_HOME%\bin\diagcollection.pl --collect --crshome %ORACLE_HOME%

The following .zip files will be generated in the current directory and need to be uploaded:

crsData_.zip,
ocrData_.zip,
oraData_.zip,
coreData_.zip (only when the --core option is specified)

For chmosdata*:

perl %ORACLE_HOME%\bin\diagcollection.pl --collect --crshome %ORACLE_HOME%

Display the Number of CPUs in IBM AIX and HP/UX
In AIX and HP/UX the lsdev command can be used to see the number of CPUs on a server. This is very important,
because it shows the number of Parallel Query processes that can be used on that server. That, in turn, limits
the value that you can use following the DEGREE keyword in a Parallel Query or DML statement. The following example is taken from an AIX server, and shows that the server has four CPUs:

lsdev -C|grep Process|wc -l
4

Display Number of CPUs in Solaris
In Solaris, the psrinfo command can be used to count the number of CPUs on the server. Here we see that we have two CPUs on this server:


psrinfo -v|grep “Status of processor”|wc -l
2

Display Number of CPUs in Linux
To see the number of CPUs on a Linux server, you can cat the /proc/cpuinfo file. In the example here we see that our Linux server has four CPUs:


cat /proc/cpuinfo|grep processor|wc -l
4

Remember that we need to know the number of CPUs on our server because the vmstat runqueue value must never exceed the number of CPUs.
A runqueue value of 32 is perfectly acceptable for a 36-CPU server, while a value of 32 would be a serious problem for a 24-CPU server.
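A rough Linux-side sketch of that check, comparing the vmstat run queue (the r column) against the CPU count:

cpus=$(grep -c ^processor /proc/cpuinfo)
runq=$(vmstat 1 2 | tail -1 | awk '{print $1}')
echo "run queue: $runq, CPUs: $cpus"
[ "$runq" -gt "$cpus" ] && echo "run queue exceeds CPU count - investigate"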

http://allappsdba.blogspot.in/2012/04/to-check-library-cache-lock-contention.html –> Library Cache Locks scripts

Sessions generating redo:
SELECT s.sid, s.serial#, s.username, s.program,
i.block_changes
FROM v$session s, v$sess_io i
WHERE s.sid = i.sid and i.block_changes>10000
ORDER BY 5, 1, 2, 3, 4;

SELECT s.sid, s.serial#, s.username, s.program,
t.used_ublk, t.used_urec
FROM v$session s, v$transaction t
WHERE s.taddr = t.addr
ORDER BY 5 desc, 6 desc, 1, 2, 3, 4;

select sql.sql_text sql_text, t.USED_UREC Records, t.USED_UBLK Blocks,
(t.USED_UBLK*8192/1024) KBytes from v$transaction t,
v$session s,
v$sql sql
where t.addr = s.taddr
and s.sql_id = sql.sql_id
and s.username ='&USERNAME';

SELECT NAME, child_number, DATATYPE_STRING,VALUE_STRING
FROM v$sql_bind_capture
WHERE sql_id='&SQL_ID' order by child_number;

select * from table (dbms_xplan.display_cursor('&SQL_ID', 1, format => 'TYPICAL +PEEKED_BINDS'));

select SQL_ID,LAST_CAPTURED,VALUE_STRING from DBA_HIST_SQLBIND where SQL_ID='0uf7cy3tdwpmf' order by 2;

SET PAUSE ON
SET PAUSE 'Press Return to Continue'
SET PAGESIZE 60
SET LINESIZE 300

COLUMN sql_text FORMAT A120
COLUMN sql_id FORMAT A13
COLUMN bind_name FORMAT A10
COLUMN bind_value FORMAT A26

SELECT
sql_id,
t.sql_text sql_text,
b.name bind_name,
b.value_string bind_value
FROM
v$sql t
JOIN
v$sql_bind_capture b using (sql_id)
WHERE
b.value_string is not null
AND
sql_id='&sqlid'
/

col sid format a4
col username format a5
col sql_hash_value format 99999999
col sqlid format a14
col sql_child_number format 9
col name format a4
col value_string format a8
col last_captured format a9
select s.sid,
s.username,
–sq.sql_text,
s.sql_hash_value,
s.sql_id,
s.sql_child_number,
spc.name,
spc.value_string,
last_captured
from v$sql_bind_capture spc, v$session s,v$sql sq
where s.sql_hash_value = spc.hash_value
and s.sql_address = spc.address
and sq.sql_id=s.sql_id
and spc.was_captured='YES'
and s.type<>'BACKGROUND'
and s.status='ACTIVE';

col sql_id head "SQL ID" form a13
col child_number head "Chi|ld|No" form 9999
col name head "Bind|Name" form a10
col position head "Bind|Posi|tion" form 9999
col datatype head "Data|type|ID" form 9999
col datatype_string head "Datatype" form a14
col precision head "Precision" form 9999
col scale head "Scale" form 9999
col max_length head "Max Bind|Length" form 9999
col was_captured head "Bind|value|Capt|ured|?" form a5
col lc head "Last|Captured" form a18
col value_string head "Value|of Bind" form a15

select SQL_ID,
CHILD_NUMBER,
NAME, POSITION,
DATATYPE, DATATYPE_STRING,
PRECISION, SCALE, MAX_LENGTH,
WAS_CAPTURED, to_char(LAST_CAPTURED,'dd-mon-yy hh24:mi:ss') lc,
VALUE_STRING
from V$SQL_BIND_CAPTURE
where sql_id='&1'
order by 1,2,4
/

YESTERDAY=$(TZ=GMT+24 date +%d-%m-%Y); echo $YESTERDAY --> Calculates yesterday's date in the Solaris shell
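On Linux, GNU date can do the same without the TZ trick (a sketch):

YESTERDAY=$(date -d yesterday +%d-%m-%Y); echo $YESTERDAY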

HTML Reporting with colors (Using of entmap off/on is important)
http://stelliosdba.blogspot.in/2012/06/html-reports-using-sqlplus.html
http://my-oracle-10g-tips.blogspot.in/2008/10/using-colors-and-fonts-in-html-reports.html

========== Cannot type @ in HPUX============
$ stty -a
speed 38400 baud; line = 0;
rows = 52; columns = 197
min = 4; time = 0;
intr = DEL; quit = ^\; erase = ^H; kill = @
eof = ^D; eol = ^@; eol2 ; swtch ———
stop = ^S; start = ^Q; susp ; dsusp
werase ; lnext
-parenb -parodd cs8 -cstopb hupcl cread -clocal -loblk -crts
-ignbrk brkint -ignpar -parmrk -inpck istrip -inlcr -igncr icrnl -iuclc
ixon ixany -ixoff -imaxbel -rtsxoff -ctsxon -ienqak
isig icanon -iexten -xcase echo -echoe echok -echonl -noflsh
-echoctl -echoprt -echoke -flusho -pendin
opost -olcuc onlcr -ocrnl -onocr -onlret -ofill -ofdel -tostop tab3

$ stty intr ^C kill ^U erase ^? susp ^Z —> Type this and retry

=============================

ls -ltr *.dmp | awk '{ total += $5 }; END { print total/1024/1024/1024 " GB"}' --> Gives the total size of the .dmp files
find . -type f -mtime 10 -exec ls -l {} \; | awk '{sum += $5} END{print sum}'

SQL> SHOW ERRORS VIEW

http://datavirtualizer.com/oracle-sqlnet-wait-events/

SELECT SUBSTR(owner||'.'||segment_name,1,50) OBJECT,
EXTENTS, MAX_EXTENTS
FROM DBA_SEGMENTS
WHERE MAX_EXTENTS – EXTENTS < 1
ORDER BY EXTENTS;

======================== LOB Partitions Details===================
select s.segment_name, s.partition_name, bytes/1048576 “Size (MB)”
from dba_segments s, dba_lobs l
where s.segment_name = l.segment_name
and s.segment_name in (”)
order by s.segment_name, s.partition_name;

select l.column_name, l.partition_name, l.lob_name, l.lob_partition_name, s.bytes/1048576 “Size (MB)”
from dba_segments s, dba_lob_partitions l
where s.segment_name = l.lob_name
and s.owner='GLOGOWNER'
and l.table_name ='I_TRANSACTION'
and l.lob_partition_name = s.partition_name;

select resource_name, current_utilization, max_utilization from v$resource_limit where resource_name in ('processes','sessions');

select SESSIONID||','||ENTRYID||','||STATEMENT||','||TIMESTAMP#||','||USERID||','||USERHOST||','||TERMINAL||','||ACTION#||','||RETURNCODE||','||OBJ$CREATOR||','||OBJ$NAME
from system.aud$ where USERID='DP990' and ntimestamp# >= trunc(sysdate)-1;

mailx -s “Listener log from Node2” -a /oracle/app/diag/tnslsnr/meyslclvdb02/listener/trace/listener.log.Node2.gz sunil.potluri@du.ae –> Mailx in linux

:%s/\/u01\/oracle\/dba/\/oracle\/scripts/g
will replace all appearences of /u01/oracle/dba with /oracle/scripts

bash-3.2$ ls -lR *.dmp | awk '{total += $5} END {print "Total:", total/1024/1024/1024}'
Total: 127.6

grep -vE ^# inputfile > outputfile –> To remove lines starting with #

================
For any normal user (not a part of “oinstall” / “dba” groups ) to be able to run sqlplus and access an ORACLE database , read/execute permissions are required for these 4 directories :
$ORACLE_HOME/bin
$ORACLE_HOME/lib
$ORACLE_HOME/oracore
$ORACLE_HOME/sqlplus
In addition, these 4 parameters should also be set in the user's environment (.profile); a sketch follows the list:
ORACLE_HOME,
LD_LIBRARY_PATH,
ORACLE_SID,
PATH
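
A hedged .profile sketch; the paths and SID are examples only and must match the actual installation:

export ORACLE_HOME=/u01/app/oracle/product/19.0.0/db    # example path
export ORACLE_SID=ORCL                                  # example SID
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$LD_LIBRARY_PATH
export PATH=$ORACLE_HOME/bin:$PATH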

===========================
https://linuxacademy.com/blog/linux/conditions-in-bash-scripting-if-statements/
===============Command to get all failed logins from Listener.log==============

NF==0 also counts for any status/start/stop as last column will be 0

cat listener.log | grep 18-JAN | grep CONNECT | awk -F"*" '{print $3}' | grep -o "192.*)" | grep -v 192.168.100.99 | awk -FPORT '{print $1}' | sort | uniq -c

cat listener.log | grep "PORT=18711" | awk '{print $8}' | grep ADDRESS | sed 's/^.((HOST.)/\1/g' | sed 's/.PORT=.*//g' | sort | uniq -c

grep "PORT=18711" listener.log | awk '{print $8}' | grep ADDRESS | sed 's/^.((HOST.)/\1/g' | sed 's/.PORT=.*//g' | sort | uniq -c

cat listener_bscspr28.log | awk '{ if ( $NF != 0 ) print $0 }'

cat listener.log | awk '{ if ( $NF != 0 || $NF != 2017 ) print $0," ",$1," ",$2}' > listener_node1.log

cat listener_bscspr28.log | cut -f1,2,6,12 -d ' ' | cut -f1,4,5 -d '(' | cut -f1,4 -d ')' > listener_bscspr28.log.May31

cat listener_bscspr34.log | cut -f6 -d ' ' | cut -f4,5 -d '(' | cut -f1 -d ')' | egrep "HOST"

cat listener_bscspr28.log | grep "27-FEB-2018 14:47" | awk -F"*" '{ if ( $NF == 0 ) print $0 }' | wc -l --> Count of successful connections made during a given minute

cat listener.log | awk -F"*" '{if ( $NF != 0 ) print $NF}' > test.log

cat listener_bscspr28.log | grep "27-FEB-2018 19:16" | awk -F"*" '{ if ( $NF == 0 ) print $0 }' | wc -l

for i in $(ls -ltr listener_bscspr28.log.* | tail | awk '{print $9}'); do echo $i; gzcat $i | cut -f6 -d ' ' | cut -f4,5 -d '(' | cut -f1 -d ')' | egrep "HOST" | sort | uniq; done

for i in {35..45}
do
echo "20-JUN-2018 17:"$i
cat listener_bscspr29.log | grep "20-JUN-2018 17:"$i | awk -F"*" '{ if ( $NF == 0 ) print $0 }' | wc -l
done

for i in 0{20..30} --> For minutes with a leading 0
do
echo "08-JUL-2018 16:"$i
grep "08-JUL-2018 20:00" listener_bscspr28.log | grep establish | wc -l
done

for i in {20..30}
do
echo "08-JUL-2018 16:"$i
grep "08-JUL-2018 16:"$i listener_bscspr30.log | grep establish | wc -l
done

cat listener.log | grep "15-JUN-2018 15:38" | awk -F"*" '{ if ( $NF == 0 ) print $0 }' | wc -l

cat listener_bscspr34.log | grep -v SERVICE | cut -f4,6 -d ' ' | cut -f6,7,10 -d '(' | cut -f1,2,5 -d ')' | grep -v status | tail | tr -d '()'   (tr removes any occurrences of "(" or ")")
http://qdosmsq.dunbar-it.co.uk/blog/2018/06/snorkelling-in-the-oracle-listener-logs/
grep -oE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | sort | uniq -c | sort -n

===== To get machines connected to the BSCS databases in the last 10 days using all the listeners. LISTENERS.txt has entries in capital letters; tr is used to convert the case.
#!/bin/bash
ps -ef|grep tns|grep -v grep|awk '{print $10}'|grep -v inherit
for i in $(cat listeners.txt)
do
log_file=$(echo $i|tr '[:upper:]' '[:lower:]')
for j in $(ls -ltr ${log_file}.log.*.gz|tail|awk '{print $9}')
do
gzcat $j| cut -f6 -d ' '|cut -f4,5 -d '('|cut -f1 -d ')'|egrep "HOST"|sort|uniq >> detail.txt
done
done
cat detail.txt|sort|uniq > details_final.txt

[bilprodb4|BSCS_PROD_NEW] $ cat listener_bscspr34.log | grep -v SERVICE | cut -f4,6 -d ' ' | cut -f6,7,10 -d '(' | cut -f1,2,5 -d ')' | grep -v status | tail | sed -e "s/(//g" -e "s/)//g"
HOST=__jdbc__USER= HOST=172.23.129.155
HOST=__jdbc__USER= HOST=172.23.129.155
HOST=__jdbc__USER= HOST=172.23.129.155
HOST=__jdbc__USER= HOST=172.23.129.155
HOST=__jdbc__USER= HOST=172.23.129.155
HOST=__jdbc__USER= HOST=172.23.129.155
HOST=__jdbc__USER= HOST=172.23.129.155
HOST=__jdbc__USER= HOST=172.23.129.155
HOST=__jdbc__USER= HOST=172.23.129.155
HOST=__jdbc__USER= HOST=172.23.129.155

================================= Mining Listener Log for connection count=======================================
j ===> hours
i ===> minutes
for ((j=17; j<=22; j++));
do
for ((i=0; i<=59; i++));
do
sT=$(printf "${j}:%02d " $i);
echo "18-SEP-2018 ${sT}"
grep "18-SEP-2018 ${sT}" listener_bscspr28.log.19Sep2018083007 | grep establish | wc -l
done
done

=MOD(ROW(A1),2)=0 ===> Gives TRUE/FALSE… Filter the rows based on TRUE and then proceed to cut/paste

select count(*),to_char(SAMPLE_TIME,'DD/MM/YY HH24:MI') from v$active_session_history where to_char(SAMPLE_TIME,'DD/MM/YY HH24:MI') between '27/02/18 13:00' and '27/02/18 14:50' group by to_char(SAMPLE_TIME,'DD/MM/YY HH24:MI') order by 2
31-MAY-2017 08:30:11 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:11 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:12 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:12 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:12 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:13 (HOST=172.21.8.11) 0
31-MAY-2017 08:30:13 (HOST=172.21.8.11) 0

================================================================================

==================== Solaris list processes running more than x days_back=========================
ps -A -o etime,pid,user,args|grep -v grep|grep -i tbs_free|grep -v ^" "|grep -v ^" 1-"|grep -v ^" 2-"|awk '{print "kill -9 "$2}' > killprcs.sh

http://itknowledgeexchange.techtarget.com/itanswers/killing-old-unix-processes/

Note: the etime has the following format: “dd-hh:mm:ss”
where:
dd – is the number of days elapsed since the process started
hh – is hours; mm – is minutes and ss – is seconds.
Then: “4-08:58:37” means this process is running for 4 days; 8 hours; 58 minutes and 37 seconds.

Hence, if you want to exclude processes from the current day, grep for a first column that starts with double spaces.
For example:
For excluding current-day processes:   grep " "   --> note the double spaces
For excluding processes one day old:   grep " 1-" --> note the single space before 1, followed by a hyphen "-"

For excluding processes two days old:  grep " 2-" --> note the single space before 2, followed by a hyphen "-"
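
On Linux procps versions that support the etimes keyword (elapsed time in plain seconds), the dd-hh:mm:ss parsing can be avoided entirely; a sketch that lists PIDs older than three days:

ps -A -o etimes,pid,user,args | awk '$1 > 3*86400 {print $2}'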

============ To remove ^M character at the end of file and create a new file

nohup sed -e 's/^M//g' stg.m_edms_delta.txt > stg.m_edms_delta.txt.1 &   (type the ^M as Ctrl-V Ctrl-M)

Provide instance alert_{$ORACLE_SID}.log, lmon, lmd, lms, ckpt, lgwr, lck, dia, lmhb(11g only), and all others traces that are modified around incident time. A quick way to identify all traces and tar them up is to use incident time with the following example:

$ grep "2010-09-02 03" *.trc | awk -F: '{print $1}' | sort -u | xargs tar cvf trace.$(hostname).$(date +%Y%m%d%H%M%S).tar

$ gzip trace*.tar

zipgrep –> to find strings in zip files

ls -t *.arc | tail +11 | xargs rm -f --> Delete all the *.arc files, keeping the most recent 10 on disk (GNU tail needs "tail -n +11")

ls -ltr *.dmp | awk '{print "gzip " $9}'
gzip UDR_KEY_HOME_201611_09.dmp
gzip UDR_KEY_HOME_201611_08.dmp
gzip UDR_KEY_HOME_201611_05.dmp
gzip UDR_KEY_HOME_201611_07.dmp
gzip UDR_KEY_HOME_201611_06.dmp
gzip UDR_KEY_HOME_201611_04.dmp
gzip UDR_KEY_HOME_201611_03.dmp
gzip UDR_KEY_HOME_201611_02.dmp
gzip UDR_KEY_HOME_201611_01.dmp
gzip UDR_LT_201611_1_05_part1.dmp
gzip UDR_LT_201611_1_04_part1.dmp
gzip UDR_LT_201611_1_08_part1.dmp
gzip UDR_LT_201611_1_06_part1.dmp
gzip UDR_LT_201611_1_07_part1.dmp
gzip UDR_LT_201611_1_03_part1.dmp
gzip UDR_LT_201611_1_01_part1.dmp
gzip UDR_LT_201611_1_02_part1.dmp

ALTER SYSTEM SET EVENTS '10046 TRACE NAME CONTEXT FOREVER, LEVEL 12';

alter session set events '10046 trace name context off';

UNIX95= ps -eo vsz,comm,args | sed 1d | sort -rn | more
ps -eo pid,ppid,cmd,%mem,%cpu --sort=-%mem | head

Find Oracle Home, when database is running:

AIX

$ ps -ef | grep smon
oraprod 145376 1 0 JUN 1 – 0:12 ora_smon_DBA1

From the above we could see the SID of this database is DBA1, now run the following to find the ORACLE_HOME

$ ls -l /proc/145376 /DBA1
lr-x—— 2 oraprod dba 0 Mar 23 19:31 DBA1 -> /u01/app/oracle/product/10.2.0/db/

The ORACLE_HOME is /u01/app/oracle/product/10.2.0/db

Linux (Redhat, SUSE, OEL & CentOS) & Solaris (Sparc 64, Sun x86 and x86-64)

$ pgrep -lf smon
15791 ora_smon_DBA1

From the above we could see the SID of this database is DBA1, now run the following to find the ORACLE_HOME

$ pwdx 15791
15791: /u01/app/oracle/product/10.2.0/db/dbs

The ORACLE_HOME is /u01/app/oracle/product/10.2.0/db
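
A one-line variant of the same idea on Linux/Solaris (a sketch; pwdx prints "PID: <cwd>", and the cwd of smon is $ORACLE_HOME/dbs):

pwdx $(pgrep -f ora_smon_DBA1) | sed 's#/dbs$##'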

HPUX

$ ps -ef | grep smon

oraprod 25611 1 0 JUN 1 ? 0:24 ora_smon_DBA1

From the above we could see the SID of this database is DBA1 and process id is 25611, now run the following to find the ORACLE_HOME

$ pfiles 25611 | grep bin
25611: /u01/app/oracle/product/10.2.0/db/bin/oracle

The ORACLE_HOME is /u01/app/oracle/product/10.2.0/db

==================
cat lnsrctl_stop_start.sh
lsnrctl stop LISTENER && lsnrctl start LISTENER
lsnrctl stop LISTENER_BSCSPR && lsnrctl start LISTENER_BSCSPR
lsnrctl stop LISTENER_BSCSPR28 && lsnrctl start LISTENER_BSCSPR28
lsnrctl stop LISTENER_BSCSPR29 && lsnrctl start LISTENER_BSCSPR29
lsnrctl stop LISTENER_BSCSPR30 && lsnrctl start LISTENER_BSCSPR30
lsnrctl stop LISTENER_BSCSPR31 && lsnrctl start LISTENER_BSCSPR31

select sample_time, sql_id, event, current_obj#,count(*) from gv$active_session_history
where sample_time between to_date ('07-FEB-2017 18:00:00','DD-MON-YYYY HH24:MI:SS') and
to_date ('07-FEB-2017 19:00:00','DD-MON-YYYY HH24:MI:SS')
group by sample_time, sql_id, event, current_obj#
order by sample_time

=============
DG Broker:

  1. Enable Log shipping

2.1 EDIT DATABASE 'DBNAME_SB' SET STATE='APPLY-ON';
2.2 EDIT DATABASE 'DBNAME' SET STATE='TRANSPORT-ON';
2.3 EDIT CONFIGURATION SET PROTECTION MODE AS 'MAXAVAILABILITY';

Disable Log shipping:
EDIT DATABASE 'DBNAME_SB' SET STATE='APPLY-OFF';
edit database 'DBNAME' set state='LOG-TRANSPORT-OFF';

To disable log transport when the database is in maximum performance mode:

DGMGRL> edit database 'DBNAME' set state=TRANSPORT-OFF;

DGMGRL> edit database 'DBNAME' set state=TRANSPORT-ON;

Disable log apply in the standby database:

DGMGRL> EDIT DATABASE SCUSTODIA SET STATE='APPLY-OFF'; (standby)

Disable log transport on the primary:

DGMGRL> EDIT DATABASE CUSTODIA SET STATE='LOG-TRANSPORT-OFF'; (primary)

=======================================
Enable Trace for session:

alter session set tracefile_identifier='SQL_TRACE';
alter session set timed_statistics = true;
alter session set statistics_level=all;
alter session set max_dump_file_size = unlimited;
alter session set events '10046 trace name context forever,level 12';

oradebug setospid
oradebug unlimit
oradebug event 10046 trace name context forever, level 12
— wait for 15 min
oradebug event 10046 trace name context off
oradebug tracefile_name

select p.spid from v$process p, v$session s where p.addr=s.paddr and s.sid=&sid;

col sid format 999999
col username format a20
col osuser format a15
select b.spid,a.sid, a.serial#,a.username, a.osuser,a.sql_id,a.blocking_session
from v$session a, v$process b
where a.paddr= b.addr
and b.spid='&spid'
order by b.spid;

select sid,serial#,username,program,sql_id,blocking_session from v$session where sid=

** Get Mountpoint details alone from path in filename *
select file_name,SUBSTR(file_name,1,INSTR(file_name,'/',-1,2)) from dba_data_files where tablespace_name='SYSTEM' order by 1;

SUBSTR(FILE_NAME,1,INSTR(FILE_NAME,'/',-1,2))

/oradata201/
/oradata202/
/oradata203/
/oradata204/
/oradata205/
/oradata206/

Add datafile to tablespace***
set linesize 300 pages 300
col file_name for a60
col File for a40
alter session set nls_date_format='DD/MM/YY HH24:MI';
select file_name,SUBSTR(file_name,(INSTR(file_name,'/',-1,1)+1)) "File",df.bytes/1024/1024/1024,creation_time,autoextensible "Extend",maxbytes/1024/1024/1024 "Max Size" from dba_data_files df,v$datafile
where file#=file_id and tablespace_name='&TSName' order by creation_time,file_id;

set linesize 300 pages 300
col file_name for a60
col File for a40
select file_name,SUBSTR(file_name,(INSTR(file_name,'/',-1,1)+1),length(file_name)) "File",df.bytes/1024/1024/1024,creation_time,autoextensible "Extend",maxbytes/1024/1024/1024 "Max Size" from dba_data_files df,v$datafile
where file#=file_id and tablespace_name='&TSName' order by creation_time,file_id;

select SUBSTR(name,1,(INSTR(name,'/',-1,1)-1)) "Path",SUBSTR(name,(INSTR(name,'/',-1,1)+1),length(name)) "File" from v$tempfile;

* TEMP*
set linesize 300 pages 300
col file_name for a60
col File for a40
select file_name,SUBSTR(file_name,(INSTR(file_name,’/’,-1,1)+1)) “File”,df.bytes/1024/1024/1024,creation_time,autoextensible “Extend”,maxbytes/1024/1024/1024 “Max Size” from dba_temp_files df,v$tempfile
where file#=file_id and tablespace_name=’&TSName’ order by creation_time,file_id;

select file_name,bytes/1024/1024/1024,autoextensible,maxbytes/1024/1024/1024 from dba_data_files where tablespace_name = upper(‘&TS_NAME’) order by 1;
select start_time,end_time,input_type,status,output_bytes/1024/1024/1024,elapsed_seconds/60/60 “Hrs” from v$rman_backup_job_details where start_time>=sysdate-7 and INPUT_TYPE <>’ARCHIVELOG’;

Tablespace and log segement details
select ds.segment_name,sum(bytes)/1024/1024/1024,ds.owner,table_name from dba_segments ds, dba_lobs dl where ds.tablespace_name='EAI_DATA_SMALL_AUTO' and dl.segment_name(+)=ds.segment_name
having sum(bytes)/1024/1024/1024 > 5 group by ds.segment_name,ds.owner,table_name;

SEGMENT_NAME SUM(BYTES)/1024/1024/1024 OWNER TABLE_NAME


EAI_LOG_HIST 10.8183594 EAIPR3
SYS_LOB0000336227C00010$$ 53.6240234 EAIPR3 EX_EAI_REPUBLISH
SYS_LOB0000333219C00007$$ 1703.67285 EAIPR3 EAI_CORRELATION
EX_EAI_LOG 14.6757813 EAIPR3
SYS_LOB0000415258C00017$$ 40.4980469 EAI EVENT_QUEUE
EAI_SOURCE_REQUESTS 129.270508 EAIPR3
EAI_LOG 514.266663 EAIPR3
EAI_LOG_KEYS 18.6806641 EAIPR3
EAI_REPUBLISH 17.2910156 EAIPR3
EAI_CORRELATION 65.1855469 EAIPR3
SYS_LOB0000336077C00010$$ 62.2929688 EAIPR3 EAI_REPUBLISH

11 rows selected.

select ds.segment_name,sum(bytes)/1024/1024/1024,ds.owner,table_name from dba_segments ds, dba_lobs dl where ds.tablespace_name='EAI_DATA_SMALL_AUTO' and dl.segment_name=ds.segment_name(+)
having sum(bytes)/1024/1024/1024 > 5 group by ds.segment_name,ds.owner,table_name;

SEGMENT_NAME SUM(BYTES)/1024/1024/1024 OWNER TABLE_NAME


SYS_LOB0000336227C00010$$ 53.6240234 EAIPR3 EX_EAI_REPUBLISH
SYS_LOB0000333219C00007$$ 1703.69238 EAIPR3 EAI_CORRELATION
SYS_LOB0000415258C00017$$ 40.4980469 EAI EVENT_QUEUE
SYS_LOB0000336077C00010$$ 62.2929688 EAIPR3 EAI_REPUBLISH

========================== TEMP Usage ===========================

SELECT sysdate “TIME_STAMP”, vsu.username, vsu.sql_id, vsu.tablespace,
vsu.usage_mb, vst.sql_text, vp.spid
FROM
(
SELECT username, sqladdr, sqlhash, sql_id, tablespace, session_addr,
sum(blocks)*8192/1024/1024 “USAGE_MB”
FROM v$sort_usage
HAVING SUM(blocks)> 10000
GROUP BY username, sqladdr, sqlhash, sql_id, tablespace, session_addr
) “VSU”,
v$sqltext vst,
v$session vs,
v$process vp
WHERE vsu.sql_id = vst.sql_id
AND vsu.sqladdr = vst.address
AND vsu.sqlhash = vst.hash_value
AND vsu.session_addr = vs.saddr
AND vs.paddr = vp.addr
AND vst.piece = 0;

select sid, substr(program,1,19) prog, address, hash_value, b.sql_id, sql_child_number child, plan_hash_value, executions execs,
(elapsed_time/decode(nvl(executions,0),0,1,executions))/1000000 avg_etime
from v$session a, v$sqlarea b
where status = ‘ACTIVE’
and username is not null
and a.sql_id = b.sql_id
and audsid != SYS_CONTEXT(‘userenv’,’sessionid’) and a.sql_id=’&sql_id’;

select sid, sql_id, prev_sql_id from v$session where sid=<>; give sid from previous output and look for previous sql

* Query to check sessions consuming TEMP*

select sample_time,sql_id,max(TEMP_SPACE_ALLOCATED)/(1024*1024*1024) gig
from DBA_HIST_ACTIVE_SESS_HISTORY
where
to_char(sample_time,'DD/MM/YY HH24:MI:SS') between '13/03/18 19:00:00' and '13/03/18 21:00:00' -- or: sample_time > sysdate-2
and TEMP_SPACE_ALLOCATED > (10*1024*1024*1024)
group by sample_time,sql_id order by sample_time;

select sample_time,sql_id,max(TEMP_SPACE_ALLOCATED)/(1024*1024*1024) gig
from DBA_HIST_ACTIVE_SESS_HISTORY
where
to_char(sample_time,'DD/MM/YY HH24:MI:SS') between '13/03/18 19:00:00' and '13/03/18 21:00:00' and
TEMP_SPACE_ALLOCATED > (10*1024*1024*1024)
group by sample_time,sql_id order by sample_time;

SELECT sysdate "TIME_STAMP", vsu.username, vs.sid, vp.spid, vs.sql_id, vst.sql_text, vsu.tablespace,
sum_blocks*dt.block_size/1024/1024 usage_mb FROM ( SELECT username, sqladdr, sqlhash, sql_id, tablespace, session_addr, -- sum(blocks)*8192/1024/1024 "USAGE_MB",
sum(blocks) sum_blocks
FROM v$sort_usage
HAVING SUM(blocks)> 1000
GROUP BY username, sqladdr, sqlhash, sql_id, tablespace, session_addr
) “VSU”,
v$sqltext vst,
v$session vs,
v$process vp,
dba_tablespaces dt
WHERE vs.sql_id = vst.sql_id
-- AND vsu.sqladdr = vst.address
-- AND vsu.sqlhash = vst.hash_value
AND vsu.session_addr = vs.saddr
AND vs.paddr = vp.addr
AND vst.piece = 0
AND dt.tablespace_name = vsu.tablespace
order by usage_mb;

///////////// 11g r2////////////////////
select k.inst_id “INST_ID”, ktssoses “SADDR”, sid “SID”, ktssosno “SERIAL#”, username “USERNAME”, osuser “OSUSER”,
ktssosqlid “SQL_ID”, ktssotsn “TABLESPACE”, decode(ktssocnt, 0, ‘PERMANENT’, 1, ‘TEMPORARY’) “CONTENTS”,
decode(ktssosegt, 1, ‘SORT’, 2, ‘HASH’, 3, ‘DATA’, 4, ‘INDEX’, 5, ‘LOB_DATA’, 6, ‘LOB_INDEX’ , ‘UNDEFINED’) “SEGTYPE”,
ktssofno “SEGFILE#”, ktssobno “SEGBLK#”, ktssoexts “EXTENTS”, ktssoblks “BLOCKS”, round(ktssoblks*p.value/1024/1024, 2) “SIZE_MB”,
ktssorfno “SEGRFNO#”
from x$ktsso k, v$session s, v$parameter p
where ktssoses = s.saddr and ktssosno = s.serial# and p.name = ‘db_block_size’
order by sid;
///////////////// Sessions with High Temp usage//////////////
cursor bigtemp_sids is
select * from (
select s.sid,
s.status,
s.sql_hash_value sesshash,
u.SQLHASH sorthash,
s.username,
u.tablespace,
sum(u.blocks*p.value/1024/1024) mbused ,
sum(u.extents) noexts,
nvl(s.module,s.program) proginfo,
floor(last_call_et/3600)||’:’||
floor(mod(last_call_et,3600)/60)||’:’||
mod(mod(last_call_et,3600),60) lastcallet
from v$sort_usage u,
v$session s,
v$parameter p
where u.session_addr = s.saddr
and p.name = ‘db_block_size’
group by s.sid,s.status,s.sql_hash_value,u.sqlhash,s.username,u.tablespace,
nvl(s.module,s.program),
floor(last_call_et/3600)||’:’||
floor(mod(last_call_et,3600)/60)||’:’||
mod(mod(last_call_et,3600),60)
order by 7 desc,3)
where rownum < 11;

— Identifying WHO is currently using TEMP Segments 10g onwards

SELECT sysdate,a.username, a.sid, a.serial#, a.osuser, (b.blocks*d.block_size)/1048576 MB_used, c.sql_text FROM v$session a, v$tempseg_usage b, v$sqlarea c, (select block_size from dba_tablespaces where tablespace_name='TEMP_DROP') d WHERE b.tablespace = 'TEMP_NEW' and a.saddr = b.session_addr AND c.address= a.sql_address AND c.hash_value = a.sql_hash_value AND (b.blocks*d.block_size)/1048576 > 1024
ORDER BY b.tablespace, 6 desc;

— Which sessions are using TEMP tablespace and how much space is being used by each session.
SELECT b.TABLESPACE
, b.segfile#
, b.segblk#
, ROUND ( ( ( b.blocks * p.VALUE ) / 1024 / 1024 ), 2 ) size_mb
, a.SID
, a.serial#
, a.username
, a.osuser
, a.program
, a.status
FROM v$session a
, v$sort_usage b
, v$process c
, v$parameter p
WHERE p.NAME = ‘db_block_size’
AND a.saddr = b.session_addr
AND a.paddr = c.addr
ORDER BY b.TABLESPACE, b.segfile#, b.segblk#, b.blocks;

select su.username
, ses.sid
, ses.serial#
, ses.sql_id
, ses.status
, su.tablespace
, ceil((su.blocks * dt.block_size) / 1048576) MB
from v$sort_usage su
, dba_tablespaces dt
, v$session ses
where su.tablespace = dt.tablespace_name
and su.session_addr = ses.saddr
order by 5 desc;

select * from V$SORT_SEGMENT

select * from V$SORT_USAGE

select * from gv$temp_extent_pool

–TEMP SPACE USAGE**
SELECT
b.sql_id, a.inst_id,b.tablespace, b.segfile#, b.segblk#, b.blocks, a.sid, a.serial#, a.username, a.osuser, a.status FROM
gv$session a,gv$sort_usage b WHERE a.saddr = b.session_addr ORDER BY b.tablespace, b.segfile#, b.segblk#, b.blocks;

SELECT inst_id,tablespace_name,total_blocks,used_blocks,free_blocks,total_blocks*8/1024 as total_MB, used_blocks*8/1024 as used_MB,free_blocks*8/1024 as free_MB
FROM gv$sort_segment;

select round(100*(u.tot/d.tot),2) “pct_temp_used” FROM
(select sum(u.blocks) tot from gv$tempseg_usage u) u,
(select sum(d.blocks) tot from dba_temp_files d) d;

select (s.tot_used_blocks/f.total_blocks)*100 as “percent used”
from (select sum(used_blocks) tot_used_blocks from
v$sort_segment where tablespace_name=’TEMP_DROP’) s,
(select sum(blocks) total_blocks from
dba_temp_files where tablespace_name=’TEMP_DROP’) f;

SELECT A.tablespace_name tablespace, D.gb_total,
SUM (A.used_blocks * D.block_size) / 1024 / 1024/1024 gb_used,
D.gb_total-SUM (A.used_blocks * D.block_size) / 1024 / 1024 /1024 gb_free
FROM v$sort_segment A,
(
SELECT B.name, C.block_size, SUM (C.bytes) / 1024 / 1024 /1024 gb_total
FROM v$tablespace B, v$tempfile C
WHERE B.ts#= C.ts#
GROUP BY B.name, C.block_size
) D
WHERE A.tablespace_name = D.name
GROUP by A.tablespace_name, D.gb_total

History of temp usage:
select sql_id,max(TEMP_SPACE_ALLOCATED)/(1024*1024*1024) gig
from DBA_HIST_ACTIVE_SESS_HISTORY
where
sample_time > sysdate-1 and
TEMP_SPACE_ALLOCATED > (50*1024*1024*1024)
group by sql_id order by sql_id;

SELECT s.sid, s.username, u.tablespace, s.sql_hash_value||’/’||u.sqlhash hash_value, u.segtype, u.contents, u.blocks
FROM v$session s, v$tempseg_usage u
WHERE s.saddr=u.session_addr
order by u.blocks;

select b.Total_MB,
b.Total_MB - round(a.used_blocks*8/1024) Current_Free_MB, round(used_blocks*8/1024) Current_Used_MB,
round(max_used_blocks*8/1024) Max_used_MB
from v$sort_segment a,
(select round(sum(bytes)/1024/1024) Total_MB from dba_temp_files ) b;

col hash_value for a40
col tablespace for a10
col username for a15
set linesize 132 pagesize 1000

SELECT s.sid, s.username, u.tablespace, s.sql_hash_value||’/’||u.sqlhash hash_value, u.segtype, u.contents, u.blocks
FROM v$session s, v$tempseg_usage u
WHERE s.saddr=u.session_addr
order by u.blocks;

col hash_value for 999999999999
select hash_value, sorts, rows_processed/executions
from v$sql
where hash_value in (select hash_value from v$open_cursor where sid=&sid)
and sorts > 0
and PARSING_SCHEMA_NAME=’ILDBPRD’
order by rows_processed/executions;

SELECT S.sid || ‘,’ || S.serial# sid_serial, S.username, S.osuser, P.spid, S.module,
P.program, SUM (T.blocks) * TBS.block_size / 1024 / 1024 mb_used, T.tablespace,
COUNT(*) statements
FROM v$sort_usage T, v$session S, dba_tablespaces TBS, v$process P
WHERE T.session_addr = S.saddr
AND S.paddr = P.addr
AND T.tablespace = TBS.tablespace_name
GROUP BY S.sid, S.serial#, S.username, S.osuser, P.spid, S.module,
P.program, TBS.block_size, T.tablespace
ORDER BY sid_serial;

SELECT A.tablespace_name tablespace, D.mb_total,
SUM (A.used_blocks * D.block_size) / 1024 / 1024 mb_used,
D.mb_total - SUM (A.used_blocks * D.block_size) / 1024 / 1024 mb_free
FROM v$sort_segment A,
(
SELECT B.name, C.block_size, SUM (C.bytes) / 1024 / 1024 mb_total
FROM v$tablespace B, v$tempfile C
WHERE B.ts#= C.ts#
GROUP BY B.name, C.block_size
) D
WHERE A.tablespace_name = D.name
GROUP by A.tablespace_name, D.mb_total;

How Can Temporary Segment Usage Be Monitored Over Time? (Doc ID 364417.1)
How to Find Creator of a SORT or TEMPORARY SEGMENT or Users Performing Sorts in Oracle7 (Doc ID 232205.1)

column sum_max_mb format 999,999,999;
column temporary_tablespace format A20
WITH
pivot1 AS
(
SELECT
trunc(ash.sample_time,’MI’) sample_time,
ash.SESSION_ID,
ash.SESSION_SERIAL#,
ash.SQL_ID,
ash.sql_exec_id,
U.temporary_tablespace,
max(temp_space_allocated)/(1024*1024) max_temp_mb
FROM GV$ACTIVE_SESSION_HISTORY ash, dba_users U
WHERE
ash.user_id = U.user_id
and ash.session_type = ‘FOREGROUND’
and ash.temp_space_allocated > 0
GROUP BY
trunc(ash.sample_time,’MI’),
ash.SESSION_ID,
ash.SESSION_SERIAL#,
ash.SQL_ID,
ash.sql_exec_id,
U.temporary_tablespace
)
SELECT pivot1.session_id,pivot1.session_serial#,temporary_tablespace, sample_time, sum(max_temp_mb) sum_max_mb
from pivot1
GROUP BY sample_time, temporary_tablespace,pivot1.session_id,pivot1.session_serial#
ORDER BY temporary_tablespace, sample_time;

column max_temp_per_day_mb format 999,999,999;
column temp_max_size_mb format 999,999,999;
column temp_mb format 999,999,999.9
define DAYS_AGO=3
with
pivot1 as
(
select min(snap_id) AS begin_snap_id
from dba_hist_snapshot
where trunc( begin_interval_time, ‘DD’) > trunc(sysdate – &DAYS_AGO, ‘DD’)
),
pivot2 as
(
SELECT
trunc(ash.sample_time,’MI’) sample_time,
ash.SESSION_ID,
ash.SESSION_SERIAL#,
ash.SQL_ID,
ash.sql_exec_id,
U.temporary_tablespace,
max(temp_space_allocated)/(1024*1024) max_temp_per_sql_mb
FROM dba_hist_active_sess_history ash
INNER JOIN dba_users U ON ash.user_id = U.user_id
where ash.session_type = 'FOREGROUND'
and ash.temp_space_allocated > 0
and U.temporary_tablespace = 'TEMP'
and snap_id > (select begin_snap_id from pivot1)
group by
trunc(ash.sample_time,'MI'),
ash.SESSION_ID,
ash.SESSION_SERIAL#,
ash.SQL_ID,
ash.sql_exec_id,
U.temporary_tablespace
),
pivot3 as
(
select temporary_tablespace, sample_time, sum(max_temp_per_sql_mb) total_temp_permin_mb
from pivot2
group by temporary_tablespace, sample_time
order by temporary_tablespace, sample_time
)
select temporary_tablespace, DD.tablespace_size/(1024*1024) temp_max_size_mb,
trunc(sample_time, 'DD') as day, max(total_temp_permin_mb) max_temp_per_day_mb
from pivot3
inner join dba_temp_free_space DD ON DD.tablespace_name = pivot3.temporary_tablespace
group by temporary_tablespace, DD.tablespace_size/(1024*1024) , trunc(sample_time, ‘DD’)
having trunc(sample_time, ‘DD’) >= to_date(’26-05-18′, ‘DD-MM-YY’)
order by temporary_tablespace, day;

COLUMN module format A20
COLUMN sql_opname format A20
COLUMN etime_secs FORMAT 999,999.9
COLUMN etime_mins FORMAT 999,999.9
COLUMN user_id FORMAT 999999
COLUMN sid FORMAT 99999
COLUMN serial# FORMAT 99999
COLUMN username FORMAT A25
COLUMN inst_id FORMAT 99
COLUMN sql_opname FORMAT A10
COLUMN sql_id FORMAT A13
COLUMN sql_exec_id FORMAT 9999999999
COLUMN max_temp_mb FORMAT 999,999,999
COLUMN sql_start_time FORMAT A21
COLUMN sql_end_time FORMAT A21

SELECT ASH.inst_id,
ASH.user_id,
ASH.session_id sid,
ASH.session_serial# serial#,
ASH.sql_id,
ASH.sql_exec_id,
ASH.sql_opname,
ASH.module,
MIN(sample_time) sql_start_time,
MAX(sample_time) sql_end_time,
((CAST(MAX(sample_time) AS DATE)) – (CAST(MIN(sample_time) AS DATE))) * (3600*24) etime_secs ,
((CAST(MAX(sample_time) AS DATE)) - (CAST(MIN(sample_time) AS DATE))) * (60*24) etime_mins ,
MAX(temp_space_allocated)/(1024*1024) max_temp_mb
FROM gv$active_session_history ASH
WHERE ASH.session_type = ‘FOREGROUND’
AND ASH.sql_id IS NOT NULL
AND sample_time BETWEEN to_timestamp(’26-05-2018 09:00′, ‘DD-MM-YYYY HH24:MI’) AND to_timestamp(’26-05-2018 12:00′, ‘DD-MM-YYYY HH24:MI’)
GROUP BY ASH.inst_id,
ASH.user_id,
ASH.session_id,
ASH.session_serial#,
ASH.sql_id,
ASH.sql_opname,
ASH.sql_exec_id,
ASH.module
HAVING MAX(temp_space_allocated) > 5 order by ash.sql_id;

=====================================

alter tablespace &TS_NAME add datafile ‘&file_name’ size &Size+G;
select name,total_mb,free_mb from v$asm_diskgroup;

alter session set nls_date_format=’DD/MM/YY HH24:MI:SS’;
select start_time,end_time,input_type,status from v$rman_backup_job_details where start_time>sysdate-2;

select START_TIME,END_TIME,INPUT_BYTES/1024/1024/1024 “Input Size”,OUTPUT_BYTES/1024/1024/1024 “Output Size”,ELAPSED_SECONDS/60/60 “In Hours”,INPUT_TYPE,status from v$rman_backup_job_details where start_time > =sysdate-15 and input_type=’DB INCR’ order by start_time;

select START_TIME,END_TIME,INPUT_BYTES/1024/1024/1024 “Input Size”,OUTPUT_BYTES/1024/1024/1024 “Output Size”,ELAPSED_SECONDS/60/60 “In Hours”,INPUT_TYPE,status from v$rman_backup_job_details where start_time between ’31/01/21 00:00:00′ and ’02/03/21 00:00:00′ and input_type=’DB INCR’ order by start_time;

select name,DB_NAME,START_TIME,END_TIME,INPUT_BYTES/1024/1024/1024 “Input Size”,OUTPUT_BYTES/1024/1024/1024 “Output Size”,ELAPSED_SECONDS/60/60 “In Hours”,INPUT_TYPE,status from rman.rc_rman_backup_job_details,v$database where start_time between ’31/01/21 00:00:00′ and ’02/03/21 00:00:00′ and input_type=’DB INCR’ order by start_time

alter session set nls_date_format=’DD/MM/YY HH24:MI:SS’;
break on name skip page on db_name skip 1 on db_key skip
select name,DB_NAME,START_TIME,END_TIME,INPUT_BYTES/1024/1024/1024 “Input Size”,OUTPUT_BYTES/1024/1024/1024 “Output Size”,ELAPSED_SECONDS/60/60 “In Hours”,INPUT_TYPE,status from rman.rc_rman_backup_job_details,v$database where start_time between ’31/01/21 00:00:00′ and ’02/03/21 00:00:00′ and input_type in (‘DB INCR’,’DB FULL’) order by db_name,start_time;

===== Same DB name with different dbid >>>>> for DCS ===================
select rd.dbid,DB_NAME,START_TIME,END_TIME,INPUT_BYTES/1024/1024/1024 “Input Size”,OUTPUT_BYTES/1024/1024/1024 “Output Size”,ELAPSED_SECONDS/60/60 “In Hours”,INPUT_TYPE,status
from RMAN.rc_rman_backup_job_details bd,rman.RC_DATABASE rd
where rd.db_key=bd.db_key and start_time between ’31/01/21 00:00:00′ and ’02/03/21 00:00:00′ and
input_type in (‘DB INCR’,’DB FULL’) order by db_name,start_time;

SELECT s.recid, s.pieces, p.piece#, p.handle, s.multi_section, count(d.file#) count_df
FROM v$backup_set s
JOIN v$backup_piece p ON (p.recid = s.recid)
JOIN v$backup_datafile d ON (d.set_stamp = p.set_stamp AND
d.set_count = p.set_count)
–WHERE p.tag = ‘BACKUP01’
GROUP BY s.recid, s.pieces, p.piece#, p.handle, s.multi_section, p.compressed
ORDER BY s.recid, p.piece#;

SELECT ‘SET NEWNAME FOR DATAFILE ‘||FILE#||’ TO ”’||’/u02/data/dev/db1’||SUBSTR(NAME, INSTR(NAME, ‘/’, -1, 1),INSTR(NAME, ‘.’,1,2) – INSTR(NAME, ‘/’,-1,1)) ||’.dbf”;’ FROM V$DATAFILE;

$ ps -ef|grep pmon|grep -v grep|awk ‘{print $9}’
ps -ef|grep tns |grep -v grep|awk ‘{print $9″ “$10}’

#################### Find duplicate files in database ###################

SELECT SUBSTR(file_name, instr(file_name, ‘/’, -1))
FROM (SELECT file_name FROM dba_data_files
UNION ALL
SELECT file_name FROM dba_temp_files
UNION ALL
SELECT member as file_name FROM v$logfile
UNION ALL
SELECT name as file_name FROM v$controlfile)
GROUP BY file_name
HAVING COUNT(1) > 1;

#

select start_time,end_time,input_type,status from v$rman_backup_job_details where start_time>=sysdate-2;

select BEGIN_TIME,END_TIME,MAXQUERYLEN,SSOLDERRCNT,NOSPACEERRCNT,UNEXPIREDBLKS,TUNED_UNDORETENTION,maxconcurrency from v$undostat where BEGIN_TIME>=sysdate-1

When looking for wait events in general, remove the last_call_et condition:
select a.inst_id,a.sid,a.serial#,a.username,a.status,a.logon_time,a.last_call_et/60/60 ,a.blocking_session,sql_id,program,b.event,machine
from gv$session a,gv$session_wait b where a.username is not null and a.sid=b.sid and a.last_call_et/60/60 > 2 order by 7;

select sid,serial#,username,status,program,sql_id,logon_time from v$session where sid=&sid;
select sql_fulltext from v$sql where sql_id=’&Sql_ID’;

select sid,serial#,username,status,logon_time,last_call_et/60,sql_id,program from v$session where sid in (select distinct blocking_session from v$session where blocking_session is not null);
select addr,spid from v$process where spid=&spid;
select sid,serial#,username,logon_time,status,last_call_et/60,blocking_session,sql_id from v$session where paddr=’&ADDR’;
select sid,serial#,username,logon_time,status,last_call_et/60,blocking_session,sql_id,machine,program from v$session where paddr=’&ADDR’;
select sql_id,sql_fulltext from v$sql where sql_id in (select distinct sql_id from v$session where username is not null);

changing archivelog mode on 11g and 12c RAC
Posted on May 1, 2014 by Bjoern Rost
I hate being wrong. But it does keep happening. Last week I sat through a RAC installation (12c GI with 11gR2 database) with a client and one of the steps involved enabling archivelog mode for one of the databases. They shut down all instances, then started one of them in mount mode only.

srvctl stop database -d RAC
srvctl start instance -d RAC -i RAC1 -o mount
Just as they logged on to sqlplus and started typing "ALTER DATABASE ARCHIVELOG;" I proclaimed that this was not going to work unless they first changed CLUSTER_DATABASE in the spfile and restarted that instance. Blank looks. An enter key was pressed. To my surprise the database came back with:

SQL> ALTER DATABASE ARCHIVELOG;

Database altered.
Apparently this changed after 10gR2 and it is no longer necessary to fiddle with CLUSTER_DATABASE while switching archivelog mode. For years and dozens of installations I had done this extra step believing this was the way it had to be done. But not any more. I re-learned something.
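
For quick reference, the end-to-end sequence on a RAC database would look roughly like this (database/instance names follow the example above; adjust to your environment, and note no CLUSTER_DATABASE change is needed on 11gR2 and later):

srvctl stop database -d RAC
srvctl start instance -d RAC -i RAC1 -o mount
sqlplus / as sysdba
ALTER DATABASE ARCHIVELOG;   --> or NOARCHIVELOG to switch back
ALTER DATABASE OPEN;
exit
srvctl stop instance -d RAC -i RAC1
srvctl start database -d RAC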

====
http://dbaharrison.blogspot.in/2011/06/rman-duplicate-of-database-using.html

select segment_name,segment_type from dba_extents where file_id =&P1 and &P2 between block_id and block_id + blocks-1;

===========
Prior to Oracle Database 11g release 2 (11.2), if a Data Pump job is executed in parallel on an instance of a RAC database, the parallelism is employed only on that instance without utilizing the potentially valuable idle resources of other instances.
From Oracle Database 11.2 onwards, CLUSTER parameter can be employed to distribute the worker processes across multiple Oracle RAC instances to better utilize Oracle RAC resources.
To exercise more control over the instances on which the job should run, SERVICE_NAME parameter can be specified so that the Data Pump job runs only on those instances where the service is available.

If any Oracle RAC instance where the job is running dies or leaves the cluster, the job aborts and can be restarted at some future time.
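
A rough example of combining these parameters on RAC (the schema, directory, dump file and service names below are placeholders, not from any real job):

expdp system schemas=APPUSER directory=DATA_PUMP_DIR dumpfile=appuser_%U.dmp logfile=appuser_exp.log parallel=4 cluster=Y service_name=DP_SVC
expdp system attach=SYS_EXPORT_SCHEMA_01   --> re-attach and restart an aborted job later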

Script to get schema size:

select obj.owner “Owner”, obj_cnt “Objects”,
decode(seg_size, NULL, 0, seg_size) “size MB”
from ( select owner, count(*) obj_cnt from dba_objects group by owner) obj,
( select owner, ceil(sum(bytes)/1024/1024) seg_size from dba_segments group by owner) segment
where obj.owner = segment.owner(+)

order by 3 desc, 2 desc, 1;

Script to get the size of the top 10 objects in the database:

SELECT * FROM
(
select
SEGMENT_NAME,
SEGMENT_TYPE,
BYTES/1024/1024/1024 GB,
TABLESPACE_NAME
from
dba_segments
order by 3 desc
) WHERE
ROWNUM <= 10;

============

Object Growth Trend:

select * from table(dbms_space.OBJECT_GROWTH_TREND (‘ARUP’,’BOOKINGS’,’TABLE’));

Size of data and database:
select ‘segments’ “Type”, sum(bytes)/1024/1024 “Size in MB”
from dba_segments
where tablespace_name = ‘MYTBS’
union all
select ‘datafile’, sum(bytes)/1024/1024
from dba_data_files

where tablespace_name =’MYTBS’ ;

SELECT b.thread#,a.group#,
b.STATUS,
a.MEMBER,
b.BYTES/1024/1024 “Size (Mb)”
FROM v$logfile a,
v$log b

WHERE a.group# = b.group#;

Undo Usage:
You can find out why the UNDO Tablespace starts growing unexpectedly, by running the following queries.

Use this query to confirm the extent size and the total bytes for those extent sizes for the active UNDO segments. This query will also confirm whether there are too many extents of a particular size, which can lead to ORA-1628 "max # of extents" errors.

SELECT segment_name, bytes “Extent_Size”, count(extent_id) “Extent_Count”, bytes * count(extent_id) “Extent_Bytes” FROM dba_undo_extents WHERE status = ‘ACTIVE’ group by segment_name, bytes order by 1, 3 desc;

Then, use this query to identify those users who are using the Active AUM UNDO Segments.

10g: AUM Segment Name Format: _SYSSMUxidusn$
SELECT s.sid, s.serial#, s.username, u.segment_name, count(u.extent_id) “Extent Count”, t.used_ublk, t.used_urec, s.program
FROM v$session s, v$transaction t, dba_undo_extents u
WHERE s.taddr = t.addr and u.segment_name = ‘_SYSSMU’||t.xidusn||’$’ and u.status = ‘ACTIVE’
GROUP BY s.sid, s.serial#, s.username, u.segment_name, t.used_ublk, t.used_urec, s.program
ORDER BY t.used_ublk desc, t.used_urec desc, s.sid, s.serial#, s.username, s.program;

11g: AUM Segment Name Format: _SYSSMUxidusn_number$
SELECT s.sid, s.serial#, s.username, u.segment_name, count(u.extent_id) "Extent Count", t.used_ublk, t.used_urec, s.program
FROM v$session s, v$transaction t, dba_undo_extents u
WHERE s.taddr = t.addr and u.segment_name like '_SYSSMU'||t.xidusn||'%$' and u.status = 'ACTIVE'
GROUP BY s.sid, s.serial#, s.username, u.segment_name, t.used_ublk, t.used_urec, s.program
ORDER BY t.used_ublk desc, t.used_urec desc, s.sid, s.serial#, s.username, s.program;

Finally, use this query to identify those UNDO Segments that are being used by transactions rolling back.

select b.name “UNDO Segment Name”, b.inst# “Instance ID”, b.status$ STATUS, a.ktuxesiz “UNDO Blocks”, a.ktuxeusn, a.ktuxeslt xid_slot, a.ktuxesqn xid_seq
from x$ktuxe a, undo$ b
where a.ktuxesta = ‘ACTIVE’ and a.ktuxecfl like ‘%DEAD%’ and a.ktuxeusn = b.us#;

Once you have identified the different transactions using the Active UNDO Segments, you know the cause of the UNDO Tablespace usage.

If Active UNDO Segments are being used by transactions rolling back, then your options are;

Wait for the rollback to complete. The time to rollback will depend on the transaction mix i.e. SELECT, INSERT, UPDATE, DELETE. A SELECT needs very little/no time to rollback, but an INSERT, UPDATE or DELETE will need the same time to rollback, as it took to execute.
Perform a Database Point-In-Time Recovery. This means restoring the database from backup and recovering with the Redo logs, until just before the time the transaction rolling back started.

For ORA-1628 “max # of extents” errors, please review:

Size of partitions

select segment_name, partition_name, blocks, bytes/1024/1024/1024 “GB” from dba_segments where tablespace_name = ‘TBS_DSP_LOG’ order by segment_name;

select segment_name,partition_name,bytes/1024/1024/1024 from dba_segments where segment_name in (‘CUST_CAMPAIGN_HISTORY’,’CUST_CAMPAIGN_STEP_HISTORY’) and partition_name like ‘SEP2016%’;

col “Tablespace” for a40
col “Used MB” for 99,999,999
col “Free MB” for 99,999,999
col “Total MB” for 99,999,999

select df.tablespace_name “Tablespace”,
totalusedspace “Used MB”,
(df.totalspace – tu.totalusedspace) “Free MB”,
df.totalspace “Total MB”,
round(100 * ( (df.totalspace – tu.totalusedspace)/ df.totalspace))
“Pct. Free”
from
(select tablespace_name,
round(sum(bytes) / 1048576) TotalSpace
from dba_data_files
group by tablespace_name) df,
(select round(sum(bytes)/(1024*1024)) totalusedspace, tablespace_name
from dba_segments
group by tablespace_name) tu

where df.tablespace_name = tu.tablespace_name order by df.tablespace_name ;

set linesize 132 tab off trimspool on
set pagesize 105
set pause off
set echo off
set feedb on

column “TOTAL ALLOC (MB)” format 9,999,990.00
column “TOTAL PHYS ALLOC (MB)” format 9,999,990.00
column “USED (MB)” format 9,999,990.00
column “FREE (MB)” format 9,999,990.00
column “% USED” format 990.00

select a.tablespace_name,
a.bytes_alloc/(1024*1024*1024) "TOTAL ALLOC (GB)",
a.physical_bytes/(1024*1024*1024) "TOTAL PHYS ALLOC (GB)",
nvl(b.tot_used,0)/(1024*1024*1024) "USED (GB)",
(nvl(b.tot_used,0)/a.bytes_alloc)*100 "% USED"
from ( select tablespace_name, sum(bytes) physical_bytes,
       sum(decode(autoextensible,'NO',bytes,'YES',maxbytes)) bytes_alloc
       from dba_data_files group by tablespace_name ) a,
     ( select tablespace_name, sum(bytes) tot_used
       from dba_segments group by tablespace_name ) b
where a.tablespace_name = b.tablespace_name (+)
--and (nvl(b.tot_used,0)/a.bytes_alloc)*100 > 10
and a.tablespace_name not in (select distinct tablespace_name from dba_temp_files)
and a.tablespace_name not like 'UNDO%'
order by 1;
--order by 5

/

SELECT
SUBSTR(SS.USERNAME,1,8) USERNAME,
SS.OSUSER “USER”,
AR.MODULE || ‘ @ ‘ || SS.MACHINE CLIENT,
SS.PROCESS PID,
TO_CHAR(AR.LAST_LOAD_TIME, 'DD-Mon HH24:MI:SS') LOAD_TIME,
AR.DISK_READS DISK_READS,
AR.BUFFER_GETS BUFFER_GETS,
SUBSTR(SS.LOCKWAIT,1,10) LOCKWAIT,
W.EVENT EVENT,
SS.STATUS,
AR.SQL_fullTEXT SQL
FROM V$SESSION_WAIT W,
V$SQLAREA AR,
V$SESSION SS,
v$TIMER T
WHERE SS.SQL_ADDRESS = AR.ADDRESS
AND SS.SQL_HASH_VALUE = AR.HASH_VALUE
AND SS.SID = W.SID (+)
AND SS.STATUS = ‘ACTIVE’
AND W.EVENT != ‘client message’

ORDER BY SS.LOCKWAIT ASC, SS.USERNAME, AR.DISK_READS DESC

Memory used by sessions:

SET LINESIZE 145
SET PAGESIZE 9999

COLUMN sid FORMAT 999 HEADING ‘SID’
COLUMN oracle_username FORMAT a12 HEADING ‘Oracle User’ JUSTIFY right
COLUMN os_username FORMAT a9 HEADING ‘O/S User’ JUSTIFY right
COLUMN session_program FORMAT a18 HEADING ‘Session Program’ TRUNC
COLUMN session_machine FORMAT a8 HEADING ‘Machine’ JUSTIFY right TRUNC
COLUMN session_pga_memory FORMAT 9,999,999,999 HEADING ‘PGA Memory’
COLUMN session_pga_memory_max FORMAT 9,999,999,999 HEADING ‘PGA Memory Max’
COLUMN session_uga_memory FORMAT 9,999,999,999 HEADING ‘UGA Memory’
COLUMN session_uga_memory_max FORMAT 9,999,999,999 HEADING ‘UGA Memory MAX’

SELECT
s.sid sid
, lpad(s.username,12) oracle_username
, lpad(s.osuser,9) os_username
, s.program session_program
, lpad(s.machine,8) session_machine
, (select ss.value from v$sesstat ss, v$statname sn
where ss.sid = s.sid and
sn.statistic# = ss.statistic# and
sn.name = ‘session pga memory’) session_pga_memory
, (select ss.value from v$sesstat ss, v$statname sn
where ss.sid = s.sid and
sn.statistic# = ss.statistic# and
sn.name = ‘session pga memory max’) session_pga_memory_max
, (select ss.value from v$sesstat ss, v$statname sn
where ss.sid = s.sid and
sn.statistic# = ss.statistic# and
sn.name = ‘session uga memory’) session_uga_memory
, (select ss.value from v$sesstat ss, v$statname sn
where ss.sid = s.sid and
sn.statistic# = ss.statistic# and
sn.name = ‘session uga memory max’) session_uga_memory_max
FROM
v$session s
ORDER BY session_pga_memory DESC
/

======
Library cache waiting

select sid, event, p1raw, seconds_in_wait,wait_time
from v$session_wait
where event = ‘library cache pin’
and state = ‘WAITING’;

SID,EVENT,P1RAW,SECONDS_IN_WAIT,WAIT_TIME
4962,library cache pin,070000031733BDF8,51,0

Identify which object is being waited for:
SELECT kglnaown “Owner”, kglnaobj “Object”
FROM x$kglob WHERE kglhdadr=’070000031733BDF8′;

Who is pinning the object?

SELECT s.sid, s.serial#, s.username, s.osuser, s.machine, s.status,
kglpnmod “Mode”, kglpnreq “Req”
FROM x$kglpn p, v$session s WHERE p.kglpnuse=s.saddr AND
kglpnhdl=’070000031733BDF8′;

Use the following script to generate SQL to kill off multiple sessions all waiting for the same library cache pin.

SELECT ‘alter system kill session ”’ || s.sid || ‘,’ || s.serial# || ”’;’
FROM x$kglpn p, v$session s
WHERE p.kglpnuse=s.saddr
AND kglpnhdl=’070000031733BDF8′

=====
SELECT * FROM
(SELECT
sql_fulltext,
sql_id,
child_number,
disk_reads,
executions,
first_load_time,
last_load_time
FROM v$sql
ORDER BY elapsed_time DESC)

WHERE ROWNUM < 10

— Top 10 CPU consumers in last 5 minutes


SQL> select * from
(
select session_id, session_serial#, count(*)
from v$active_session_history
where session_state = 'ON CPU'
and sample_time > sysdate - interval '5' minute
group by session_id, session_serial#
order by count(*) desc
)

where rownum <= 10;

— Who is that SID?


set lines 200
col username for a10
col osuser for a10
col machine for a10
col program for a10
col resource_consumer_group for a10
col client_info for a10

SQL> select serial#,
username,
osuser,
machine,
program,
resource_consumer_group,
client_info
from v$session where sid=&sid;

————————-

— What did that SID do?


SQL> select distinct sql_id, session_serial# from v$active_session_history
where sample_time > sysdate – interval ‘5’ minute

and session_id=&sid;

— Retrieve the SQL from the Library Cache:


col sql_text for a80
SQL> select sql_text from v$sql where sql_id=’&sqlid’

===================Segment/Schema growth=============
select * from (select to_char(end_interval_time, ‘MM/DD/YY’) mydate, segment_name,SEGMENT_TYPE, sum(space_used_delta) / 1024 / 1024 “Space used (MB)”, avg(c.bytes) / 1024 / 1024 “Total Object Size (MB)”,
round(sum(space_used_delta) / sum(c.bytes) * 100, 2) “Percent of Total Disk Usage”
from
dba_hist_snapshot sn,
dba_hist_seg_stat a,
dba_objects b,
dba_segments c
where begin_interval_time > trunc(sysdate) – 15
and sn.snap_id = a.snap_id
and b.object_id = a.obj#
and b.owner = c.owner
and b.object_name = c.segment_name
and C.owner in (‘EITC_SUBSCRIBER_PLAN_NEW_10’,
‘EITC_CONSUMER_MASS_REVCOS’,
‘EITC_CONSUMER_PREMIUM_REVCOC’,
‘EITC_CONSUMER_PLAN_NEW_10’,
‘EITC_ENTERPRISE_PLAN_NEW_10’,
‘EITC_CARRIER_PLAN_NEW_10’,
‘EITC_BROADCASTING_PLAN_10’,
‘EITC_HEADCOUNT_PLAN_10’,
‘EITC_STAFF_COST_PLAN_NEW_10’,
‘EITC_STAFF_COST_PLAN_ASSUMP_10’,
‘EITC_DEPARTMENTAL_OPEX_PLAN_10’,
‘EITC_CAPEX_PLAN_10’,
‘EITC_PL_CONSOLIDATION_NEW_10’,
‘EITC_BUDGET_TRANSFERS_10’,
‘EITC_REVENUE_COS_ACTUALS_10’)
group by to_char(end_interval_time, ‘MM/DD/YY’), segment_name, SEGMENT_TYPE)
order by segment_name,SEGMENT_TYPE, to_date(mydate, ‘MM/DD/YY’);

select * from (select c.owner,to_char(end_interval_time, ‘MM/DD/YY’) mydate, sum(space_used_delta) / 1024 / 1024 “Space used (MB)”, avg(c.bytes) / 1024 / 1024 “Total Object Size (MB)”,
round(sum(space_used_delta) / sum(c.bytes) * 100, 2) “Percent of Total Disk Usage”
from
dba_hist_snapshot sn,
dba_hist_seg_stat a,
dba_objects b,
dba_segments c
where begin_interval_time > trunc(sysdate) – 15
and sn.snap_id = a.snap_id
and b.object_id = a.obj#
and b.owner = c.owner
and b.object_name = c.segment_name
and C.owner in (‘EITC_SUBSCRIBER_PLAN_NEW_10’,
‘EITC_CONSUMER_MASS_REVCOS’,
‘EITC_CONSUMER_PREMIUM_REVCOC’,
‘EITC_CONSUMER_PLAN_NEW_10’,
‘EITC_ENTERPRISE_PLAN_NEW_10’,
‘EITC_CARRIER_PLAN_NEW_10’,
‘EITC_BROADCASTING_PLAN_10’,
‘EITC_HEADCOUNT_PLAN_10’,
‘EITC_STAFF_COST_PLAN_NEW_10’,
‘EITC_STAFF_COST_PLAN_ASSUMP_10’,
‘EITC_DEPARTMENTAL_OPEX_PLAN_10’,
‘EITC_CAPEX_PLAN_10’,
‘EITC_PL_CONSOLIDATION_NEW_10’,
‘EITC_BUDGET_TRANSFERS_10’,
‘EITC_REVENUE_COS_ACTUALS_10’)
group by to_char(end_interval_time, ‘MM/DD/YY’),c.owner)
order by owner, to_date(mydate, ‘MM/DD/YY’);

with snaps as (select min(snap_id) min_snap, max(snap_id) max_snap
from dba_hist_snapshot where begin_interval_time > ADD_MONTHS(sysdate,-1))
select
O.TABLESPACE_NAME,
O.OWNER||’.’||O.OBJECT_NAME oid,
o.object_type,
–min(SNAP_ID) MIN_OBJ_SNAP,
–max(SNAP_ID) max_obj_snap,
–MIN(H.SPACE_USED_TOTAL) min_space_used,
SUM(H.SPACE_USED_DELTA) SPACE_USED,
SUM(H.SPACE_ALLOCATED_DELTA) space_alloc
from
DBA_HIST_SEG_STAT H join dba_hist_seg_stat_obj o
on h.dbid=o.dbid and h.ts#=o.ts# and h.obj#=o.obj# and h.dataobj#=o.dataobj#
WHERE 1=1
AND H.SNAP_ID BETWEEN (SELECT MIN_SNAP FROM SNAPS) AND (SELECT MAX_SNAP FROM SNAPS)
and H.DBID = (select DBID from V$DATABASE)
and H.INSTANCE_NUMBER = (select INSTANCE_NUMBER from V$INSTANCE)
and O.OWNER != ‘** MISSING **’ — segments already gone
and O.OBJECT_NAME not like ‘BIN$%’ — recycle-bin
and O.OBJECT_NAME not like ‘SYS_%’ — LOBs, etc – not too representative
AND o.OWNER IN (‘EITC_SUBSCRIBER_PLAN_NEW_10’,
‘EITC_CONSUMER_MASS_REVCOS’,
‘EITC_CONSUMER_PREMIUM_REVCOC’,
‘EITC_CONSUMER_PLAN_NEW_10’,
‘EITC_ENTERPRISE_PLAN_NEW_10’,
‘EITC_CARRIER_PLAN_NEW_10’,
‘EITC_BROADCASTING_PLAN_10’,
‘EITC_HEADCOUNT_PLAN_10’,
‘EITC_STAFF_COST_PLAN_NEW_10’,
‘EITC_STAFF_COST_PLAN_ASSUMP_10’,
‘EITC_DEPARTMENTAL_OPEX_PLAN_10’,
‘EITC_CAPEX_PLAN_10’,
‘EITC_PL_CONSOLIDATION_NEW_10’,
‘EITC_BUDGET_TRANSFERS_10’,
‘EITC_REVENUE_COS_ACTUALS_10’)
group by
O.TABLESPACE_NAME,
O.OWNER||’.’||O.OBJECT_NAME,
o.OBJECT_TYPE

having SUM(H.SPACE_ALLOCATED_DELTA)>0

=============== Hidden Parameters =========
SET PAUSE ON
SET PAUSE ‘Press Return to Continue’
SET PAGESIZE 60
SET LINESIZE 300

COLUMN ksppinm FORMAT A50
COLUMN ksppstvl FORMAT A50

SELECT
ksppinm,
ksppstvl
FROM
x$ksppi a,
x$ksppsv b
WHERE
a.indx=b.indx
AND
substr(ksppinm,1,1) = ‘_’
ORDER BY ksppinm
/


select a.ksppinm name,
b.ksppstvl value,
b.ksppstdf deflt,
decode
(a.ksppity, 1,
‘boolean’, 2,
‘string’, 3,
‘number’, 4,
‘file’, a.ksppity) type,
a.ksppdesc description
from
sys.x$ksppi a,
sys.x$ksppcv b
where
a.indx = b.indx
and
a.ksppinm like ‘_%’ escape ‘\’
order by
name

col “Parameter” format a30
col “Session Value” format a20
col “Instance Value” format a20
col “Desc” format a70
select a.ksppinm “Parameter”, c.ksppstvl “Instance Value”, ksppdesc “Desc”
from sys.x$ksppi a, sys.x$ksppcv b, sys.x$ksppsv c
where a.indx = b.indx and a.indx = c.indx
and substr(ksppinm,1,1)=’_’
and a.ksppinm in (‘_fix_control’);

SQL> select a.ksppinm “Parameter”, c.ksppstvl “Instance Value”, ksppdesc “Desc”
from sys.x$ksppi a, sys.x$ksppcv b, sys.x$ksppsv c
where a.indx = b.indx and a.indx = c.indx
and substr(ksppinm,1,1)=’_’
and a.ksppinm in (‘&hidden_parameter’);

Parameter Instance Value


Desc

_fix_control
bug fix control parameter

================================

column “Percent of Total Disk Usage” justify right format 999.99
column “Space Used (MB)” justify right format 9,999,999.99
column “Total Object Size (MB)” justify right format 9,999,999.99
set linesize 150
set pages 80
set feedback off
set line 5000
column “SEGMENT_NAME” justify left format A30
column “TABLESPACE_NAME” justify left format A30
select * from (select c.TABLESPACE_NAME,c.segment_name,to_char(end_interval_time, ‘MM/DD/YY’) mydate,
sum(space_used_delta) / 1024 / 1024 “Space used (MB)”, avg(c.bytes) / 1024 / 1024 “Total Object Size (MB)”,
round(sum(space_used_delta) / sum(c.bytes) * 100, 2) “Percent of Total Disk Usage”
from
dba_hist_snapshot sn,
dba_hist_seg_stat a,
dba_objects b,
dba_segments c
where begin_interval_time > trunc(sysdate)-10
and sn.snap_id = a.snap_id
and b.object_id = a.obj#
and b.owner = c.owner
and b.owner=’&Owner’
and b.object_name = c.segment_name
group by c.TABLESPACE_NAME,c.segment_name,to_char(end_interval_time, ‘MM/DD/YY’)
order by c.TABLESPACE_NAME,c.segment_name,to_date(mydate, ‘MM/DD/YY’));

set feedback on
select * from (select c.TABLESPACE_NAME,c.segment_name “Object Name”,b.object_type,
sum(space_used_delta)/1024/1024 “Growth (MB)”
from dba_hist_snapshot sn,
dba_hist_seg_stat a,
dba_objects b,
dba_segments c
where begin_interval_time > trunc(sysdate)-&days_back
and sn.snap_id = a.snap_id
and b.object_id = a.obj#
and b.owner = c.owner
and b.object_name = c.segment_name
and c.owner =’&Owner’
group by c.TABLESPACE_NAME,c.segment_name,b.object_type)

order by 3 asc;

set pages 80
set feedback off
column “OBJECT_NAME” justify left format A30
column “SUBOBJECT_NAME” justify left format A30
column “OBJECT_TYPE” justify left format A30
column “Tablespace Name” justify left format A30
set line 5000
SELECT o.OWNER , o.OBJECT_NAME , o.SUBOBJECT_NAME , o.OBJECT_TYPE ,
t.NAME "Tablespace Name", s.growth/(1024*1024) "Growth in MB", (SELECT sum(bytes)/(1024*1024)
FROM dba_segments
WHERE segment_name=o.object_name) “Total Size(MB)”
FROM DBA_OBJECTS o,
( SELECT TS#,OBJ#,
SUM(SPACE_USED_DELTA) growth
FROM DBA_HIST_SEG_STAT
GROUP BY TS#,OBJ#
HAVING SUM(SPACE_USED_DELTA) > 0
ORDER BY 2 DESC ) s,
v$tablespace t
WHERE s.OBJ# = o.OBJECT_ID
AND s.TS#=t.TS#
AND o.OWNER=’&OWNER’
ORDER BY 6 DESC
/

SET lines 132 pages 66 feedback off
COLUMN tablespace_name format a15 heading ‘Tablespace|(TBS)|Name’
COLUMN autoextensible format a6 heading ‘Can|Auto|Extend’
COLUMN files_in_tablespace format 999 heading ‘Files|In|TBS’
COLUMN total_tablespace_space format 99,999,999,999 heading ‘Total|Current|TBS|Space’
COLUMN total_used_space format 99,999,999,999 heading ‘Total|Current|Used|Space’
COLUMN total_tablespace_free_space format 99,999,999,999 heading ‘Total|Current|Free|Space’
COLUMN total_used_pct format 999.99 heading ‘Total|Current|Used|PCT’
COLUMN total_free_pct format 999.99 heading ‘Total|Current|Free|PCT’
COLUMN max_size_of_tablespace format 99,999,999,999 heading ‘TBS|Max|Size’
COLUMN total_auto_used_pct format 999.99 heading ‘Total|Max|Used|PCT’
COLUMN total_auto_free_pct format 999.99 heading ‘Total|Max|Free|PCT’

TTITLE left _date center Tablespace Space Utilization Status Report skip 2

WITH tbs_auto AS
(SELECT DISTINCT tablespace_name, autoextensible
FROM dba_data_files
WHERE autoextensible = ‘YES’),
files AS
(SELECT tablespace_name, COUNT (*) tbs_files, SUM (BYTES) total_tbs_bytes
FROM dba_data_files
GROUP BY tablespace_name),
fragments AS
(SELECT tablespace_name, COUNT (*) tbs_fragments,
SUM (BYTES) total_tbs_free_bytes,
MAX (BYTES) max_free_chunk_bytes
FROM dba_free_space
GROUP BY tablespace_name),
AUTOEXTEND AS
(SELECT tablespace_name, SUM (size_to_grow) total_growth_tbs
FROM (SELECT tablespace_name, SUM (maxbytes) size_to_grow
FROM dba_data_files
WHERE autoextensible = ‘YES’
GROUP BY tablespace_name
UNION
SELECT tablespace_name, SUM (BYTES) size_to_grow
FROM dba_data_files
WHERE autoextensible = ‘NO’
GROUP BY tablespace_name)
GROUP BY tablespace_name)
SELECT a.tablespace_name,
CASE tbs_auto.autoextensible
WHEN ‘YES’
THEN ‘YES’
ELSE ‘NO’
END AS autoextensible,
files.tbs_files files_in_tablespace,
files.total_tbs_bytes total_tablespace_space,
(files.total_tbs_bytes – fragments.total_tbs_free_bytes
) total_used_space,
fragments.total_tbs_free_bytes total_tablespace_free_space,
( ( (files.total_tbs_bytes – fragments.total_tbs_free_bytes)
/ files.total_tbs_bytes
)
* 100
) total_used_pct,
((fragments.total_tbs_free_bytes / files.total_tbs_bytes) * 100
) total_free_pct,
AUTOEXTEND.total_growth_tbs max_size_of_tablespace,
( ( ( AUTOEXTEND.total_growth_tbs
– (AUTOEXTEND.total_growth_tbs – fragments.total_tbs_free_bytes
)
)
/ AUTOEXTEND.total_growth_tbs
)
* 100
) total_auto_used_pct,
( ( (AUTOEXTEND.total_growth_tbs – fragments.total_tbs_free_bytes)
/ AUTOEXTEND.total_growth_tbs
)
* 100
) total_auto_free_pct
FROM dba_tablespaces a, files, fragments, AUTOEXTEND, tbs_auto
WHERE a.tablespace_name = files.tablespace_name
AND a.tablespace_name = fragments.tablespace_name
AND a.tablespace_name = AUTOEXTEND.tablespace_name
AND a.tablespace_name = tbs_auto.tablespace_name(+);

========
Tablespace Growth Report
TABLESPACE GROWTH REPORT
Author JP Vijaykumar
Date Sept 8 2013
Modified Sept 14 2013

/* The period range for this report is dependent on
the snapshot retention period set for the db. */
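
Before running the report, the current AWR interval and retention can be checked, and extended if a longer history is needed, roughly as below (the 35-day / 30-minute values are only examples):

select snap_interval, retention from dba_hist_wr_control;
exec dbms_workload_repository.modify_snapshot_settings(retention => 35*24*60, interval => 30);   --> values are in minutes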

–TABLESPACE GROWTH REPORT(USING PL/SQL PROCEDURE)
set serverout on size 1000000 timing on
declare
v_num number;
begin
dbms_output.put_line(‘DB_NAME,RUN_DATE,TS_NAME,ALLOC_GB,CURR_USED_GB,PREV_USED_GB,VARIANCE,%CHANGE’);
for c1 in (select name,tablespace_name from dba_tablespaces,v$database
where (tablespace_name like ‘%DATA%’ or
tablespace_name like ‘%INDEX%’ )
order by tablespace_name) loop
v_num :=0;
for c2 in (
select ss.run_time,ts.name,round(su.tablespace_size*dt.block_size/1024/1024/1024,2) alloc_size_gb,
round(su.tablespace_usedsize*dt.block_size/1024/1024/1024,2) used_size_gb
from
dba_hist_tbspc_space_usage su,
(select trunc(BEGIN_INTERVAL_TIME) run_time,max(snap_id) snap_id from dba_hist_snapshot
group by trunc(BEGIN_INTERVAL_TIME) ) ss,
v$tablespace ts,
dba_tablespaces dt
where su.snap_id = ss.snap_id
and su.tablespace_id = ts.ts#
and ts.name = c1.tablespace_name
and ts.name = dt.tablespace_name order by 1) loop
if (v_num = 0) then
dbms_output.put_line(c1.name||’,’||c2.run_time||’ ,’||c2.name||’, ‘||c2.alloc_size_gb||’, ‘||c2.used_size_gb||’ ,’||v_num||’,’||c2.used_size_gb||’, 0 %’);
elsif (v_num < c2.used_size_gb) then
dbms_output.put_line(c1.name||','||c2.run_time||', '||c2.name||', '||c2.alloc_size_gb||' ,'||c2.used_size_gb||' ,'||v_num||','||(c2.used_size_gb - v_num)||', '||
round((c2.used_size_gb - v_num)*100/v_num,2)||' %');
elsif (v_num > c2.used_size_gb) then
dbms_output.put_line(c1.name||','||c2.run_time||', '||c2.name||', '||c2.alloc_size_gb||', '||c2.used_size_gb||', '||v_num||','||(c2.used_size_gb - v_num)||', -'||
round((v_num - c2.used_size_gb)*100/v_num,2)||' %');
else
dbms_output.put_line(c1.name||’,’||c2.run_time||’ ,’||c2.name||’ ,’||c2.alloc_size_gb||’ ,’||c2.used_size_gb||’,’||v_num||’,0, 0 %’);
end if;
v_num:=c2.used_size_gb;
end loop;
end loop;
end;
/

–TABLESPACE GROWTH REPORT(USING SQLPLUS)
set linesize 300
column name format a25
column variance format a20
alter session set nls_date_format=’yyyy-mm-dd’;
with t as (
select ss.run_time,ts.name,round(su.tablespace_size*dt.block_size/1024/1024/1024,2) alloc_size_gb,
round(su.tablespace_usedsize*dt.block_size/1024/1024/1024,2) used_size_gb
from
dba_hist_tbspc_space_usage su,
(select trunc(BEGIN_INTERVAL_TIME) run_time,max(snap_id) snap_id from dba_hist_snapshot
group by trunc(BEGIN_INTERVAL_TIME) ) ss,
v$tablespace ts,
dba_tablespaces dt
where su.snap_id = ss.snap_id
and su.tablespace_id = ts.ts#
and ts.name =upper(‘&TABLESPACE_NAME’)
and ts.name = dt.tablespace_name )
select e.run_time,e.name,e.alloc_size_gb,e.used_size_gb curr_used_size_gb,b.used_size_gb prev_used_size_gb,
case when e.used_size_gb > b.used_size_gb then to_char(e.used_size_gb – b.used_size_gb)
when e.used_size_gb = b.used_size_gb then ‘NO DATA GROWTH’ when e.used_size_gb < b.used_size_gb then ‘***DATA PURGED’ end variance
from t e, t b
where e.run_time = b.run_time + 1
order by 1;

–TO MAIL THE REPORT AS AN ATTACHMENT

!ls -1tr *csv|tail -1|awk ‘{print “uuencode ” $1,$1 “|mailx -s $ORACLE_SID jp[.]vijaykumar[@]gmail[.]com”}’|ksh

COLUMN username FORMAT a10 HEADING ‘Holding|User’
COLUMN session_id HEADING ‘SID’
COLUMN mode_held FORMAT a20 HEADING ‘Mode|Held’
COLUMN mode_requested FORMAT a20 HEADING ‘Mode|Requested’
COLUMN lock_id1 FORMAT a20 HEADING ‘Lock|ID1’
COLUMN lock_id2 FORMAT a20 HEADING ‘Lock|ID2’
COLUMN type HEADING ‘Lock|Type’
SET LINES 132 PAGES 59 FEEDBACK OFF ECHO OFF
TTITLE left _date center ‘Sessions Blocking Other Sessions Report’ skip 2
SELECT a.session_id, username, TYPE, mode_held, mode_requested, lock_id1,
lock_id2
FROM v$session b, dba_blockers c, dba_locks a
WHERE c.holding_session = a.session_id AND c.holding_session = b.sid

/

Index rebuild progress:

set lines 200
col “Index Operation” for a60 trunc
col “ETA Mins” format 999.99
col “Runtime Mins” format 999.99
select sess.sid as “Session ID”, sql.sql_text as “Index Operation”,
longops.totalwork, longops.sofar,
longops.elapsed_seconds/60 as “Runtime Mins”,
longops.time_remaining/60 as “ETA Mins”
from v$session sess, v$sql sql, v$session_longops longops
where
sess.sid=longops.sid
and sess.sql_address = sql.address
and sess.sql_address = longops.sql_address
and sess.status = ‘ACTIVE’
and longops.totalwork > longops.sofar
and sess.sid not in ( SELECT sys_context(‘USERENV’, ‘SID’) SID FROM DUAL)
and upper(sql.sql_text) like ‘%INDEX%’
order by 3, 4

;

Size of tables,indexes and LOB in a schema

COLUMN TABLE_NAME FORMAT A32
COLUMN OBJECT_NAME FORMAT A32
COLUMN OWNER FORMAT A30

SELECT
owner, table_name, TRUNC(sum(bytes)/1024/1024/1024) Gig
FROM
(SELECT segment_name table_name, owner, bytes
FROM dba_segments
WHERE segment_type = ‘TABLE’
UNION ALL
SELECT i.table_name, i.owner, s.bytes
FROM dba_indexes i, dba_segments s
WHERE s.segment_name = i.index_name
AND s.owner = i.owner
AND s.segment_type = ‘INDEX’
UNION ALL
SELECT l.table_name, l.owner, s.bytes
FROM dba_lobs l, dba_segments s
WHERE s.segment_name = l.segment_name
AND s.owner = l.owner
AND s.segment_type = ‘LOBSEGMENT’
UNION ALL
SELECT l.table_name, l.owner, s.bytes
FROM dba_lobs l, dba_segments s
WHERE s.segment_name = l.index_name
AND s.owner = l.owner
AND s.segment_type = ‘LOBINDEX’)
WHERE owner in UPPER(‘&owner’) and table_name in (‘&TABLENAME’)
–WHERE table_name in (‘&TABLENAME’)
GROUP BY table_name, owner
HAVING SUM(bytes)/1024/1024 > 10 /* Ignore really small tables */
ORDER BY SUM(bytes) desc
;


SELECT S.OWNER “Owner”,NVL(S.SEGMENT_NAME, ‘TABLE TOTAL SIZE’) “Segment name”,ROUND(SUM(S.BYTES)/1024/1024/1024,1) “Segment size (GB)”
FROM DBA_SEGMENTS S
WHERE S.SEGMENT_NAME IN (‘ICS_JMS_REPLICATION_ERR’,
‘ICS_DU_E_PMT_STATUS_RET’,
‘ICS_RECONNECTION_CR702’,
‘ICS_RECONNECTION_CR702_HIST’,
‘ICS_DU_E_PMT_STATUS_RET_HIST’)
AND S.OWNER =’ICSPRDADM’
OR S.SEGMENT_NAME IN (
(
SELECT L.SEGMENT_NAME FROM DBA_LOBS L WHERE L.TABLE_NAME in (‘ICS_JMS_REPLICATION_ERR’,
‘ICS_DU_E_PMT_STATUS_RET’,
‘ICS_RECONNECTION_CR702’,
‘ICS_RECONNECTION_CR702_HIST’,
‘ICS_DU_E_PMT_STATUS_RET_HIST’) AND L.OWNER =’ICSPRDADM’
)
)
or s.segment_name in (
(
SELECT di.index_name FROM DBA_INDEXES di where di.table_name in (‘ICS_JMS_REPLICATION_ERR’,
‘ICS_DU_E_PMT_STATUS_RET’,
‘ICS_RECONNECTION_CR702’,
‘ICS_RECONNECTION_CR702_HIST’,
‘ICS_DU_E_PMT_STATUS_RET_HIST’) AND di.OWNER =’ICSPRDADM’
)
)
GROUP BY S.OWNER,ROLLUP(S.SEGMENT_NAME)
ORDER BY 1,2,3;


======== Size of table and it’s indexes ===========

SELECT UPPER(‘&table_name’),
s.segment_type,
SUM(s.bytes)/1024/1024/1024 Gigs
FROM dba_segments s
WHERE (s.segment_name,s.segment_type)
IN (SELECT t.table_name,
‘TABLE’
FROM dba_tables t
WHERE t.table_name = UPPER(‘&table_name’)
UNION
SELECT i.index_name,
‘INDEX’
FROM dba_indexes i
WHERE i.table_name = UPPER(‘&table_name’)
)
GROUP BY s.segment_type
ORDER BY 1 DESC;

Gives individual details as well <<<<<<<<<<<<<<<<<<<<<<<<
break on segment_type;
SELECT s.segment_name,s.segment_type,
SUM(s.bytes)/1024/1024/1024 Gigs
FROM dba_segments s
WHERE (s.segment_name,s.segment_type)
IN (SELECT t.table_name,
‘TABLE’
FROM dba_tables t
WHERE t.table_name = UPPER(‘&table_name’)
UNION
SELECT i.index_name,
‘INDEX’
FROM dba_indexes i
WHERE i.table_name = UPPER(‘&&table_name’)
) and owner=’SIEBEL’
GROUP BY s.segment_name,s.segment_type
ORDER BY 2,3 DESC;

========================

History of processes over a period:

SELECT
to_char(TRUNC(s.begin_interval_time,’HH24′),’DD-MON-YYYY HH24:MI:SS’) snap_begin,
sum(r.current_utilization) sessions
FROM
dba_hist_resource_limit r,
dba_hist_snapshot s
WHERE ( TRUNC(s.begin_interval_time,’HH24′),s.snap_id ) IN
(
–Select the Maximum of the Snapshot IDs within an hour if more than one snapshot IDs
–have the same number of sessions within that hour , so then picking one of the snapIds
SELECT TRUNC(sn.begin_interval_time,’HH24′),MAX(rl.snap_id)
FROM dba_hist_resource_limit rl,dba_hist_snapshot sn
WHERE TRUNC(sn.begin_interval_time) >= TRUNC(sysdate-30)
AND rl.snap_id = sn.snap_id
AND rl.resource_name = ‘sessions’
AND rl.instance_number = sn.instance_number
AND ( TRUNC(sn.begin_interval_time,’HH24′),rl.CURRENT_UTILIZATION ) IN
(
SELECT TRUNC(s.begin_interval_time,’HH24′),MAX(r.CURRENT_UTILIZATION) “no_of_sess”
FROM dba_hist_resource_limit r,dba_hist_snapshot s
WHERE r.snap_id = s.snap_id
AND TRUNC(s.begin_interval_time) >= TRUNC(sysdate-30)
AND r.instance_number=s.instance_number
AND r.resource_name = ‘sessions’
GROUP BY TRUNC(s.begin_interval_time,’HH24′)
)
GROUP BY TRUNC(sn.begin_interval_time,’HH24′),CURRENT_UTILIZATION
)
AND r.snap_id = s.snap_id
AND r.instance_number = s.instance_number
AND r.resource_name = ‘sessions’
GROUP BY
to_char(TRUNC(s.begin_interval_time,’HH24′),’DD-MON-YYYY HH24:MI:SS’)
ORDER BY snap_begin;

select ss.SNAP_ID, l.CURRENT_UTILIZATION, l.MAX_UTILIZATION, to_char(BEGIN_INTERVAL_TIME,’dd-mm-yyyy HH24:MI’), to_char(END_INTERVAL_TIME,’dd-mm-yyyy HH24:MI’) from DBA_HIST_RESOURCE_LIMIT l, DBA_HIST_SNAPSHOT ss
where ss.SNAP_ID = l.SNAP_ID
and upper(l.RESOURCE_NAME) =’PROCESSES’ and BEGIN_INTERVAL_TIME>sysdate-30 order by 4;

For RAC:
select l.instance_number,ss.SNAP_ID, l.CURRENT_UTILIZATION, l.MAX_UTILIZATION, to_char(END_INTERVAL_TIME,’dd-mm-yyyy HH24:MI’) from DBA_HIST_RESOURCE_LIMIT l, DBA_HIST_SNAPSHOT ss
where ss.SNAP_ID = l.SNAP_ID
and upper(l.RESOURCE_NAME) =’PROCESSES’ and l.instance_number=ss.instance_number and END_INTERVAL_TIME>sysdate-1 order by 1,4;

set pages 999
set lines 90
column c1 heading ‘Event|Name’ format a30
column c2 heading ‘Total|Waits’ format 999,999,999
column c3 heading ‘Seconds|Waiting’ format 999,999
column c4 heading ‘Total|Timeouts’ format 999,999,999
column c5 heading ‘Average|Wait|(in secs)’ format 99.999
ttitle ‘System-wide Wait Analysis|for current wait events’
select
event c1,
total_waits c2,
time_waited / 100 c3,
total_timeouts c4,
average_wait /100 c5
from
sys.v_$system_event
where
event in (
'SQL*Net break/reset to client', 'SQL*Net message from client',
'SQL*Net message to client', 'SQL*Net more data from dblink')
and
event not like ‘%done%’
and
event not like ‘%Idle%’
order by
c2 desc
;

                                                              Average
Event                                 Total      Seconds    Total       Wait
Name                                  Waits      Waiting    Timeouts    (in secs)
------------------------------  ------------  ----------  ----------  ---------
SQL*Net message from client      ############    ########           0       .108
SQL*Net message to client        ############      37,506           0       .000
SQL*Net break/reset to client     297,437,080      76,345           0       .000
SQL*Net more data from dblink      15,416,077      27,355           0       .002

========

SELECT distinct a.sid,a.last_call_et, p1raw,
replace(utl_raw.cast_to_varchar2(hextoraw(SUBSTR (p1raw, 1, 2)))||
utl_raw.cast_to_varchar2(hextoraw(SUBSTR (p1raw, 3, 2)))||
utl_raw.cast_to_varchar2(hextoraw(SUBSTR (p1raw, 5, 2)))||
utl_raw.cast_to_varchar2(hextoraw(SUBSTR (p1raw, 7, 2)))||
utl_raw.cast_to_varchar2(hextoraw(SUBSTR (p1raw, 9, 2)))||
utl_raw.cast_to_varchar2(hextoraw(SUBSTR (p1raw, 11, 2)))||
utl_raw.cast_to_varchar2(hextoraw(SUBSTR (p1raw, 13, 2)))||
utl_raw.cast_to_varchar2(hextoraw(SUBSTR (p1raw, 15, 2))),chr(0),'') as decodep1raw,
b.sql_id,to_char(substr(b.sql_fulltext,1,4000)),a.*
FROM v$session a, v$sql b
WHERE event like ‘%dblink%’ and a.sql_id=b.sql_id;

=================

  1. To see the current internal memory settings, please run the following SQL statements:

COL NAME FORMAT A32
COL VALUE FORMAT A40
set echo on
SPOOL SGAPARAMS.TXT <————– Upload this one
select NAME, PLATFORM_ID, DATABASE_ROLE from v$database;
select * from V$version where banner like ‘Oracle Database%’;
select nam.ksppinm NAME, val.KSPPSTVL VALUE
from x$ksppi nam, x$ksppsv val
where nam.indx = val.indx and (nam.ksppinm like ‘%shared_pool%’ or nam.ksppinm like ‘_4031%’ or nam.ksppinm in (‘_kghdsidx_count’,’_ksmg_granule_size’,’_memory_imm_mode_without_autosga’,’_memory_broker_stat_interval’,’cursor_sharing’,’event’))
order by 1;
SELECT ksmchidx “SubPool”, sum(ksmchsiz) Bytes
FROM x$ksmsp
GROUP BY ksmchidx;
select name,value from v$system_parameter where name in ( ‘memory_max_target’, ‘memory_target’, ‘sga_max_size’, ‘sga_target’, ‘shared_pool_size’, ‘db_cache_size’, ‘large_pool_size’, ‘java_pool_size’, ‘pga_aggregate_target’, ‘workarea_size_policy’, ‘streams_pool_size’ ,’shared_pool_reserved_size’) ;
SPOOL OFF

  1. To see the current state of the Shared Pool, please run the following SQL statements:

SET PAGESIZE 900
SET LINESIZE 120
COL BYTES FORMAT 999999999999999
COL COMPONENT FORMAT A25
col parameter for a25
col “Session Value” for a30
col “Instance Value” for a30
set echo on
SPOOL SPINFO.TXT <------------- Upload this one
select NAME, PLATFORM_ID, DATABASE_ROLE from v$database;
select * from V$version where banner like 'Oracle Database%';
select INSTANCE_NAME, to_char(STARTUP_TIME,'DD/MM/YYYY HH24:MI:SS') "STARTUP_TIME" from gv$instance;
select REQUEST_FAILURES, LAST_FAILURE_SIZE from V$SHARED_POOL_RESERVED;
/* Shared Pool Reserved 4031 information */
select REQUESTS, REQUEST_MISSES, free_space, avg_free_size, free_count, max_free_size from V$SHARED_POOL_RESERVED;
select name, bytes from v$sgastat where pool = 'shared pool' and (bytes > 999999 or name = 'free memory') and rownum<=21
order by bytes desc ;

/* Total Shared Pool Usage */
select sum(bytes)/1024/1024 "Total Shared Pool Usage (Mb)" from v$sgastat where pool = 'shared pool' and name != 'free memory';
/* Current SGA Buffer & Pool sizes */
select component, current_size/1024/1024 “CURRENT_SIZE (Mb)” from v$sga_dynamic_components;
select a.ksppinm “Parameter”, b.ksppstvl “Session Value”, c.ksppstvl “Instance Value”
from sys.x$ksppi a, sys.x$ksppcv b, sys.x$ksppsv c
where a.indx = b.indx and a.indx = c.indx and a.ksppinm in
(‘__shared_pool_size’,’__db_cache_size’,’__large_pool_size’,’__java_pool_size’,’__streams_pool_size’,’__pga_aggregate_target’,’__sga_target’,’memory_target’);
SPOOL OFF

spool advisory.txt ———-> Upload this one
show parameter db_cache_advice;
COLUMN size_for_estimate FORMAT 999,999,999,999 heading ‘Cache Size (MB)’
COLUMN buffers_for_estimate FORMAT 999,999,999,999 heading ‘Buffers’
COLUMN estd_physical_read_factor FORMAT 999.90 heading ‘Estd Phys|Read Factor’
COLUMN estd_physical_reads FORMAT 999,999,999,999 heading ‘Estd Phys| Reads’
SELECT size_for_estimate, buffers_for_estimate, estd_physical_read_factor, estd_physical_reads
FROM V$DB_CACHE_ADVICE
WHERE name = ‘DEFAULT’
AND block_size = (SELECT value FROM V$PARAMETER WHERE name = ‘db_block_size’)
AND advice_status = ‘ON’;
SELECT DISTINCT COMPONENT, MAX(round(TARGET_SIZE/1024/1024)) “MAXIMUM SIZE [MB]”
FROM DBA_HIST_MEMORY_RESIZE_OPS
GROUP BY COMPONENT
ORDER BY COMPONENT;
col sga_size_factor for 990.00
col estd_db_time_factor for 990.00
col ESTD_PHYSICAL_READS for 999,999,999,999
col sga_Size for 999,999
SELECT sga_size, sga_size_factor, estd_db_time_factor,ESTD_PHYSICAL_READS
FROM v$sga_target_advice
ORDER BY sga_size ASC;
col target_mb for 999,999
col ESTD_OVERALLOC_COUNT for 999,999,999
SELECT round(PGA_TARGET_FOR_ESTIMATE/1024/1024) target_mb,
ESTD_PGA_CACHE_HIT_PERCENTAGE cache_hit_perc,
ESTD_OVERALLOC_COUNT
FROM V$PGA_TARGET_ADVICE;
SELECT shared_pool_size_for_estimate “Size in MB”,
shared_pool_size_factor “Size Factor”,
estd_lc_time_saved “Time Saved in sec”
FROM v$shared_pool_advice;
spool off;

set markup html on
spool sga-resize.html <————– Upload this one
/* SGA Resize Operations */
set pages 9999
set lines 512
set numwidth 18
alter session set nls_date_format=’DD-MON-YYYY HH24:MI:SS’;
select start_time, end_time, component, oper_type, oper_mode, initial_size,
target_size, final_size, status
from v$sga_resize_ops;
select component, AVG(FINAL_SIZE) “AVG FINAL”, MEDIAN(FINAL_SIZE) “MEDIAN FINAL”, MAX(FINAL_SIZE) “MAX FINAL”
from v$sga_resize_ops
group by component;
set markup html off
spool off

================
How can we check in which datafile my object resides?

select owner,a.segment_name,a.file_id,b.file_name Datafile_name from dba_extents a,dba_data_files b where a.file_id=b.file_id and a.segment_name=’YOUR OBJECT NAME’;

How can we check what objects are there in a datafile?

select a.segment_name,a.file_id,b.file_name Datafile_name from dba_extents a,dba_data_files b where a.file_id=b.file_id and b.file_name='YOUR DATAFILE NAME';

==========
Tablespace Free Space summary:

SET ECHO off
REM NAME: TFSFSSUM.SQL
REM USAGE:”@path/tfsfssum”
REM ————————————————————————
REM REQUIREMENTS:
REM SELECT ON DBA_FREE_SPACE, DBA_DATA_FILES
REM ————————————————————————
REM AUTHOR:
REM Cary Millsap, Oracle Corporation
REM (c)1994 Oracle Corporation
REM ————————————————————————
REM PURPOSE:
REM Displays tablespace free space and fragmentation for each
REM tablespace, Prints the total size, the amount of space available,
REM and a summary of freespace fragmentation in that tablespace.
REM ————————————————————————
REM EXAMPLE:
REM
REM Database Freespace Summary
REM
REM Free Largest Total Available Pct
REM Tablespace Frags Frag (KB) (KB) (KB) Used
REM —————- ——– ———— ———— ———— —-
REM DES2 1 30,210 40,960 30,210 26
REM DES2_I 1 22,848 30,720 22,848 26
REM RBS 16 51,198 59,392 55,748 6
REM SYSTEM 3 4,896 92,160 5,930 94
REM TEMP 5 130 550 548 0
REM TOOLS 10 76,358 117,760 87,402 26
REM USERS 1 46 1,024 46 96
REM ——– ———— ————
REM sum 37 342,566 202,732
REM
REM ————————————————————————
REM DISCLAIMER:
REM This script is provided for educational purposes only. It is NOT
REM supported by Oracle World Wide Technical Support.
REM The script has been tested and appears to work as intended.
REM You should always run new scripts on a test instance initially.
REM ————————————————————————
REM Main text of script follows:

ttitle –
center ‘Database Freespace Summary’ skip 2

comp sum of nfrags totsiz avasiz on report
break on report

col tsname format a30 justify c heading ‘Tablespace’
col nfrags format 999,990 justify c heading ‘Free|Frags’
col mxfrag format 999,999,990 justify c heading ‘Largest|Frag (KB)’
col totsiz format 999,999,990 justify c heading ‘Total|(KB)’
col avasiz format 999,999,990 justify c heading ‘Available|(KB)’
col pctusd format 990 justify c heading ‘Pct|Used’

select
total.tablespace_name tsname,
count(free.bytes) nfrags,
nvl(max(free.bytes)/1024,0) mxfrag,
total.bytes/1024 totsiz,
nvl(sum(free.bytes)/1024,0) avasiz,
(1-nvl(sum(free.bytes),0)/total.bytes)*100 pctusd
from
dba_data_files total,
dba_free_space free
where
total.tablespace_name = free.tablespace_name(+)
and total.file_id=free.file_id(+)
group by
total.tablespace_name,
total.bytes
/

===========================================

Sample Output from the tfsfssum.sql script:

Database Freespace Summary

                 Free     Largest        Total        Available    Pct
Tablespace       Frags    Frag (KB)      (KB)         (KB)         Used
---------------- -------- ------------ ------------ ------------ ----
DES2                    1       30,210       40,960       30,210   26
DES2_I                  1       22,848       30,720       22,848   26
RBS                    16       51,198       59,392       55,748    6
SYSTEM                  3        4,896       92,160        5,930   94
TEMP                    5          130          550          548    0
TOOLS                  10       76,358      117,760       87,402   26
USERS                   1           46        1,024           46   96
                 -------- ------------ ------------ ------------
sum                    37                   342,566      202,732

================

Tablespace Fragmentation:

========

Script : tfstsfgm

SET ECHO off
REM NAME:TFSTSFRM.SQL
REM USAGE:”@path/tfstsfgm”
REM ————————————————————————
REM REQUIREMENTS:
REM SELECT ON DBA_FREE_SPACE
REM ————————————————————————
REM PURPOSE:
REM The following is a script that will determine how many extents
REM of contiguous free space you have in Oracle as well as the
REM total amount of free space you have in each tablespace. From
REM these results you can detect how fragmented your tablespace is.
REM
REM The ideal situation is to have one large free extent in your
REM tablespace. The more extents of free space there are in the
REM tablespace, the more likely you will run into fragmentation
REM problems. The size of the free extents is also very important.
REM If you have a lot of small extents (too small for any next
REM extent size) but the total bytes of free space is large, then
REM you may want to consider defragmentation options.
REM ————————————————————————
REM DISCLAIMER:
REM This script is provided for educational purposes only. It is NOT
REM supported by Oracle World Wide Technical Support.
REM The script has been tested and appears to work as intended.
REM You should always run new scripts on a test instance initially.
REM ————————————————————————
REM Main text of script follows:

create table SPACE_TEMP (
TABLESPACE_NAME CHAR(30),
CONTIGUOUS_BYTES NUMBER)
/

declare
cursor query is select *
from dba_free_space
order by tablespace_name, block_id;
this_row query%rowtype;
previous_row query%rowtype;
total number;

begin
open query;
fetch query into this_row;
previous_row := this_row;
total := previous_row.bytes;
loop
fetch query into this_row;
exit when query%notfound;
if this_row.block_id = previous_row.block_id + previous_row.blocks then
total := total + this_row.bytes;
insert into SPACE_TEMP (tablespace_name)
values (previous_row.tablespace_name);
else
insert into SPACE_TEMP values (previous_row.tablespace_name,
total);
total := this_row.bytes;
end if;
previous_row := this_row;
end loop;
insert into SPACE_TEMP values (previous_row.tablespace_name,
total);
end;
.
/

set pagesize 60
set newpage 0
set echo off
ttitle center ‘Contiguous Extents Report’ skip 3
break on “TABLESPACE NAME” skip page duplicate
spool contig_free_space.lis
rem
column “CONTIGUOUS BYTES” format 999,999,999,999,999
column “COUNT” format 9999999
column “TOTAL BYTES” format 999,999,999,999,999
column “TODAY” noprint new_value new_today format a1
rem
select TABLESPACE_NAME “TABLESPACE NAME”,
CONTIGUOUS_BYTES “CONTIGUOUS BYTES”
from SPACE_TEMP
where CONTIGUOUS_BYTES is not null
order by TABLESPACE_NAME, CONTIGUOUS_BYTES desc;

select tablespace_name, count(*) “# OF EXTENTS”,
sum(contiguous_bytes) “TOTAL BYTES”
from space_temp
group by tablespace_name;

spool off

drop table SPACE_TEMP
/

=====================

Script to Report Space Distribution and utilization (Doc ID 135677.1)

Import Progress:

Troubleshoot Import job status: https://community.oracle.com/blogs/dearDBA/2015/07/22/impdp-stalls-for-no-apparent-reason

2) Querying DBA_DATAPUMP_JOBS view:-

select * from dba_datapump_jobs;
The STATE column of the above view would give you the status of the JOB to show whether EXPDP or IMPDP jobs are still running, or have terminated with either a success or failure status.
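
A more selective version of the same check, using standard DBA_DATAPUMP_JOBS columns:

select owner_name, job_name, operation, job_mode, state, degree, attached_sessions from dba_datapump_jobs;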

3) Querying V$SESSION_LONGOPS & V$SESSION views:-

SELECT b.username, a.sid, b.opname, b.target,
round(b.SOFAR*100/b.TOTALWORK,0) || ‘%’ as “%DONE”, b.TIME_REMAINING,
to_char(b.start_time,’YYYY/MM/DD HH24:MI:SS’) start_time
FROM v$session_longops b, v$session a
WHERE a.sid = b.sid and b.start_time >=sysdate-1 ORDER BY 6;

4) Querying V$SESSION_LONGOPS & V$DATAPUMP_JOB views:-

SELECT sl.sid, sl.serial#, sl.sofar, sl.totalwork, dp.owner_name, dp.state, dp.job_mode
FROM v$session_longops sl, v$datapump_job dp
WHERE sl.opname = dp.job_name
AND sl.sofar != sl.totalwork;

5) Querying all the related views with a single query:-

select x.job_name,b.state,b.job_mode,b.degree
, x.owner_name,z.sql_text, p.message
, p.totalwork, p.sofar
, round((p.sofar/p.totalwork)*100,2) done
, p.time_remaining
from dba_datapump_jobs b
left join dba_datapump_sessions x on (x.job_name = b.job_name)
left join v$session y on (y.saddr = x.saddr)
left join v$sql z on (y.sql_id = z.sql_id)
left join v$session_longops p ON (p.sql_id = y.sql_id)
WHERE y.module=’Data Pump Worker’
AND p.time_remaining > 0;
6) Use the following procedure and replace the JOB_OWNER & JOB_NAME as per your env. which you fetch from import.log:-

;;;
Import: Release 12.1.0.2.0 – Production on Thu Jun 29 00:29:09 2017

Copyright (c) 1982, 2014, Oracle and/or its affiliates. All rights reserved.
;;;
Connected to: Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 – 64bit Production
With the Partitioning, OLAP, Advanced Analytics and Real Application Testing options
Master table “SYSTEM”.”SYS_IMPORT_FULL_04″ successfully loaded/unloaded
Here the JOB_OWNER is SYSTEM and JOB_NAME is SYS_IMPORT_FULL_04.

And below is the procedure:-

SET SERVEROUTPUT ON
DECLARE
ind NUMBER;
h1 NUMBER;
percent_done NUMBER;
job_state VARCHAR2(30);
js ku$_JobStatus;
ws ku$_WorkerStatusList;
sts ku$_Status;
BEGIN
h1 := DBMS_DATAPUMP.attach('SYS_IMPORT_FULL_04', 'SYSTEM');
dbms_datapump.get_status(h1,
dbms_datapump.ku$_status_job_error +
dbms_datapump.ku$_status_job_status +
dbms_datapump.ku$_status_wip, 0, job_state, sts);
js := sts.job_status;
ws := js.worker_status_list;
dbms_output.put_line(‘*** Job percent done = ‘ ||
to_char(js.percent_done));
dbms_output.put_line(‘restarts – ‘||js.restart_count);
ind := ws.first;
while ind is not null loop
dbms_output.put_line(‘rows completed – ‘||ws(ind).completed_rows);
ind := ws.next(ind);
end loop;
DBMS_DATAPUMP.detach(h1);
end;
/
7) Also for any errors you can check the alert log and query the DBA_RESUMABLE view.

select name, sql_text, error_msg from dba_resumable;

That's all I can think of at the moment; I'll add more queries to this post if I find another view that can be used to get information about Data Pump jobs.

Progress of schema stats:

select
total_tables,
round((done_tables/total_tables)*100,2) "%tables|done",
round((done_rows/total_rows)*100,2) "%rows|done"
from (
select
count(*) total_tables,
sum(num_rows) total_rows,
sum(decode(sign(sysdate-last_analyzed-1/2),-1,1)) done_tables,
sum(decode(sign(sysdate-last_analyzed-1/2),-1,num_rows)) done_rows
from dba_tables
where owner=upper(‘CCF_ADMIN’));

================================

SQL> select table_name,partition_name,STATS_UPDATE_TIME from dba_tab_stats_history where partition_name=’UDR_LT_20180106′; ==> History of stats gathered
select OPERATION,TARGET,START_TIME,END_TIME from dba_optstat_operations where START_TIME>=sysdate-1;

To see the number of objects per day whose statistics changed (including the GATHER_STATS_JOB and manual gathering), you can use:
SET lines 150
SET pages 200
SELECT TO_CHAR(stats_update_time,’yyyy-mm-dd’) AS stats_update_time, COUNT(*)
FROM dba_tab_stats_history
GROUP BY TO_CHAR(stats_update_time,’yyyy-mm-dd’)
ORDER BY 1 DESC;

SELECT client_name, window_name, jobs_created, jobs_started, jobs_completed FROM dba_autotask_client_history WHERE client_name like ‘%stats%’;

SELECT * FROM dba_autotask_client_history WHERE client_name like ‘%stats%’; —> Optimizer stats collection

Select client_name, JOB_SCHEDULER_STATUS from DBA_AUTOTASK_CLIENT_JOB where client_name=’auto optimizer stats collection’;

SELECT client_name, window_name, jobs_created, jobs_started, jobs_completed FROM dba_autotask_client_history WHERE client_name like ‘%stats%’;

=============================

List Sessions Details for a given period

— List Session Details for a Given Time Period

— s_time format = ’22/OCT/2014 04:00:00.000′
— e_time format = ’23/OCT/2014 04:00:00.000′

— inst_no = Instance Number for RAC. Use 1 for non RAC

SET PAUSE ON
SET PAUSE ‘Press Return To Continue’
SET HEADING ON
SET LINESIZE 300
SET PAGESIZE 60

COLUMN Sample_Time FOR A12
COLUMN username FOR A20
COLUMN sql_text FOR A40
COLUMN program FOR A40
COLUMN module FOR A40

SELECT
sample_time,
u.username,
h.program,
h.module,
s.sql_text
FROM
DBA_HIST_ACTIVE_SESS_HISTORY h,
DBA_USERS u,
DBA_HIST_SQLTEXT s
WHERE sample_time
BETWEEN ‘&s_time’ and ‘&e_time’
AND
INSTANCE_NUMBER=&inst_no
AND h.user_id=u.user_id
AND h.sql_id = s.sql_iD
ORDER BY 1
/

Updated version of the above:

compute count of instance_number on instance_number
break on instance_number
—->>>> TO get count

SET PAUSE ON
SET PAUSE ‘Press Return To Continue’
SET HEADING ON
SET LINESIZE 300
SET PAGESIZE 60

COLUMN Sample_Time FOR A12
COLUMN username FOR A20
COLUMN sql_text FOR A40
COLUMN program FOR A40
COLUMN module FOR A40

SELECT
h.Instance_number,to_char(sample_time,’DD/MM/YY HH24:MI’),u.username,h.program,h.module,s.sql_text FROM
DBA_HIST_ACTIVE_SESS_HISTORY h,
DBA_USERS u,
DBA_HIST_SQLTEXT s
WHERE to_char(sample_time,’DD/MM/YY HH24:MI’)
BETWEEN ’19/09/17 14:00′ and ’19/09/17 21:00′
AND
INSTANCE_NUMBER in (1,2)
and u.username in (‘NBA’,’EMAGINEROOT’,’EMAGNBAROOT’)
AND h.user_id=u.user_id
AND h.sql_id = s.sql_iD
ORDER BY 1
/
compute count of instance_number on instance_number
break on instance_number
Number of user sessions connected to database historically:

SELECT
to_char(sample_time,’DD/MM/YY HH24:MI:SS’),u.username,h.module,count(u.username) FROM
DBA_HIST_ACTIVE_SESS_HISTORY h,
DBA_USERS u
WHERE to_char(sample_time,’DD/MM/YY HH24:MI:SS’)
BETWEEN ’26/09/17 15:12:00′ and ’26/09/17 15:21:07′
and (u.username <>’SYS’ or u.username is not null)
AND h.user_id=u.user_id
group by to_char(sample_time,’DD/MM/YY HH24:MI:SS’),u.username,h.module
ORDER BY 1;
/

COLUMN Sample_Time FOR A12
COLUMN username FOR A20
COLUMN sql_text FOR A40
COLUMN program FOR A40
COLUMN module FOR A40

SELECT
instance_number,
to_char(sample_time,’DD/MM/YY HH24:MI:SS’),
u.username,
h.program,
h.module,
h.SQL_ID,
s.sql_text
FROM
DBA_HIST_ACTIVE_SESS_HISTORY h,
DBA_USERS u,
DBA_HIST_SQLTEXT s
WHERE to_char(sample_time,’DD/MM/YY HH24:MI:SS’)
BETWEEN ’10/05/21 14:00:00′ and ’10/05/21 14:15:00′
AND (u.username <>’SYS’ and u.username is not null)
and INSTANCE_NUMBER in (1,2)
AND h.user_id=u.user_id
and h.sql_id=s.sql_id
ORDER BY 1,2;

select SAMPLE_ID,SAMPLE_TIME,username,SQL_ID,BLOCKING_SESSION,BLOCKING_SESSION_STATUS,BLOCKING_SESSION_SERIAL#,machine from dba_hist_active_sess_history h,
dba_users u where u.user_id=h.user_id and to_char(SAMPLE_TIME,’DD/MM/YY HH24:MI’) between ’25/01/18 22:30′ and ’25/01/18 23:00′ and BLOCKING_SESSION is not null;

SELECT distinct a.sql_id, a.blocking_session,a.blocking_session_serial#,
u.username,s.sql_text,a.module
FROM V$ACTIVE_SESSION_HISTORY a, v$sql s,dba_users u
where a.sql_id=s.sql_id
and blocking_session is not null
–and a.user_id=u.user_id
and a.sample_time between to_date(’26/09/17 15:12′, ‘dd/mm/yy hh24:mi’)

and to_date(’26/09/17 15:23′, ‘dd/mm/yy hh24:mi’);

SELECT extract(day from snap_interval)*24*60+extract(hour from snap_interval)*60+extract(minute from snap_interval) snapshot_Interval,
extract(day from retention)*24*60+extract(hour from retention)*60+extract(minute from retention) retention_Interval
FROM dba_hist_wr_control;

Traces sql query in our session: (11g)

BEGIN
DBMS_SESSION.session_trace_enable (waits => TRUE,
binds => TRUE,
plan_stat => ‘all_executions’
);
END;
/

In V$SESSION, the columns SQL_TRACE, SQL_TRACE_WAITS, SQL_TRACE_BINDS show the status of tracing for a session, and in V$PROCESS, the columns TRACEID and TRACEFILE show the name of the trace file and the TRACEFILE_IDENTIFIER in effect for a given server process.

The following query (11g only) shows the values of these columns for the current session:

SELECT s.sql_trace, s.sql_trace_waits, s.sql_trace_binds,
traceid, tracefile
FROM v$session s JOIN v$process p ON (p.addr = s.paddr)
WHERE audsid = USERENV (‘SESSIONID’)
/

===================== SNAG COMMANDS =======================
select grantee,granted_role from dba_role_privs where granted_role=’DBA’;
select name,value,DESCRIPTION,update_comment from v$parameter WHERE name like ‘%audit%’ or name like ‘optimizer_capture_sql_plan_baselines’ or name
like ‘optimizer_use_sql_plan_baselines’ or name like ‘db_%block_%’ or name like ‘db_cach%’;
select JOB_NAME from dba_scheduler_jobs;
select job,what from dba_jobs;
select distinct autoextensible from dba_data_files;
select count(*) from gv$lock where block=1;
show parameter log_archive
select segment_name,tablespace_name from dba_segments where segment_name=’AUD$’;
SELECT * FROM NLS_DATABASE_PARAMETERS;
SELECT value$ FROM sys.props$ WHERE name = ‘NLS_CHARACTERSET’ ;
select name from v$controlfile
union all
select member from v$logfile
union all
select to_char(bytes/1024/1024/1024) “GB” from v$log;

Select *from dba_stmt_audit_opts;
Select *from dba_priv_audit_opts;
Select *from dba_obj_audit_opts;

SELECT username,userhost,returncode,timestamp
FROM dba_audit_session where timestamp>=sysdate-2
ORDER BY sessionid DESC;

  select sessionid, to_char(timestamp#,'DD-MON-YY:HH24:MI:SS') login,

userid, to_char(logoff$time,’DD-MON-YY:HH24:MI:SS’) logoff from sys.aud$ where login<sysdate-90 and userid in (”);

SELECT username,userhost,returncode,timestamp
FROM dba_audit_session
ORDER BY sessionid DESC;

SELECT USERNAME, OS_USERNAME, USERHOST, EXTENDED_TIMESTAMP
FROM SYS.DBA_AUDIT_SESSION WHERE returncode != 0 and username = ‘&Account_Locked’
and EXTENDED_TIMESTAMP > (systimestamp-2) order by 4 desc

SELECT aud2.*
FROM dba_audit_session aud1, dba_audit_session aud2
WHERE aud1.returncode = 28000
and aud2.extended_timestamp < aud1.extended_timestamp and aud1.username = aud2.username and aud1.EXTENDED_TIMESTAMP > (systimestamp-7)
and aud1.username=’&Account_Locked’
/

SELECT USERNAME, LOGOFF_TIME, LOGOFF_LREAD, LOGOFF_PREAD,LOGOFF_LWRITE, LOGOFF_DLOCK FROM DBA_AUDIT_SESSION;
select * from all_def_audit_opts;
select CLIENT_NAME,STATUS from dba_autotask_client;
select file_name,autoextensible from dba_temp_files;
SELECT extract(day from snap_interval)*24*60+extract(hour from snap_interval)*60+extract(minute from snap_interval) snapshot_Interval,
extract(day from retention)*24*60+extract(hour from retention)*60+extract(minute from retention) retention_Interval
FROM dba_hist_wr_control;
select name,type,total_mb,free_mb,usable_file_mb from v$asm_diskgroup;
select name Diskgroup,round(total_mb/1024,2) “Total_GB”,round(free_mb/1024,2) “FREE GB”,round(total_mb/1024-free_mb/1024,2) “USED GB” ,round(((free_mb/total_mb)*100),2) “Available%” from v$asm_diskgroup;

select name Diskgroup,round(total_mb/1024/1024,2) “Total_TB”,round(free_mb/1024/1024,2) “FREE TB”,round(total_mb/1024/1024-free_mb/1024/1024,2) “USED TB”,round(((free_mb/total_mb)*100),2) “Available%” from v$asm_diskgroup;

==========================

select dbid, instance_number, snap_id, table_name, error_number
from DBA_HIST_SNAP_ERROR
order by dbid, instance_number, snap_id
/

select dbid, instance_number, startup_time, min(snap_id), max(snap_id)
from DBA_HIST_SNAPSHOT
group by dbid, instance_number, startup_time
order by dbid, instance_number, startup_time

/

Enable Trace at database level/session level:

http://www.orafaq.com/wiki/SQL_Trace

https://oracle-base.com/articles/misc/sql-trace-10046-trcsess-and-tkprof

=================
If TFA is not installed:

Database logs & trace files:

cd $(orabase)/diag/rdbms
tar cf - $(find . -name '*.trc' -exec egrep -l "" {} \; | grep -v bucket) | gzip > /tmp/database_trace_files.tar.gz

ASM logs & trace files:

cd $(orabase)/diag/asm/+asm/
tar cf - $(find . -name "*.trc" -exec egrep -l "" {} \; | grep -v bucket) | gzip > /tmp/asm_trace_files.tar.gz

OS logs:

/var/adm/messages* or /var/log/messages* or ‘errpt -a’ or Windows System Event Viewer log (saved as .TXT file)

============== Purge awr snapshots manually=====
conn / as sysdba
exec DBMS_WORKLOAD_REPOSITORY.DROP_SNAPSHOT_RANGE(low_snap_id => 1, high_snap_id => 17933, dbid => 3585145995);
conn / as sysdba;
set linesize 135
col owner format a10
col segment_name format a30
col partition_name format a30
SELECT owner,
segment_name,
partition_name,
segment_type,
bytes/1024/1024/1024 Size_GB
FROM dba_segments
WHERE segment_name=’WRH$_ACTIVE_SESSION_HISTORY’;
=================================================== TOP 50 Tables in database =================

select top50.owner, top50.table_name, meg, a.num_rows
from dba_tables a,
(Select * from (
SELECT
owner, table_name, TRUNC(sum(bytes)/1024/1024) Meg
FROM
(SELECT segment_name table_name, owner, bytes
FROM dba_segments
WHERE segment_type like ‘TABLE%’
UNION ALL
SELECT i.table_name, i.owner, s.bytes
FROM dba_indexes i, dba_segments s
WHERE s.segment_name = i.index_name
AND s.owner = i.owner
AND s.segment_type like ‘INDEX%’
UNION ALL
SELECT l.table_name, l.owner, s.bytes
FROM dba_lobs l, dba_segments s
WHERE s.segment_name = l.segment_name
AND s.owner = l.owner
AND s.segment_type = ‘LOBSEGMENT’
UNION ALL
SELECT l.table_name, l.owner, s.bytes
FROM dba_lobs l, dba_segments s
WHERE s.segment_name = l.index_name
AND s.owner = l.owner
AND s.segment_type = ‘LOBINDEX’)
GROUP BY table_name, owner
HAVING SUM(bytes)/1024/1024 > 10 /* Ignore small tables */
ORDER BY SUM(bytes) desc
) where rownum < 51) top50
where top50.owner =a.owner
and top50.table_name = a.table_name
order by meg desc, num_rows desc;
================ Top Objects in Tablespace============

SELECT tablespace_name, owner, segment_type “Object Type”,segment_name “Object_Name”,
COUNT(owner) “Number of Objects”,
ROUND(SUM(bytes)/1024/1024/1024, 2) “Total Size in Gb”
FROM sys.dba_segments
WHERE tablespace_name IN (‘&Tabelspace_Name’)
GROUP BY tablespace_name, owner, segment_type,segment_name
ORDER BY tablespace_name, owner, segment_type,segment_name;

======================

IO Calibrate:

spool iocalib.log
set time on timing on
set serveroutput on
set echo on
DECLARE
lat INTEGER;
iops INTEGER;
mbps INTEGER;
BEGIN
DBMS_RESOURCE_MANAGER.CALIBRATE_IO(336, 20, iops, mbps, lat);
DBMS_OUTPUT.PUT_LINE (‘max_iops = ‘ || iops);
DBMS_OUTPUT.PUT_LINE (‘latency = ‘ || lat);
dbms_output.put_line(‘max_mbps = ‘ || mbps);
end;
/
spool off

======================================

Matching LOB Indexes and Segments


COL owner FORMAT A18 HEADING “Owner”
COL TABLE_NAME FORMAT A20 HEADING “Table|Name”
COL column_name FORMAT A30 HEADING “Column|Name”
COL segment_name FORMAT A26 HEADING “Segment Name”
COL segment_type FORMAT A10 HEADING “Segment|Type”
COL bytes HEADING “Segment|Bytes”

SELECT l.owner
, l.table_name
, l.column_name
, s.segment_name
, s.segment_type
, s.bytes
FROM dba_lobs l
, dba_segments s
WHERE REGEXP_SUBSTR(l.segment_name,'([[:alnum:]]|[[:punct:]])+’
, CASE
WHEN REGEXP_INSTR(s.segment_name,'[[:digit:]]’,1) > 0
THEN REGEXP_INSTR(s.segment_name,'[[:digit:]]’,1)
ELSE 1
END) =
REGEXP_SUBSTR(s.segment_name,'([[:alnum:]]|[[:punct:]])+’
, CASE
WHEN REGEXP_INSTR(s.segment_name,'[[:digit:]]’,1) > 0
THEN REGEXP_INSTR(s.segment_name,'[[:digit:]]’,1)
ELSE 1
END)
AND l.table_name = UPPER(‘&table_name’)
AND l.owner = UPPER(‘&owner’)
ORDER BY l.column_name, s.segment_name;

============= Tablespace Usage =============
http://shahiddba.blogspot.in/2012/05/script-to-monitor-tablespacesdatafiles.html

Select t.tablespace, t.totalspace as "Totalspace(MB)", round((t.totalspace-fs.freespace),2) as "Used Space(MB)", fs.freespace as "Freespace(MB)",
round(((t.totalspace-fs.freespace)/t.totalspace)*100,2) as "% Used", round((fs.freespace/t.totalspace)*100,2) as "% Free" from
(select round(sum(d.bytes)/(1024*1024)) as totalspace, d.tablespace_name tablespace from dba_data_files d group by d.tablespace_name) t, (select round(sum(f.bytes)/(1024*1024)) as freespace, f.tablespace_name tablespace from
dba_free_space f group by f.tablespace_name) fs where t.tablespace=fs.tablespace order by 6;

============================
SQL> set linesize 300 pages 300
SQL> select * from V$IO_CALIBRATION_STATUS;

STATUS        CALIBRATION_TIME
------------- --------------------------------
READY         05-OCT-17 01.56.40.667 PM

SQL> select * from DBA_RSRC_IO_CALIBRATE;

START_TIME                     END_TIME                       MAX_IOPS MAX_MBPS MAX_PMBPS LATENCY NUM_PHYSICAL_DISKS
------------------------------ ------------------------------ -------- -------- --------- ------- ------------------
05-OCT-17 01.42.50.169564 PM   05-OCT-17 01.56.40.667182 PM       8948      627       118       0                336

After I/O calibration has been successfully completed, if the query or statement in question meets or exceeds the serial execution time represented by the parallel_min_time_threshold parameter, then Auto DOP will set the degree of parallelism, regardless of whether any of the objects are explicitly set for parallel execution. You may find that once I/O calibration is completed, some tasks may take longer to complete because they are queued. This may occur because the parallel resources allocated may not be sufficient for all the queries and statements that qualify for parallel execution. There is a Metalink Note, document id 1393405.1, that explains how to delete the I/O calibration statistics. In summary, there is a single table, RESOURCE_IO_CALIBRATE$, that the two views are based on. Deleting the data from this table clears the I/O calibration statistics, so that Auto DOP will no longer function.”
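A minimal sketch of the clean-up described in that note (assumes SYS access; the delete removes all calibration rows, so Auto DOP driven by calibration stops working - test outside production first):

delete from sys.resource_io_calibrate$;
commit;
-- Both should now show no calibration data
select * from dba_rsrc_io_calibrate;
select * from v$io_calibration_status;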

Should you run CALIBRATE_IO on your system? Most likely the answer is “yes”, but it isn’t run for the storage ‘statistics’ it gathers, it’s run to enable Auto DOP and parallel queueing. There are third-party utilities which are better and much more consistent to generate usable (to the DBA) storage metrics; unfortunately these utilities won’t generate the data Oracle needs to make Auto DOP functional. So it seems that CALIBRATE_IO is a ‘necessary evil’; it all comes down to knowing why CALIBRATE_IO should be run. Hopefully you now have that knowledge.

https://docs.oracle.com/database/121/TGDBA/pfgrf_iodesign.htm#TGDBA95222
http://arup.blogspot.in/2008/08/resource-manager-io-calibration-in-11g.html
http://dbastreet.com/blog/?p=695

==================

col name format a25;
col p1 format a10;
col p2 format a10;
col p3 format a10;
SELECT NAME, PARAMETER1 P1, PARAMETER2 P2, PARAMETER3 P3
FROM V$EVENT_NAME
WHERE NAME = ‘&event_name’;

Enter value for event_name: latch: cache buffers chains
old 3: WHERE NAME = ‘&event_name’
new 3: WHERE NAME = ‘latch: cache buffers chains’

NAME                          P1         P2         P3
----------------------------- ---------- ---------- ----------
latch: cache buffers chains   address    number     tries

================== Full Table Scan =================
select name, value from v$mystat ms, v$statname sn where ms.statistic# = sn.statistic# and name like ‘%table scan%’;

select ss.username||'(‘||se.sid||’) ‘ “User Process”,
sum(decode(name,’table scans (short tables)’,value)) “Short Scans”,
sum(decode(name,’table scans (long tables)’, value)) “Long Scans”,
sum(decode(name,’table scan rows gotten’,value)) “Rows Retreived”,
round((sum(decode(name,’table scan rows gotten’,value)) – (sum(decode(name,’table scans (short tables)’,value)) * 5)) / (sum(decode(name,’table scans (long tables)’, value))),2) “Long Scans Length”
from v$session ss,
v$sesstat se,
v$statname sn
where se.statistic# = sn.statistic#
and (name like ‘%table scans (short tables)%’
or name like ‘%table scans (long tables)%’
or name like ‘%table scan rows gotten%’)
and se.sid = ss.sid
and ss.username is not null
group by ss.username||'(‘||se.sid||’) ‘
having sum(decode(name,’table scans (long tables)’, value)) != 0
order by 3 desc;

ttitle 'Large Full-table scans|Per Snapshot Period'
col c1 heading ‘Begin|Interval|time’ format a20
col c4 heading ‘FTS|Count’ format 999,999
break on c1 skip 2
break on c2 skip 2
select to_char(sn.begin_interval_time,’yy-mm-dd hh24′) c1, count(1) c4
from dba_hist_sql_plan p, dba_hist_sqlstat s, dba_hist_snapshot sn,dba_segments o
where p.object_owner <> ‘SYS’ and p.object_owner = o.owner
and p.object_name = o.segment_name and o.blocks > 1000
and p.operation like ‘%TABLE ACCESS%’ and p.options like ‘%FULL%’
and p.sql_id = s.sql_id and s.snap_id = sn.snap_id
group by to_char(sn.begin_interval_time,’yy-mm-dd hh24′)
order by 1;

SELECT ss.username
|| ‘(‘
|| se.sid
|| ‘) ‘ “User Process”,
SUM (DECODE (NAME, ‘table scans (short tables)’, VALUE)) “Short Scans”,
SUM (DECODE (NAME, ‘table scans (long tables)’, VALUE)) “Long Scans”,
SUM (DECODE (NAME, ‘table scan rows gotten’, VALUE)) “Rows Retrieved”
FROM v$session ss, v$sesstat se, v$statname sn
WHERE se.statistic# = sn.statistic#
AND ( NAME LIKE ‘%table scans (short tables)%’
OR NAME LIKE ‘%table scans (long tables)%’
OR NAME LIKE ‘%table scan rows gotten%’
)
AND se.sid = ss.sid
AND ss.username IS NOT NULL
GROUP BY ss.username
|| ‘(‘
|| se.sid
|| ‘) ‘;

select snap_id,to_char(sample_time,’DD/MM/YY HH24:MI’),sql_id,sql_plan_options,count(SQL_ID) from dba_hist_active_sess_history where sql_plan_options=’FULL’ and to_char(sample_time,’DD/MM/YY HH24:MI’)
between ’28/04/18 12:00′ and ’28/04/18 12:20′ group by snap_id,to_char(sample_time,’DD/MM/YY HH24:MI’),sql_id,sql_plan_options order by count(SQL_ID);

select * from
(
select
sql_id,
sql_plan_hash_value,
event,sql_exec_id,
sql_exec_start,current_obj#,
sql_plan_line_id,
sql_plan_operation,
sql_plan_options,
SUM (delta_read_io_requests) lio_read ,
SUM (delta_read_io_bytes) pio_read ,
count(*) count_1
from
dba_hist_active_sess_history
where
sql_id=’&SQL_ID’
and
snap_id >= &Start_Snap
and
snap_id <= &End_snap
group by
sql_id,
sql_plan_hash_value,
event,sql_exec_id,
sql_exec_start,
current_obj#,
sql_plan_line_id,
sql_plan_operation,
sql_plan_options
)
order by count_1 desc;

=========================== History of parameter changes=============
set linesize 155
col time for a15
col parameter_name format a50
col old_value format a30
col new_value format a30
break on instance skip 3
select instance_number instance, snap_id, time, parameter_name, old_value, new_value from (
select a.snap_id,to_char(end_interval_time,’DD-MON-YY HH24:MI’) TIME, a.instance_number, parameter_name, value new_value,
lag(parameter_name,1) over (partition by parameter_name, a.instance_number order by a.snap_id) old_pname,
lag(value,1) over (partition by parameter_name, a.instance_number order by a.snap_id) old_value ,
decode(substr(parameter_name,1,2),’__’,2,1) calc_flag
from dba_hist_parameter a, dba_Hist_snapshot b , v$instance v
where a.snap_id=b.snap_id
and a.instance_number=b.instance_number
and parameter_name like nvl(‘&parameter_name’,parameter_name)
and a.instance_number like nvl(‘&instance_number’,v.instance_number)
)
where
new_value != old_value
and calc_flag not in (decode(‘&show_calculated’,’Y’,3,2))
order by 1,2
/


COLUMN snap_time FORMAT A20
COLUMN name FORMAT A20
COLUMN old_value FORMAT A20
COLUMN new_value FORMAT A20
COLUMN diff FORMAT A20

select
to_char(s.begin_interval_time, ‘DD-MON-YYYY HH24:MI:SS’) snap_time,
p.instance_number,
p.snap_id,
p.name,
p.old_value,
p.new_value,
decode(trim(translate(p.new_value,’0123456789′,’ ‘)),”,
trim(to_char(to_number(p.new_value)-to_number(p.old_value),’999999999999990′)),”) diff
from
(select dbid,
instance_number,
snap_id,
parameter_name name,
lag(trim(lower(value)))
over (
partition by dbid,
instance_number,
parameter_name
order by snap_id
) old_value,
trim(lower(value)) new_value,
decode(nvl(lag(trim(lower(value)))
over (
partition by dbid,
instance_number,
parameter_name
order by snap_id
),
trim(lower(value))),
trim(lower(value)), ‘~NO~CHANGE~’,
trim(lower(value))) diff
from dba_hist_parameter
) p,
dba_hist_snapshot s
where s.begin_interval_time between trunc(sysdate – &&V_NBR_DAYS) and sysdate
and p.dbid = s.dbid
and p.instance_number = s.instance_number
and p.snap_id = s.snap_id
and p.diff <> ‘~NO~CHANGE~’
and p.name=’&&V_PARAM_NAME’
order by snap_time, instance_number

/

@$ORACLE_HOME/rdbms/admin/utlsyxsz.sql –> To estimate sysaux size
========================================================== Parallel query ============

select qcsid,count(*) from v$px_session group by qcsid
/

===================================================================================
cat worstquery_disk.sql
select b.username username, a.disk_reads reads,
a.executions exec, a.disk_reads /decode
(a.executions, 0, 1,a.executions) rds_exec_ratio,
a.sql_id
from V$sqlarea a, dba_users b
where a.parsing_user_id = b.user_id
and a.disk_reads > 100000
order by a.disk_reads desc
/

===================================================================================
cat FreqFTS.sql
SELECT
b.owner,object_type mytype,
object_name myname,
blocks,
COUNT(1) buffers,
AVG(tch) avg_touches
FROM
sys.x_$bh a,
dba_objects b,
dba_segments s
WHERE
a.obj = b.object_id
AND
b.object_name = s.segment_name
AND
b.owner NOT IN (‘SYS’,’SYSTEM’,’SYSMAN’)
GROUP BY
object_name,
object_type,
blocks,
obj,b.owner
HAVING
AVG(tch) > 5
AND
COUNT(1) > 20 ORDER BY 6 DESC

/

cat sqliocontribuor.sql
select
username,SQL_HASH_VALUE,
sum(round(100 * total_user_io/total_io,2)) tot_io_pct
from
(select
b.sid sid,SQL_HASH_VALUE SQL_HASH_VALUE,
nvl(b.username,p.name) username,
sum(value) total_user_io
from
sys.v_$statname c,
sys.v_$sesstat a,
sys.v_$session b,
sys.v_$bgprocess p
where
a.statistic#=c.statistic# and
p.paddr (+) = b.paddr and
b.sid=a.sid and
c.name in (‘physical reads’,
‘physical reads direct’,
‘physical writes direct (lob)’,
‘physical reads direct (lob)’)
group by
b.sid, nvl(b.username,p.name),SQL_HASH_VALUE),
(select
sum(value) total_io
from
sys.v_$statname c,
sys.v_$sesstat a
where
a.statistic#=c.statistic#
and
c.name in (‘physical reads’,
‘physical reads direct’,
‘physical writes direct (lob)’,
‘physical reads direct (lob)’))
where username is not null
group by username,SQL_HASH_VALUE
having sum(round(100 * total_user_io/total_io,2)) > 0
order by
1 desc

/

cat sswt.sql
col event form a15
col p2 form 9999
col sid form 999
col pid form 999
col wait form 999
select a.name,b.pid,b.spid,c.sid,substr(d.event,1,15) event,
d.p1,d.p2,d.wait_time “Wait”,d.seconds_in_wait “Total Wait”
from v$shared_server a,v$process b,V$session c,V$session_wait d
where a.paddr=b.addr
and b.addr=c.paddr(+)
and nvl(c.sid,0)=d.sid(+)
order by 1

================================== Highest partition details================

CREATE OR REPLACE FUNCTION part_hv_to_date (p_table_owner IN VARCHAR2,
p_table_name IN VARCHAR2,
p_partition_name IN VARCHAR2)
RETURN DATE
AS
l_high_value VARCHAR2(32767);
l_date DATE;
BEGIN
SELECT high_value
INTO l_high_value
FROM all_tab_partitions
WHERE table_owner = p_table_owner
AND table_name = p_table_name
AND partition_name = p_partition_name;
EXECUTE IMMEDIATE ‘SELECT ‘ || l_high_value || ‘ FROM dual’ INTO l_date;
RETURN l_date;
END;
/
SELECT PARTITION_NAME,high_value
FROM DBA_TAB_PARTITIONS P
WHERE TABLE_OWNER = ‘EAIPR3’
AND TABLE_NAME = ‘EAI_LOG’
AND PART_HV_TO_DATE(TABLE_OWNER, TABLE_NAME, PARTITION_NAME) = (
SELECT MAX(PART_HV_TO_DATE(TABLE_OWNER, TABLE_NAME, PARTITION_NAME))
FROM DBA_TAB_PARTITIONS
WHERE TABLE_OWNER = P.TABLE_OWNER

AND TABLE_NAME = P.TABLE_NAME);

select s.sid
,s.serial#
,s.username
,s.machine
,s.status
,t.status “Transaction Status”
,s.lockwait
,t.used_ublk
,t.used_urec
,t.start_time
from v$transaction t
inner join v$session s on t.addr = s.taddr;


select /*+RULE */ s.username, o.name “Locked object”, l.sid, s.serial#,
p.spid, l.type, round(l.ctime/60,0) “Minutes”,
decode(l.lmode,’1′,’-‘,’2′,’RS’,’3′,’RX’,’4′,’S’,’5′,’SRX’,’6′,’X’) “Mode”,
substr(s.program,greatest((length(s.program) – 19),1),20) “Program name”,
substr(s.osuser,1,10)”OS user”,
substr(s.machine,1,12) “Machine”, substr(s.terminal,1,10) “Terminal”
from v$process p, sys.obj$ o, v$session s, v$lock l, v$locked_object lo
where l.sid = lo.session_id and l.sid > 5
and (l.id2 = lo.xidsqn or l.id1 = lo.object_id)
and s.sid = lo.session_id and o.obj# = lo.object_id
and p.addr = s.paddr
order by l.ctime;


SELECT count(*)
FROM v$transaction t, v$rollname r, v$session s
WHERE t.xidusn = r.usn
AND t.ses_addr = s.saddr;

A value > 0 indicates there are open transactions

col name format a8
col username format a8
col osuser format a8
col start_time format a17
col status format a12
tti ‘Active transactions’

SELECT username, terminal, osuser,
t.start_time, r.name, t.used_ublk “ROLLB BLKS”,
DECODE(t.SPACE, ‘YES’, ‘SPACE TX’,
DECODE(t.recursive, ‘YES’, ‘RECURSIVE TX’,
DECODE(t.noundo, ‘YES’, ‘NO UNDO TX’, t.status)
)) status
FROM sys.v_$transaction t, sys.v_$rollname r, sys.v_$session s
WHERE t.xidusn = r.usn
AND t.ses_addr = s.saddr
/

—————– Block TOAD and other tools ———–

CONNECT / AS SYSDBA;

CREATE OR REPLACE TRIGGER block_tools_from_prod
AFTER LOGON ON DATABASE
DECLARE
v_prog sys.v_$session.program%TYPE;
BEGIN
SELECT program INTO v_prog
FROM sys.v_$session
WHERE audsid = USERENV(‘SESSIONID’)
AND audsid != 0 — Don’t Check SYS Connections
AND ROWNUM = 1; — Parallel processes will have the same AUDSID’s

IF UPPER(v_prog) LIKE ‘%TOAD%’ OR UPPER(v_prog) LIKE ‘%T.O.A.D%’ OR — Toad
UPPER(v_prog) LIKE ‘%SQLNAV%’ OR — SQL Navigator
UPPER(v_prog) LIKE ‘%PLSQLDEV%’ OR — PLSQL Developer
UPPER(v_prog) LIKE ‘%BUSOBJ%’ OR — Business Objects
UPPER(v_prog) LIKE ‘%EXCEL%’ — MS-Excel plug-in
THEN
RAISE_APPLICATION_ERROR(-20000, ‘Development tools are not allowed here.’);
END IF;
END;
/

SHOW ERRORS

column “Rollback Segment” format a16
column “Size (Kb)” format 9,999,999
column “Gets” format 999,999,990
column “Waits” format 9,999,990
column “% Waits” format 90.00
column “# Shrinks” format 999,990
column “# Extends” format 999,990

Prompt
Prompt ROLLBACK Segment Statistics…

SELECT rn.Name “Rollback Segment”, rs.RSSize/1024 “Size (KB)”, rs.Gets “Gets”,
rs.waits “Waits”, (rs.Waits/rs.Gets)*100 “% Waits”,
rs.Shrinks “# Shrinks”, rs.EXTENDS “# Extends”
FROM sys.v_$RollName rn, sys.v_$RollStat rs
WHERE rn.usn = rs.usn
/

=========
select
s.sid,s.serial#,
substr(s.username,1,18) username,
substr(s.program,1,15) program,
decode(s.command,
0,’No Command’,
1,’Create Table’,
2,’Insert’,
3,’Select’,
6,’Update’,
7,’Delete’,
9,’Create Index’,
15,’Alter Table’,
21,’Create View’,
23,’Validate Index’,
35,’Alter Database’,
39,’Create Tablespace’,
41,’Drop Tablespace’,
40,’Alter Tablespace’,
53,’Drop User’,
62,’Analyze Table’,
63,’Analyze Index’,
s.command||’: Other’) command
from
v$session s,
v$process p,
v$transaction t,
v$rollstat r,
v$rollname n
where s.paddr = p.addr
and s.taddr = t.addr (+)
and t.xidusn = r.usn (+)
and r.usn = n.usn (+)
order by 1;

========= DBA_SCHEDULER_JOB – Details======

select log_id, job_name, status,
to_char(log_date, ‘DD-MON-YYYY HH24:MI’) log_date,
to_char(ACTUAL_START_DATE, ‘DD-MON-YYYY HH24:MI’) Actual_Start,
RUN_DURATION
from dba_scheduler_job_run_details
where job_name like ‘ANALYSE%’ order by 2,4;

SELECT job_name,
avg(EXTRACT( DAY FROM run_duration )*24*60*60 + EXTRACT( HOUR FROM run_duration )*60*60 + EXTRACT( MINUTE FROM run_duration )*60 + EXTRACT( SECOND FROM run_duration )) "AVG",
cpu_used
FROM dba_scheduler_job_run_details
GROUP BY job_name, cpu_used
HAVING avg(EXTRACT( DAY FROM run_duration )*24*60*60 + EXTRACT( HOUR FROM run_duration )*60*60 + EXTRACT( MINUTE FROM run_duration )*60 + EXTRACT( SECOND FROM run_duration )) > 0
ORDER BY 2 DESC;

SELECT owner,
job_name,
count(*) execution_count,
max(
( extract(hour from run_duration) * 60 ) +
( extract(minute from run_duration) )) max_minutes,
min(
( extract(hour from run_duration) * 60 ) +
( extract(minute from run_duration) )) min_minutes,
round(avg(
( extract(hour from run_duration) * 60 ) +
( extract(minute from run_duration) ))) avg_minutes
FROM dba_scheduler_job_run_details
–WHERE owner in (‘FACTORY’, ‘GARAGE’)
–and actual_start_date > SYSDATE – 1.01
group by owner, job_name
order by 4 desc

COLUMN log_date FORMAT A35
COLUMN owner FORMAT A20
COLUMN job_name FORMAT A30
COLUMN error FORMAT A20
COLUMN req_start_date FORMAT A35
COLUMN actual_start_date FORMAT A35
COLUMN run_duration FORMAT A20
COLUMN credential_owner FORMAT A20
COLUMN credential_name FORMAT A20
COLUMN additional_info FORMAT A30

SELECT log_date,
owner,
job_name,
status,
error#,
req_start_date,
actual_start_date,
run_duration,
credential_owner,
credential_name,
additional_info
FROM dba_scheduler_job_run_details
WHERE job_name = DECODE(UPPER(‘&1’), ‘ALL’, job_name, UPPER(‘&1’))
ORDER BY log_date;

SET lines 150
col OPERATION FOR a30
col TARGET FOR a40
col START_TIME FOR a40
col END_TIME FOR a40
SELECT * FROM dba_optstat_operations where start_Time>=sysdate-30 and (target like ‘SYSADM%’ or target like ‘DUADM%’) ORDER BY target,start_time DESC;
select OPERATION,TARGET,to_char(START_TIME,’DD/MM/YY HH24:MI’),to_char(END_TIME,’DD/MM/YY HH24:MI’) from dba_optstat_operations where start_Time>=sysdate-30 and (target like ‘SYSADM%’ or target like ‘DUADM%’) ORDER BY target,start_time;

SELECT owner, table_name, stats_update_time
FROM dba_tab_stats_history
WHERE owner=’WCMS’
AND table_name=’DM_SYSOBJECT_R’
ORDER BY owner, table_name, stats_update_time DESC;

SELECT TO_CHAR(stats_update_time,’yyyy-mm-dd’) AS stats_update_time, COUNT(*)
FROM dba_tab_stats_history
GROUP BY TO_CHAR(stats_update_time,’yyyy-mm-dd’)
ORDER BY 1 DESC;

============Tablespace Growth Report (check all one by one) ================

Script-1 :

SELECT TO_CHAR (sp.begin_interval_time,’DD-MM-YYYY’) days
, ts.tsname
, max(round((tsu.tablespace_size * dt.block_size )/(1024*1024),2)) cur_size_MB
, max(round((tsu.tablespace_usedsize * dt.block_size )/(1024*1024),2)) usedsize_MB
FROM DBA_HIST_TBSPC_SPACE_USAGE tsu
, DBA_HIST_TABLESPACE_STAT ts
, DBA_HIST_SNAPSHOT sp
, DBA_TABLESPACES dt
WHERE tsu.tablespace_id= ts.ts#
AND tsu.snap_id = sp.snap_id
AND ts.tsname = dt.tablespace_name
AND ts.tsname NOT IN (‘SYSAUX’,’SYSTEM’)
GROUP BY TO_CHAR (sp.begin_interval_time,’DD-MM-YYYY’), ts.tsname
ORDER BY ts.tsname, days;

Script-2:

column “Percent of Total Disk Usage” justify right format 999.99
column “Space Used (MB)” justify right format 9,999,999.99
column “Total Object Size (MB)” justify right format 9,999,999.99
set linesize 150
set pages 80
set feedback off
set line 5000
column “SEGMENT_NAME” justify left format A30
column “TABLESPACE_NAME” justify left format A30
select * from (select c.TABLESPACE_NAME,c.segment_name,to_char(end_interval_time, ‘MM/DD/YY’) mydate, sum(space_used_delta) / 1024 / 1024 “Space used (MB)”, avg(c.bytes) / 1024 / 1024 “Total Object Size (MB)”,
round(sum(space_used_delta) / sum(c.bytes) * 100, 2) “Percent of Total Disk Usage”
from
dba_hist_snapshot sn,
dba_hist_seg_stat a,
dba_objects b,
dba_segments c
where begin_interval_time > trunc(sysdate) – 60
and sn.snap_id = a.snap_id
and b.object_id = a.obj#
and b.owner = c.owner
and b.object_name = c.segment_name
and c.segment_name = ‘S_PARTY’
group by c.TABLESPACE_NAME,c.segment_name,to_char(end_interval_time, ‘MM/DD/YY’)
order by c.TABLESPACE_NAME,c.segment_name,to_date(mydate, ‘MM/DD/YY’));

Script-3:
set pages 80
set feedback off
column “OBJECT_NAME” justify left format A30
column “SUBOBJECT_NAME” justify left format A30
column “OBJECT_TYPE” justify left format A30
column “Tablespace Name” justify left format A30
set line 5000
SELECT o.OWNER , o.OBJECT_NAME , o.SUBOBJECT_NAME , o.OBJECT_TYPE ,
t.NAME "Tablespace Name", s.growth/(1024*1024) "Growth in MB", (SELECT sum(bytes)/(1024*1024)
FROM dba_segments
WHERE segment_name=o.object_name) “Total Size(MB)”
FROM DBA_OBJECTS o,
( SELECT TS#,OBJ#,
SUM(SPACE_USED_DELTA) growth
FROM DBA_HIST_SEG_STAT
GROUP BY TS#,OBJ#
HAVING SUM(SPACE_USED_DELTA) > 0
ORDER BY 2 DESC ) s,
v$tablespace t
WHERE s.OBJ# = o.OBJECT_ID
AND s.TS#=t.TS#
AND o.OWNER=’&UserName’
ORDER BY 6 DESC
/

set feedback on
select * from (select c.TABLESPACE_NAME,c.segment_name “Object Name”,b.object_type,
sum(space_used_delta) / 1024 / 1024 / 1024 "Growth (GB)"
from dba_hist_snapshot sn,
dba_hist_seg_stat a,
dba_objects b,
dba_segments c
where begin_interval_time > trunc(sysdate)-&days_back
and sn.snap_id = a.snap_id
and b.object_id = a.obj#
and b.owner = c.owner
and b.object_name = c.segment_name
and c.owner =’&Owner’
group by c.TABLESPACE_NAME,c.segment_name,b.object_type)
order by 3 asc;

SELECT b.tsname tablespace_name
, MAX(b.used_size_mb) cur_used_size_mb
, round(AVG(inc_used_size_mb),2) avg_increase_mb
FROM (
SELECT a.days, a.tsname, used_size_mb
, used_size_mb – LAG (used_size_mb,1) OVER ( PARTITION BY a.tsname ORDER BY a.tsname,a.days) inc_used_size_mb
FROM (
SELECT TO_CHAR(sp.begin_interval_time,’MM-DD-YYYY’) days
,ts.tsname
,MAX(round((tsu.tablespace_usedsize* dt.block_size )/(1024*1024),2)) used_size_mb
FROM DBA_HIST_TBSPC_SPACE_USAGE tsu, DBA_HIST_TABLESPACE_STAT ts
,DBA_HIST_SNAPSHOT sp, DBA_TABLESPACES dt
WHERE tsu.tablespace_id= ts.ts# AND tsu.snap_id = sp.snap_id
AND ts.tsname = dt.tablespace_name AND sp.begin_interval_time > sysdate-60
GROUP BY TO_CHAR(sp.begin_interval_time,’MM-DD-YYYY’), ts.tsname
ORDER BY ts.tsname, days
) A
) b GROUP BY b.tsname ORDER BY b.tsname
/

SET MARKUP HTML ON ENTMAP ON SPOOL ON PREFORMAT OFF;
set linesize 125
set numwidth 20
set pagesize 50
COL NAME FOR A30
col SNAP_ID for 9999999
set serveroutput off;
SPOOL TBS_TREND.xls;
set verify off;
set echo off;

SELECT
distinct DHSS.SNAP_ID,VTS.NAME,
TO_CHAR(DHSS.END_INTERVAL_TIME, ‘DD-MM HH:MI’) AS SNAP_Time,
ROUND((DHTS.TABLESPACE_USEDSIZE*8192)/1024/1024) AS USED_MB, ROUND((DHTS.TABLESPACE_SIZE*8192)/1024/1024) AS SIZE_MB
FROM DBA_HIST_TBSPC_SPACE_USAGE DHTS,V$TABLESPACE VTS,DBA_HIST_SNAPSHOT DHSS
WHERE VTS.TS#=DHTS.TABLESPACE_ID
AND DHTS.SNAP_ID=DHSS.SNAP_ID
ORDER BY 1;
SPOOL OFF;

============= DBA_JOBS==========
SET LINESIZE 152
SET PAGESIZE 100
COLUMN JOB FORMAT 99,999 HEADING "Job #"
COLUMN schema_user FORMAT a12 HEADING “Job Owner”
COLUMN log_user FORMAT a12 HEADING “Job Exec”
COLUMN broken FORMAT a3 HEADING “BKN”
COLUMN last FORMAT a17 HEADING “Last Run”
COLUMN this FORMAT a17 HEADING “This Run”
COLUMN next FORMAT a17 HEADING “Next Run”
COLUMN time FORMAT 99,999 HEADING “Time Mins”
COLUMN what FORMAT a40 HEADING “Job Action”

select job, schema_user,
log_user,
decode(broken,’Y’,’Yes’,’N’,’No’,’—‘) “broken”,
to_char(LAST_DATE,’DD-MON-YYYY HH24:MI’) “last”,
to_char(THIS_DATE,’DD-MON-YYYY HH24:MI’) “this”,
to_char(NEXT_DATE,’DD-MON-YYYY HH24:MI’) “next”,
ROUND(TOTAL_TIME/60,0) “time”,
substr(what,1,40) “what”
from dba_jobs
where substr(what,1,7) !=’declare’
union all
select job,
schema_user,
log_user,
decode(broken,’Y’,’Yes’,’N’,’No’,’—‘) “broken”,
to_char(LAST_DATE,’DD-MON-YYYY HH24:MI’) “last”,
to_char(THIS_DATE,’DD-MON-YYYY HH24:MI’) “this”,
to_char(NEXT_DATE,’DD-MON-YYYY HH24:MI’) “next”,
ROUND(TOTAL_TIME/60,0) “time”,
substr(what,instr(what,’MSG’),20) “what”
from dba_jobs
where substr(what,1,7) =’declare’
order by 1
/

select o.object_name, o.owner, sum(s.space_allocated_delta),sum(s.DB_BLOCK_CHANGES_DELTA)
from DBA_HIST_SEG_STAT s, dba_objects o, dba_hist_snapshot ss
where s.obj# = o.object_id
and s.SPACE_ALLOCATED_DELTA >0
and s.SPACE_USED_DELTA >0
and ss.SNAP_ID = s.SNAP_ID
and ss.BEGIN_INTERVAL_TIME > sysdate-1
group by o.object_name, o.owner
order by sum(s.SPACE_ALLOCATED_DELTA) desc;

Here is a script that tells you how much REDO was generated per day:
SELECT A.*, ROUND(A.COUNT#*B.MOY#/1024/1024/1024/1024) TB_PERDAY FROM ( SELECT TO_CHAR(FIRST_TIME,'YYYY-MM-DD') Date_Generated, COUNT(1)
COUNT#, MIN(RECID) MIN#, MAX(RECID) MAX# from v$log_history group by to_char(first_time,’YYYY-MM-DD’) order by 1 desc ) a,
( select avg(bytes) moy#, count(1) count#,max(bytes) max_bytes, min(bytes) min_bytes from v$log ) b;

set pagesize 120;
set linesize 200;
col day for a8;
spool archivelog.lst

PROMPT Archive log distribution per hours on each day …

select
thread#,to_char(first_time,’YY-MM-DD’) day,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’00’,1,0)),’999′) “00”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’01’,1,0)),’999′) “01”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’02’,1,0)),’999′) “02”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’03’,1,0)),’999′) “03”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’04’,1,0)),’999′) “04”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’05’,1,0)),’999′) “05”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’06’,1,0)),’999′) “06”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’07’,1,0)),’999′) “07”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’08’,1,0)),’999′) “08”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’09’,1,0)),’999′) “09”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’10’,1,0)),’999′) “10”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’11’,1,0)),’999′) “11”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’12’,1,0)),’999′) “12”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’13’,1,0)),’999′) “13”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’14’,1,0)),’999′) “14”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’15’,1,0)),’999′) “15”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’16’,1,0)),’999′) “16”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’17’,1,0)),’999′) “17”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’18’,1,0)),’999′) “18”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’19’,1,0)),’999′) “19”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’20’,1,0)),’999′) “20”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’21’,1,0)),’999′) “21”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’22’,1,0)),’999′) “22”,
to_char(sum(decode(substr(to_char(first_time,’HH24′),1,2),’23’,1,0)),’999′) “23”,
COUNT(*) TOT
from v$log_history
group by thread#,to_char(first_time,’YY-MM-DD’)
order by day ;

to know archives generated in a month day wise

select to_char(first_time,’DD-MON’),count(*) from v$log_history
where to_char(first_time,’MON’)=’DEC’
group by to_char(first_time,’DD-MON’) order by 1;

to know archives generated in a day

select to_char(first_time,’DD-MON’),count(*) from v$log_history
where to_char(first_time,’DD’)=01
group by to_char(first_time,’DD-MON’);

to know archives generated in a day including time

select to_char(first_time,’DD-MON:hh24:mi:ss’) from v$log_history
where to_char(first_time,’DD’)=10;

to know archives generated on specific date in hourly basis

select to_char(first_time,’hh24′),count(*) from v$log_history
where to_char(first_time,’dd-mm-yy’) = ’01-02-20′
group by to_char(first_time,’hh24′) order by 1;

SELECT TRUNC(COMPLETION_TIME) ARCHIVED_DATE,
SUM(BLOCKS * BLOCK_SIZE) / 1024 / 1024 /1024 SIZE_IN_GB
FROM V$ARCHIVED_LOG where trunc(completion_time)>=sysdate-30
GROUP BY TRUNC(COMPLETION_TIME)
ORDER BY 1;

 alter session set nls_date_format = 'DD-MM-YY HH24';

SELECT TRUNC(COMPLETION_TIME, ‘HH’) ARCHIVED_DATE_HOUR,
SUM(BLOCKS * BLOCK_SIZE)/1024/1024/1024 SIZE_IN_GB
FROM V$ARCHIVED_LOG
GROUP BY TRUNC(COMPLETION_TIME, ‘HH’)
ORDER BY 1;

SELECT TRUNC(COMPLETION_TIME, ‘HH’) ARCHIVED_DATE_HOUR,destination,
SUM(BLOCKS * BLOCK_SIZE)/1024/1024/1024 SIZE_IN_GB
FROM V$ARCHIVED_LOG al,v$archive_dest ad where ad.destination is not null and completion_time>=sysdate-1
GROUP BY TRUNC(COMPLETION_TIME, ‘HH’),destination
ORDER BY 1;

select trunc(COMPLETION_TIME,’DD’) Day, thread#,
round(sum(BLOCKS*BLOCK_SIZE)/1024/1024/1024) GB, count(*) Archives_Generated from v$archived_log
group by trunc(COMPLETION_TIME,’DD’),thread# order by 1;

Archivelog generation

Script:

set linesize 200 pagesize 1000
column day format a3
column total format 9999
column h00 format 999
column h01 format 999
column h02 format 999
column h03 format 999
column h04 format 999
column h04 format 999
column h05 format 999
column h06 format 999
column h07 format 999
column h08 format 999
column h09 format 999
column h10 format 999
column h11 format 999
column h12 format 999
column h13 format 999
column h14 format 999
column h15 format 999
column h16 format 999
column h17 format 999
column h18 format 999
column h19 format 999
column h20 format 999
column h21 format 999
column h22 format 999
column h23 format 999
column h24 format 999
break on report
compute max of “total” on report
compute max of “h01” on report
compute max of “h02” on report
compute max of “h03” on report
compute max of “h04” on report
compute max of “h05” on report
compute max of “h06” on report
compute max of “h07” on report
compute max of “h08” on report
compute max of “h09” on report
compute max of “h10” on report
compute max of “h11” on report
compute max of “h12” on report
compute max of “h13” on report
compute max of “h14” on report
compute max of “h15” on report
compute max of “h16” on report
compute max of “h17” on report
compute max of “h18” on report
compute max of “h19” on report
compute max of “h20” on report
compute max of “h21” on report
compute max of “h22” on report
compute max of “h23” on report
compute sum of NUM on report
compute sum of GB on report
compute sum of MB on report
compute sum of KB on report

REM Script to Report the Redo Log Switch History

alter session set nls_date_format=’DD MON YYYY’;
select thread#, trunc(completion_time) as “date”, to_char(completion_time,’Dy’) as “Day”, count(1) as “total”,
sum(decode(to_char(completion_time,’HH24′),’00’,1,0)) as “h00”,
sum(decode(to_char(completion_time,’HH24′),’01’,1,0)) as “h01”,
sum(decode(to_char(completion_time,’HH24′),’02’,1,0)) as “h02”,
sum(decode(to_char(completion_time,’HH24′),’03’,1,0)) as “h03”,
sum(decode(to_char(completion_time,’HH24′),’04’,1,0)) as “h04”,
sum(decode(to_char(completion_time,’HH24′),’05’,1,0)) as “h05”,
sum(decode(to_char(completion_time,’HH24′),’06’,1,0)) as “h06”,
sum(decode(to_char(completion_time,’HH24′),’07’,1,0)) as “h07”,
sum(decode(to_char(completion_time,’HH24′),’08’,1,0)) as “h08”,
sum(decode(to_char(completion_time,’HH24′),’09’,1,0)) as “h09”,
sum(decode(to_char(completion_time,’HH24′),’10’,1,0)) as “h10”,
sum(decode(to_char(completion_time,’HH24′),’11’,1,0)) as “h11”,
sum(decode(to_char(completion_time,’HH24′),’12’,1,0)) as “h12”,
sum(decode(to_char(completion_time,’HH24′),’13’,1,0)) as “h13”,
sum(decode(to_char(completion_time,’HH24′),’14’,1,0)) as “h14”,
sum(decode(to_char(completion_time,’HH24′),’15’,1,0)) as “h15”,
sum(decode(to_char(completion_time,’HH24′),’16’,1,0)) as “h16”,
sum(decode(to_char(completion_time,’HH24′),’17’,1,0)) as “h17”,
sum(decode(to_char(completion_time,’HH24′),’18’,1,0)) as “h18”,
sum(decode(to_char(completion_time,’HH24′),’19’,1,0)) as “h19”,
sum(decode(to_char(completion_time,’HH24′),’20’,1,0)) as “h20”,
sum(decode(to_char(completion_time,’HH24′),’21’,1,0)) as “h21”,
sum(decode(to_char(completion_time,’HH24′),’22’,1,0)) as “h22”,
sum(decode(to_char(completion_time,’HH24′),’23’,1,0)) as “h23”
from
v$archived_log
where first_time > trunc(sysdate-60)
and dest_id = (select dest_id from V$ARCHIVE_DEST_STATUS where status=’VALID’ and type=’LOCAL’)
group by thread#, trunc(completion_time), to_char(completion_time, ‘Dy’) order by 2,1;

REM Script to calculate the archive log size generated per day for each Instances.

select THREAD#, trunc(completion_time) as “DATE”
, count(1) num
, trunc(sum(blocks*block_size)/1024/1024/1024) as GB --, trunc(sum(blocks*block_size)/1024/1024) as MB
--, sum(blocks*block_size)/1024 as KB
from v$archived_log
where first_time > trunc(sysdate-60)
and dest_id = (select dest_id from V$ARCHIVE_DEST_STATUS where status=’VALID’ and type=’LOCAL’)
group by thread#, trunc(completion_time)
order by 2,1
;

alter session set nls_date_format=’DD/MM/YY HH24:MI’;
compute sum of SIZE_IN_GB on Hourly
break on hourly skip 1
column “Hourly” noprint
SELECT sequence#,to_char(COMPLETION_TIME,’DD/MM/YY HH’) “Hourly”,
SUM(BLOCKS * BLOCK_SIZE) / 1024 / 1024 /1024 SIZE_IN_GB
FROM V$ARCHIVED_LOG where completion_time between (’31/03/2020 23:30′) and (’02/04/2020 00:00′)
GROUP BY sequence#,to_char(COMPLETION_TIME,’DD/MM/YY HH’) ORDER BY 1;

===================== Who is using dblink =================
http://jkstill.blogspot.com/2010/03/whos-using-database-link.html
===================== Row cache lock ======================
https://aprakash.wordpress.com/2010/05/07/row-cache-lock-an-interesting-case/

set linesize 160
set pagesize 2000

SELECT
FILE#,
PHYRDS,
PHYWRTS,
PHYBLKRD,
PHYBLKWRT,
SINGLEBLKRDS,
READTIM,
WRITETIM,
SINGLEBLKRDTIM,
AVGIOTIM,
LSTIOTIM,
MINIOTIM,
MAXIORTM,
MAXIOWTM
FROM
V$FILESTAT
WHERE
FILE# IN (6,7);

SELECT
FILE#,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,1,SINGLEBLKRDS,0)) MILLI1,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,2,SINGLEBLKRDS,0)) MILLI2,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,4,SINGLEBLKRDS,0)) MILLI4,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,8,SINGLEBLKRDS,0)) MILLI8,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,16,SINGLEBLKRDS,0)) MILLI16,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,32,SINGLEBLKRDS,0)) MILLI32,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,64,SINGLEBLKRDS,0)) MILLI64,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,128,SINGLEBLKRDS,0)) MILLI128,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,256,SINGLEBLKRDS,0)) MILLI256,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,512,SINGLEBLKRDS,0)) MILLI512,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,1024,SINGLEBLKRDS,0)) MILLI1024,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,2048,SINGLEBLKRDS,0)) MILLI2048,
MAX(DECODE(SINGLEBLKRDTIM_MILLI,4096,SINGLEBLKRDS,0)) MILLI4096
FROM
V$FILE_HISTOGRAM
WHERE
FILE# IN (6,7)
GROUP BY
FILE#
ORDER BY
FILE#;

select * from ( select name,phyrds, phywrts, readtim, writetim
from v$filestat a, v$datafile b where a.file#=b.file#
order by readtim desc) where rownum < 6;

============ Shared Pools free memory components =================
/* Shared Pool Memory Allocations by Size */
select name, bytes from v$sgastat
where pool = ‘shared pool’ and (bytes > 999999 or name = ‘free memory’)
order by bytes desc;

/* Shared Pool parameters */
select nam.ksppinm NAME, val.KSPPSTVL VALUE
from x$ksppi nam, x$ksppsv val
where nam.indx = val.indx and (nam.ksppinm like ‘%shared_pool%’ or nam.ksppinm like ‘_4031%’ or nam.ksppinm in (‘_kghdsidx_count’,’_ksmg_granule_size’,’_memory_broker_stat_interval’,’cursor_sharing’,’event’))
order by 1;

============ Estimate size BCT File =========================

SELECT((
(SELECT SUM(ceil(bytes /(7686 * 1024 * 1024))) * 8 bitmap_ext
FROM v$datafile) +
(SELECT ceil(VALUE / 252) file_descr_ext
FROM v$parameter
WHERE name = ‘db_files’) + 1)

* 32 + 1088) / 1024 bct_file_size_mb
FROM dual;
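Once the estimated size looks acceptable, BCT itself is enabled with a one-liner; a hedged example (the file path below is a placeholder, not taken from these notes):

ALTER DATABASE ENABLE BLOCK CHANGE TRACKING USING FILE '/u01/app/oracle/bct/bct.chg';
-- Verify:
select status, filename, bytes/1024/1024 size_mb from v$block_change_tracking;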

========= Top segments ===========

with segment_rollup as (
select owner, table_name, owner segment_owner, table_name segment_name from dba_tables
union all
select table_owner, table_name, owner segment_owner, index_name segment_name from dba_indexes
union all
select owner, table_name, owner segment_owner, segment_name from dba_lobs
union all
select owner, table_name, owner segment_owner, index_name segment_name from dba_lobs
), ranked_tables as (
select rank() over (order by sum(blocks) desc) rank, sum(blocks) blocks, r.owner, r.table_name
from segment_rollup r, dba_segments s
where s.owner=r.segment_owner and s.segment_name=r.segment_name
group by r.owner, r.table_name
)
select rank, round(blocks*8/1024) mb, table_name
from ranked_tables
where rank<=20;

SELECT * FROM
(
select
SEGMENT_NAME,
SEGMENT_TYPE,
BYTES/1024/1024/1024 GB,
TABLESPACE_NAME
from
dba_segments
order by 3 desc
) WHERE
ROWNUM <= 20
/

select segment_name,segment_type,sum(bytes)/1024/1024/1024 “GB”,tablespace_name from dba_segments group by segment_name,segment_type,tablespace_name having sum(bytes)/1024/1024/1024 > 5 order by 3 desc;

================== Find and delete duplicate rows ================
https://blogs.oracle.com/sql/how-to-find-and-delete-duplicate-rows-with-sql

===================AWR Retention========

set linesize 120
set pagesize 100
col ash form a30
col retention form a30
col snap form a30

COLUMN “Item” FORMAT A25
COLUMN “Space Used (GB)” FORMAT 999.99
COLUMN “Schema” FORMAT A25
COLUMN “Move Procedure” FORMAT A40

SELECT occupant_name “Item”,
space_usage_kbytes/1048576 “Space Used (GB)”,
schema_name “Schema”,
move_procedure “Move Procedure”
FROM v$sysaux_occupants
WHERE occupant_name = ‘SM/AWR’
ORDER BY 1
/

select sysdate – a.sample_time ash,
sysdate – s.begin_interval_time snap,
c.RETENTION
from sys.wrm$_wr_control c,
(
select db.dbid,
min(w.sample_time) sample_time
from sys.v_$database db,
sys.Wrh$_active_session_history w
where w.dbid = db.dbid group by db.dbid
) a,
(
select db.dbid,
min(r.begin_interval_time) begin_interval_time
from sys.v_$database db,
sys.wrm$_snapshot r
where r.dbid = db.dbid
group by db.dbid
) s
where a.dbid = s.dbid
and c.dbid = a.dbid;

select table_name, count(*)
from dba_tab_partitions
where table_name like ‘WRH$%’
and table_owner = ‘SYS’
group by table_name
order by 1;

SELECT extract(day from snap_interval)*24*60+extract(hour from snap_interval)*60+extract(minute from snap_interval) snapshot_Interval,
extract(day from retention)*24*60+extract(hour from retention)*60+extract(minute from retention) retention_Interval
FROM dba_hist_wr_control;

SNAPSHOT_INTERVAL RETENTION_INTERVAL
----------------- ------------------
               30              86400

SQL> SELECT extract(minute from snap_interval) snapshot_Interval_Mins,extract(day from retention) retention_Interval_Days FROM dba_hist_wr_control;

SNAPSHOT_INTERVAL_MINS RETENTION_INTERVAL_DAYS
---------------------- -----------------------
                    30                      60

Item                      Space Used (GB) Schema                    Move Procedure
------------------------- --------------- ------------------------- ----------------------------------------
SM/AWR                             511.40 SYS

SQL> SELECT extract(minute from snap_interval) snapshot_Interval_Mins,extract(day from retention) retention_Interval_Days FROM dba_hist_wr_control;

SNAPSHOT_INTERVAL_MINS RETENTION_INTERVAL_DAYS
---------------------- -----------------------
                    30                      90

exec dbms_workload_repository.modify_snapshot_settings(interval => 30,retention => 86400);
(interval and retention are both specified in minutes; 86400 minutes = 60 days)
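For example, a hedged one-liner to keep 90 days of history with the same 30-minute snapshots (arguments are minutes, hence 90*24*60):

exec dbms_workload_repository.modify_snapshot_settings(interval => 30, retention => 90*24*60);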
SQL> l

SELECT
snap_id, begin_interval_time, end_interval_time
FROM
SYS.WRM$_SNAPSHOT
WHERE
snap_id = ( SELECT MIN (snap_id) FROM SYS.WRM$_SNAPSHOT)
UNION
SELECT
snap_id, begin_interval_time, end_interval_time
FROM
SYS.WRM$_SNAPSHOT
WHERE
snap_id = ( SELECT MAX (snap_id) FROM SYS.WRM$_SNAPSHOT)
/

BEGIN
dbms_workload_repository.drop_snapshot_range(low_snap_id => 42764, high_snap_id=>42770);
END;
/

======================== Tracing 10046 in Oracle==========
https://oracle-base.com/articles/misc/sql-trace-10046-trcsess-and-tkprof
https://antognini.ch/2012/08/event-10046-full-list-of-levels/

http://www.nazmulhuda.info/how-to-collect-10046-trace-sql_trace-diagnostics-for-performance-issues?tmpl=%2Fsystem%2Fapp%2Ftemplates%2Fprint%2F&showPrintDialog=1
This document is taken from Oracle Support Doc ID 376442.1
http://www.juliandyke.com/Diagnostics/Trace/EnablingTrace.php

Enabling trace for session.
1.alter session set timed_statistics = true;
2.alter session set statistics_level=all;
3.alter session set max_dump_file_size = unlimited;
4.alter session set events '10046 trace name context forever, level 12'; (level 12 traces wait events and bind variables)

Disabling trace for a session.
1.alter session set events ‘10046 trace name context off’;

Tracing existing process
•Find SID of that session
•Find its OS pid from the following (This does not work for Windows)
•select p.PID,p.SPID,s.SID from v$process p,v$session s where s.paddr = p.addr and s.sid = &SESSION_ID;
•oradebug setospid spid??? (spid from above query)
•oradebug unlimit
•oradebug event 10046 trace name context forever,level 12;

OR
•Find SID of that session
•Find its Oracle PID from the following (this does work for Windows)
select p.PID,p.SPID,s.SID from v$process p,v$session s where s.paddr = p.addr and s.sid = &SESSION_ID;
•oradebug setorapid pid??? (pid from above query)
•oradebug unlimit
•oradebug event 10046 trace name context forever,level 12;

Disable Tracing of existing session
•oradebug event 10046 trace name context off
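An alternative to oradebug for tracing another session is the documented DBMS_MONITOR API; a minimal sketch (the SID/SERIAL# values below are placeholders taken from V$SESSION):

exec dbms_monitor.session_trace_enable(session_id => 1234, serial_num => 56789, waits => TRUE, binds => TRUE);
-- and to stop it again:
exec dbms_monitor.session_trace_disable(session_id => 1234, serial_num => 56789);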

Enabling Trace on user login with trigger

CREATE OR REPLACE TRIGGER SYS.set_trace AFTER LOGON ON DATABASE WHEN (USER like ‘&USERNAME’) DECLARE lcommand varchar(200);

BEGIN

EXECUTE IMMEDIATE ‘alter session set statistics_level=ALL’;

EXECUTE IMMEDIATE ‘alter session set max_dump_file_size=UNLIMITED’;

EXECUTE IMMEDIATE ‘alter session set events ”10046 trace name context forever, level 12”’;

END set_trace;
/

Enabling trace on whole instance.

alter system set events ‘10046 trace name context forever,level 12’;

Disabling trace on whole instance

alter system set events ‘10046 trace name context off’;

How to interpret trace file

Trace files are created in user_dump_dest. TKPROF is the traditional way to make a trace file more readable, but the following two tools, which produce HTML reports, are also worth using:
•ORASRP (oracledba.ru/orasrp) : open source, available for Windows and Linux
•TRCANLZR (Metalink note 224270.1) : creates a stored procedure and a set of tables in the Oracle database; available on any platform where Oracle runs.

=========================================

CREATE OR REPLACE TRIGGER SYS.TRACE_WMS
AFTER LOGON ON DATABASE
BEGIN
IF user =’WMS’ THEN
EXECUTE IMMEDIATE ‘ALTER SESSION SET EVENTS ”10046 trace name context forever, level 12”’;
EXECUTE IMMEDIATE ‘alter session set max_dump_file_size=unlimited’;
END IF;
END;
/

ALTER TRIGGER SYS.TRACE_BCH DISABLE;

CREATE OR REPLACE TRIGGER SYS.TRACE_PTH_USER
after logon on database
BEGIN

if (user=’PTH’) then

            dbms_monitor.session_trace_enable(

                            null,

                            null,

                            true,

                            true

            );

end if;
end;
/

SQL> ORADEBUG SETOSPID 15151; — Debug session with the specified OS process.
SQL> ORADEBUG SETORAPID 123456; — Debug session with the specified Oracle process ID.

SQL> ORADEBUG EVENT 10046 TRACE NAME CONTEXT FOREVER, LEVEL 12;
SQL> ORADEBUG TRACEFILE_NAME; — Display the current trace file.
SQL> ORADEBUG EVENT 10046 TRACE NAME CONTEXT OFF;

select ‘ORADEBUG SETORAPID ‘||p.PID||’;’ from v$process p,v$session s where s.paddr = p.addr and s.username=’WMS’ and s.status=’ACTIVE’;

================================== AWR I/O Explained================
“Av Reads/s”, “Av Writes/s” give you the load.
• “Av Rd(ms)”, “Av Buf Wt(ms)” give you the performances of IO subsystem seen by Oracle (this is what you have to compare with your benchmark on the IO subsystem – the hardware vendor gives ideal figures you never reach in real world and they are meaningless if you have disk arrays and several layers between the physical disk and the Oracle files)
• “Buffer Waits” gives you the consequence of the previous 2 on your client.
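As a quick cross-check outside AWR, the same averages can be derived from the cumulative V$FILESTAT counters; a minimal sketch (READTIM/WRITETIM are in centiseconds, hence the *10 to get milliseconds):

select f.file#, d.name,
round(f.readtim*10/nullif(f.phyrds,0),2) "Av Rd(ms)",
round(f.writetim*10/nullif(f.phywrts,0),2) "Av Wr(ms)"
from v$filestat f, v$datafile d
where f.file# = d.file#
order by 3 desc;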

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

SELECT
o.object_type,i.instance_name db,COUNT(*)*8192/1024/1024 meg_in_memory,
o.owner||’.’||o.OBJECT_NAME Object_in_Memory
FROM DBA_OBJECTS o, gV$BH bh, gv$instance i
WHERE o.DATA_OBJECT_ID = bh.OBJD
and bh.status<>’free’
and bh.inst_id = i.inst_id
and o.object_name like upper(‘%%’)
GROUP BY o.owner||’.’||o.OBJECT_NAME,o.object_type,i.instance_name
having count(*)>0 ORDER BY COUNT(*);

select to_char(c.BEGIN_INTERVAL_TIME,’YYYY-MM-DD Day’) “DATE”,a.object_name,
sum(b.PHYSICAL_READS_DELTA) total_daily_physical_reads
from dba_objects a,dba_hist_seg_stat b,sys.wRM$_SNAPSHOT c
where a.object_id=b.OBJ#
— and b.SNAP_ID >(select max(SNAP_ID)-24*8 from sys.wRM$_SNAPSHOT)
and upper(a.object_name) like upper(‘%’) and b.PHYSICAL_READS_DELTA>0
and c.instance_number=(select instance_number from v$instance)
and c.snap_id=b.snap_id
group by to_char(c.BEGIN_INTERVAL_TIME,’YYYY-MM-DD Day’),a.object_name

order by 1,3

select filename, file#, snap_id
,round(phyrds_d) “Reads”
,round(phyrds_d/interval_seconds) “Av Reads/s”
,round(readtim_d*10/nullif(phyrds_d, 0)) "Av Rd(ms)"
,round(phyblkrd_d/nullif(phyrds_d, 0)) "Av Blks/Rd"
,round(singleblkrds_d/interval_seconds) "1-bk Rds/s"
,round(singleblkrdtim_d*10/nullif(singleblkrds_d, 0)) "Av 1-bk Rd(ms)"
,round(phywrts_d) "Writes"
,round(phywrts_d/interval_seconds) "Av Writes/s"
,round(writetim_d*10/nullif(phywrts_d, 0)) "Av Wr(ms)" -- * Not in AWR
,round(phyblkwrt_d/nullif(phywrts_d, 0)) "*Av Blks/Wr" -- * Not in AWR
,round(wait_count_d) "Buffer Waits"
,round(time_d*10/nullif(wait_count_d, 0)) "Av Buf Wt(ms)" -- time is in centiseconds
from (
select phyrds - lag(phyrds) over(partition by file# order by snap_id) phyrds_d
,phywrts - lag(phywrts) over(partition by file# order by snap_id) phywrts_d
,singleblkrds - lag(singleblkrds) over(partition by file# order by snap_id) singleblkrds_d
,readtim - lag(readtim) over(partition by file# order by snap_id) readtim_d
,writetim - lag(writetim) over(partition by file# order by snap_id) writetim_d
,singleblkrdtim - lag(singleblkrdtim) over(partition by file# order by snap_id) singleblkrdtim_d
,phyblkrd - lag(phyblkrd) over(partition by file# order by snap_id) phyblkrd_d
,phyblkwrt - lag(phyblkwrt) over(partition by file# order by snap_id) phyblkwrt_d
,wait_count - lag(wait_count) over(partition by file# order by snap_id) wait_count_d
,time - lag(time) over(partition by file# order by snap_id) time_d
,interval_seconds
,t.*
from dba_hist_filestatxs t
,(select snap_id s_snap_id
,((sysdate + (end_interval_time – begin_interval_time)) – sysdate)*86400 interval_seconds
from dba_hist_snapshot)
where t.snap_id = s_snap_id
and tsname = ‘TEST_TBS’
and snap_id >= (select max(snap_id) from dba_hist_snapshot) – 2
);

============== Recommended Redolog size =====================
SELECT
(SELECT ROUND(AVG(BYTES) / 1024 / 1024, 2) FROM V$LOG) AS “Redo size (MB)”,
ROUND((20 / AVERAGE_PERIOD) * (SELECT AVG(BYTES)
FROM V$LOG) / 1024 / 1024, 2) AS “Recommended Size (MB)”
FROM (SELECT AVG((NEXT_TIME - FIRST_TIME) * 24 * 60) AS AVERAGE_PERIOD
FROM V$ARCHIVED_LOG
WHERE FIRST_TIME > SYSDATE - 7);

SELECT v1.thread#, v3.curr_val “Current Size”,
round((20/ v1.average_period) * (v2.reco1)/ 1024 / 1024, 2) AS “Recommended Size (MB)”
FROM (
SELECT thread#, AVG((NEXT_TIME - FIRST_TIME) * 24 * 60) AS AVERAGE_PERIOD FROM V$ARCHIVED_LOG
WHERE FIRST_TIME > SYSDATE - 3
AND TO_CHAR(FIRST_TIME, 'HH24:MI') >= '12:00'
AND TO_CHAR(FIRST_TIME, 'HH24:MI') <= '14:00'
group by thread#
)V1,
(
SELECT thread#, AVG(BYTES) reco1 FROM V$LOG group by thread#
)V2,
(
SELECT thread#, ROUND(AVG(BYTES) / 1024 / 1024, 2) curr_val FROM V$LOG group by thread#
)V3
where v1.thread# = v2.thread#

and v2.thread# = v3.thread#;

========================================================Dependant DDL================
select DBMS_METADATA.GET_DEPENDENT_DDL('INDEX', 'WRH$_LATCH_CHILDREN', 'SYS') from dual;
=====================================================================================

Redolog:
set linesize 300
column REDOLOG_FILE_NAME format a50
SELECT
a.GROUP#,
a.THREAD#,
a.SEQUENCE#,
a.ARCHIVED,
a.STATUS,
b.MEMBER AS REDOLOG_FILE_NAME,
(a.BYTES/1024/1024/1024) AS SIZE_GB
FROM v$log a
JOIN v$logfile b ON a.Group#=b.Group#
ORDER BY a.GROUP# ASC;

===================================== Parallel Session Details ============
https://community.toadworld.com/platforms/oracle/w/wiki/792.direct-path-read
select decode(ownerid,2147483644,’PARENT’,’CHILD’) stmt_level,
audsid,
sid,
serial#,
username,
osuser,
process,
sql_hash_value,
sql_address
from v$session
where type <> ‘BACKGROUND’
and audsid in (select audsid
from v$session
group by audsid
having count(*) > 1)
order by audsid, stmt_level desc, sid, username, osuser;

select decode(a.qcserial#, null, ‘PARENT’, ‘CHILD’) stmt_level,
a.sid,
a.serial#,
b.username,
b.osuser,
b.sql_hash_value,
b.sql_address,
a.degree,
a.req_degree
from v$px_session a, v$session b
where a.sid = b.sid
order by a.qcsid, stmt_level desc;

select a.name, b.sid, b.value
from v$statname a, v$sesstat b
where a.statistic# = b.statistic#
and b.value > 0
and a.name = ‘physical reads direct’
order by b.value;

Use this query to get the SQL hash value and other user information when there is a direct path read wait:
select a.username,
a.osuser,
to_char(a.logon_time,’MMDD/HH24MISS’) as logon_time,
a.sid,
to_char(sysdate,’MMDD/HH24MI’) as sample_time,
b.event,
a.sql_hash_value
from v$session a, v$session_wait b
where a.sid = b.sid
and b.event = ‘direct path read’;

Use this query to determine the type of sort that is taking place (if any).
select b.sid,
b.serial#,
b.username,
b.osuser,
decode(a.ktssosegt,
1,’SORT’, 2,’HASH’, 3,’DATA’,
4,’INDEX’,5,’LOB_DATA’,6,’LOB_INDEX’,
‘UNDEFINED’) as sort_type
from x$ktsso a, v$session b
where a.inst_id = userenv(‘instance’)
and a.ktssoses = b.saddr
and a.ktssosno = b.serial#
and b.sid in (select sid
from v$session_wait
where event = ‘direct path read’);

Use this query to get the name of the object that is being scanned by parallel query slaves (if P1 is not a TEMPFILE).

select segment_name, partition_name, segment_type, tablespace_name
from dba_extents a, v$session_wait b
where b.p2 between a.block_id and (a.block_id + a.blocks – 1)
and a.file_id = b.p1
and b.event = ‘direct path read’;

=============<<<<< https://weidongzhou.wordpress.com/2016/04/06/direct-path-read-vs-buffer-cache-read/ >>>>>====================
The table needs to be analyzed before getting best results for below query

col table_name for a30
col table_owner for a20

select *
from ( select m.table_owner
, m.table_name
, t.last_analyzed
, m.inserts
, m.updates
, m.deletes
, t.num_rows
, ( m.inserts + m.updates + m.deletes )*100 / case when t.num_rows is null or t.num_rows = 0 then 1 else t.num_rows end “Change Factor%”
from dba_tab_modifications m
, dba_tables t
where t.owner = m.table_owner
and t.table_name = m.table_name
and m.inserts + m.updates + m.deletes > 1
and m.table_owner=’EMAGINEROOT’
and m.table_name = ‘EME_PENDING_QUEUE’
order by “Change Factor%” desc
)
;
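dba_tab_modifications is only refreshed periodically; to see up-to-date insert/update/delete counts before running the query above, flush the in-memory monitoring info first (standard DBMS_STATS call):

exec DBMS_STATS.FLUSH_DATABASE_MONITORING_INFO;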

col component for a25
col size_MB for 999,999
col stth head “Small Table|Threshold in Blks” for 99,999,999
col stth_size head “Small Table|Threshold|in MB” for 999,999
select component, current_size/(1024*1024) size_MB,
current_size/8192 * 0.02 stth,
current_size/(1024*1024) * 0.02 stth_size
from v$sga_dynamic_components
where
component like ‘DEFAULT buffer cache%’;
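The 2% figure above roughly approximates _small_table_threshold (the cutoff used when deciding on serial direct path reads). A sketch to cross-check the actual value against the internal parameter views (run as SYS; informational only):

select i.ksppinm name, v.ksppstvl value
from x$ksppi i, x$ksppsv v
where i.indx = v.indx
and i.ksppinm = '_small_table_threshold';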

============== Statistics history for a table ==============
col owner for a12
col object_name for a25
col object_type for a15
col subobject_name for a25
col obj# for 999999
col save_time for a20
col analyze_time for a20
select o.owner, o.object_name, o.subobject_name, th.obj#, o.object_type,
to_char(analyzetime, ‘yyyy-mm-dd hh24:mi:ss’) analyze_time,
rowcnt, blkcnt, avgrln, samplesize, samplesize,
to_char(savtime, ‘yyyy-mm-dd hh24:mi:ss’) save_time
from sys.WRI$_OPTSTAT_TAB_HISTORY th,
dba_objects o
where
o.object_id = th.obj#
and o.owner = ‘MONETA_USAGE1’
and o.object_name = ‘U_MSC_PREPAID_02’
order by th.analyzetime desc;
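If a plan regression followed a stats refresh, the history above can be rolled back with the standard DBMS_STATS history calls; a minimal sketch (the as_of_timestamp is a placeholder):

select dbms_stats.get_stats_history_retention from dual;     -- how many days of history are kept
select dbms_stats.get_stats_history_availability from dual;  -- oldest restorable point
exec dbms_stats.restore_table_stats(ownname => 'MONETA_USAGE1', tabname => 'U_MSC_PREPAID_02', as_of_timestamp => systimestamp - interval '1' day);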

========================

set lines 400
alter session set nls_date_format=’dd-mon-rr hh24:mi:ss’;
select recid, file#, to_char(creation_change#)crscn,
incremental_level lvl, to_char(incremental_change#) incrscn,
to_char(checkpoint_change#) ckpscn, checkpoint_time ckptime,
completion_time endtime, USED_CHANGE_TRACKING bct,
blocks_read read, block_size bsz, blocks wrtn
from v$backup_datafile
where file# > 0
and completion_time > sysdate-2 and incremental_change#=creation_change#;

==================<<<<<<<<<<<<<<<<< Block change tracking ====================
select file#, avg(datafile_blocks), avg(blocks_read),
avg(blocks_read/datafile_blocks) * 100 as "% read for backup"
from v$backup_datafile
where incremental_level > 0 and used_change_tracking = 'YES'
group by file# order by file#;

Understanding why an RMAN incremental backup will suddenly increase in size (Doc ID 1273492.1)

set lines 800
set pages 100
alter session set nls_date_format=’dd-mon-rr hh24:mi:ss’;
select recid, file#, to_char(creation_change#), incremental_level, to_char(incremental_change#) inc#,to_char(checkpoint_change#) ckp#, datafile_blocks BLKS, block_size blksz, blocks_read READ,
round((blocks_read/datafile_blocks) * 100,2) “%READ”,
blocks WRTN, round((blocks/datafile_blocks)*100,2) “%WRTN”,
completion_time, used_change_tracking
from v$backup_datafile where completion_time > sysdate-30;
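Quick check that block change tracking is actually enabled and where the tracking file lives:

select status, filename, bytes/1024/1024 size_mb from v$block_change_tracking;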

===============================
When troubleshooting archivelog deletion policy issue, here is a better query which counts the number of backups for each sequence:

set linesize 200 pagesize 1000
column is_recovery_dest_file format a21
select
deleted,status,is_recovery_dest_file,thread#,min(sequence#),max(sequence#),min(first_time),max(next_time),count(distinct sequence#),archived,applied,backup_count,count(“x$kccagf”)
from (
select deleted,thread#,sequence#,status,name ,first_time, next_time,case x$kccagf.rectype when 11 then recid end “x$kccagf”
,count(case archived when ‘YES’ then ‘YES’ end)over(partition by thread#,sequence#) archived
,count(case applied when ‘YES’ then ‘YES’ end)over(partition by thread#,sequence#) applied
,sum(backup_count)over(partition by thread#,sequence#) backup_count
,listagg(is_recovery_dest_file||’:’||dest_id,’,’)within group(order by dest_id)over(partition by thread#,sequence#) is_recovery_dest_file
from v$archived_log left outer join sys.x$kccagf using(recid)
) group by deleted,status,is_recovery_dest_file,thread#,archived,applied,backup_count

order by max(sequence#),min(sequence#),thread#,deleted desc,status;

Usage of SET DBID in RMAN

You should only run the SET DBID command in the following specialized circumstances:
•You are not connected to a recovery catalog and want to restore the control file or server parameter file (SPFILE)
•You are connected to a recovery catalog want to restore the control file, but the database name is not unique in the recovery catalog.
•The server parameter (SPFILE) file is lost and you want to restore it.

Oracle documentation states, “Because the repository is not available when you restore the control file, run the SET DBID command to identify the target database.”


To recover the database with an autobackup of the control file without a recovery catalog:
•Start RMAN and connect to the target database. For example, run: CONNECT TARGET /

•Start the target instance without mounting the database. For example: STARTUP NOMOUNT;

•Set the database identifier for the target database with SET DBID. RMAN displays the DBID whenever you connect to the target. You can also obtain it by inspecting saved RMAN log files, querying the catalog, or looking at the filenames of control file autobackup. (refer to “Restoring When Multiple Databases in the Catalog Share the Same Name: Example”). For example, run: SET DBID 676549873;
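It is worth recording the DBID while the database is still healthy (it cannot be queried once only the autobackup is left):

select dbid, name from v$database;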

============ Job Run Status ===========
COL log_id FORMAT 9999 HEADING ‘Log#’
COL log_date FORMAT A32 HEADING ‘Log Date’
COL owner FORMAT A06 HEADING ‘Owner’
COL job_name FORMAT A20 HEADING ‘Job’
COL status FORMAT A10 HEADING ‘Status’
COL actual_start_date FORMAT A32 HEADING ‘Actual|Start|Date’
COL error# FORMAT 999999 HEADING ‘Error|Nbr’

TTITLE ‘Scheduled Tasks That Failed:’

select
log_date,
job_name,
status,
req_start_date,
actual_start_date,
run_duration
from
dba_scheduler_job_run_details
where
-- job_name = 'MYJOB'
status <> 'SUCCEEDED'
order by
actual_start_date;
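To see what the scheduler is executing right now (as opposed to the history above):

select owner, job_name, session_id, running_instance, elapsed_time
from dba_scheduler_running_jobs
order by elapsed_time desc;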

=============================

Script to find table partition column details…..

UNDEF ENTER_OWNER_NAME
UNDEF ENTER_TABLE_NAME
COLUMN OWNER FORMAT A15 HEADING ‘Partition|Owner’
COLUMN NAME FORMAT A15 HEADING ‘Partition|Name’
COLUMN COLUMN_NAME FORMAT a20 HEADING ‘Column|Name’
COLUMN COLUMN_POSITION FORMAT 999 HEADING ‘Key|Pos’
SET lines 78
TTITLE left _date center “Table Partition Key Report” skip 2
SELECT owner, NAME, column_name, column_position
FROM dba_part_key_columns
WHERE owner LIKE UPPER (‘&&ENTER_OWNER_NAME’)
AND NAME LIKE UPPER (‘&&ENTER_TABLE_NAME’)

ORDER BY owner, NAME;

select process,pid,status,sequence#,client_process,block#, active_agents, known_agents,BLOCKS FROM gv$managed_standby ORDER BY thread#, pid;

select DEST_ID,DEST_NAME,DESTINATION,TARGET,STATUS,ERROR from v$archive_dest where dest_id < 5;

SELECT ARCH.THREAD# “Thread”, ARCH.SEQUENCE# “Last Sequence Received”, APPL.SEQUENCE# “Last Sequence Applied”, (ARCH.SEQUENCE# – APPL.SEQUENCE#) “Difference”
FROM (SELECT THREAD# ,SEQUENCE# FROM V$ARCHIVED_LOG WHERE (THREAD#,FIRST_TIME ) IN (SELECT THREAD#,MAX(FIRST_TIME) FROM V$ARCHIVED_LOG GROUP BY THREAD#)) ARCH,
(SELECT THREAD# ,SEQUENCE# FROM V$LOG_HISTORY WHERE (THREAD#,FIRST_TIME ) IN (SELECT THREAD#,MAX(FIRST_TIME) FROM V$LOG_HISTORY GROUP BY THREAD#)) APPL WHERE
ARCH.THREAD# = APPL.THREAD# ORDER BY 1;

select to_char(timestamp, 'dd-mon-yyyy hh24:mi'), message from v$dataguard_status;
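Apply/transport lag straight from the standby (values are reported as interval strings):

select name, value, unit, time_computed
from v$dataguard_stats
where name in ('transport lag','apply lag');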

===================== Audit logon/logoff ============
https://community.broadcom.com/symantecenterprise/communities/community-home/librarydocuments/viewdocument?DocumentKey=cb87b35f-dccb-4dda-ba7f-94806afb4f35&CommunityKey=1ecf5f55-9545-44d6-b0f4-4e4a7f5f5e68&tab=librarydocuments

col os_username for a15
col username for a18
col userhost for a15
col avg_sess_cpu for 99,999
col total for 999,999

select os_username, username, userhost, action_name action, count(*) from dba_audit_trail where timestamp between to_timestamp( '2016-03-02 14:00', 'yyyy-mm-dd hh24:mi') and to_timestamp( '2016-03-02 15:00', 'yyyy-mm-dd hh24:mi') having count(*) > 100
group by os_username, username, userhost, action_name
order by os_username, username, userhost;

SELECT username, extended_timestamp, owner, obj_name, action,action_name,
SQL_TEXT,
RETURNCODE
FROM
dba_audit_trail
where extended_timestamp between to_date('02-07-2019 22:12','dd-mm-yyyy hh24:mi') and to_date('02-07-2019 22:37','dd-mm-yyyy hh24:mi');

=====================

Tablespace quotas:

SELECT * FROM dba_sys_privs WHERE privilege = ‘UNLIMITED TABLESPACE’;

select
username ownr,
tablespace_name name,
decode(greatest(max_bytes, -1),
-1, ‘Unrestricted’,
to_char(max_bytes/1024, ‘999,999,990’)
) qota,
bytes/1024 used
from
dba_ts_quotas
where
max_bytes!=0
or
bytes!=0
order by
1,2
/

SELECT
grantee,
privilege,
DECODE(p,’=>’||grantee,’direct’,p) path
FROM (
SELECT
grantee,
privilege,
SYS_CONNECT_BY_PATH(grantee, ‘=>’) p
FROM (
SELECT
grantee,
privilege
FROM dba_sys_privs
UNION ALL
SELECT
grantee,
granted_role privilege
FROM
dba_role_privs)
START WITH privilege = ‘UNLIMITED TABLESPACE’
CONNECT BY PRIOR grantee = privilege )
WHERE
(grantee IN (SELECT username FROM dba_users)
OR grantee = ‘PUBLIC’);

SELECT
username,
tablespace_name,
privilege
FROM (
SELECT
grantee username, ‘Any Tablespace’ tablespace_name, privilege
FROM (
-- first get the users with direct grants
SELECT
p1.grantee grantee, privilege
FROM
dba_sys_privs p1
WHERE
p1.privilege=’UNLIMITED TABLESPACE’
UNION ALL
-- and then the ones with UNLIMITED TABLESPACE through a role...
SELECT
r3.grantee, granted_role privilege
FROM
dba_role_privs r3
START WITH r3.granted_role IN (
SELECT
DISTINCT p4.grantee
FROM
dba_role_privs r4, dba_sys_privs p4
WHERE
r4.granted_role=p4.grantee
AND p4.privilege = ‘UNLIMITED TABLESPACE’)
CONNECT BY PRIOR grantee = granted_role)
-- we just want to see the users, not the roles
WHERE grantee IN (SELECT username FROM dba_users) OR grantee = ‘PUBLIC’
UNION ALL
-- list the users with unlimited quota on a dedicated tablespace
SELECT
username,tablespace_name,’DBA_TS_QUOTA’ privilege
FROM
dba_ts_quotas
WHERE
max_bytes = -1 )
WHERE tablespace_name LIKE UPPER(‘SYSTEM’)
OR tablespace_name = ‘Any Tablespace’;

============ Create partition monthly automatically in 10g… From 11g partitions are created automatically========
create or replace procedure p_part_maintenance_mnthy
as
v_partition_name all_tab_partitions.partition_name%type;
v_limit varchar2(50);
begin

select to_char(trunc(add_months(sysdate,1),'MM'),'MonYYYY')
into v_partition_name
from dual; -- e.g. output Oct2012

select to_char(trunc(add_months(sysdate,2),'MM'),'dd-mon-yyyy')
into v_limit
from dual; -- e.g. output 01-nov-2012

execute immediate 'ALTER TABLE sales ADD PARTITION ' || v_partition_name ||
' VALUES LESS THAN (TO_DATE(''' || v_limit || ''',''dd-mon-yyyy'')) tablespace XYZ';

end;
/

================== Update Scheduler Job (Job Action) ============
OLD:

JObaction:
DBMS_STATS.set_table_prefs(”ANALYTICS”,”C_TCD”, ”INCREMENTAL”, ”TRUE”);
DBMS_STATS.gather_table_stats(”ANALYTICS”,”C_TCD”, granularity => ”AUTO”, cascade => TRUE,degree => 6,estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE);

New Job Action:
begin
DBMS_SCHEDULER.SET_ATTRIBUTE
( name => ‘SYS.GATHER_TABLE_INCREMENTAL_STATS’
,attribute => ‘JOB_ACTION’
,value => ‘BEGIN
DBMS_STATS.set_table_prefs(”ANALYTICS”,”C_TCD”, ”INCREMENTAL”, ”TRUE”);
dbms_stats.unlock_table_stats(”ANALYTICS”,”C_TCD”);
DBMS_STATS.gather_table_stats(”ANALYTICS”,”C_TCD”, granularity => ”AUTO”, cascade => TRUE,degree => 6,estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE);
dbms_stats.lock_table_stats(”ANALYTICS”,”C_TCD”);
END;
‘);
end;
/
===================================================================== TABLE HAS INCREMENTAL Statistics or not=============
select o.name, c.name, decode(bitand(h.spare2,8),8 ,’yes’,’no’) incremental
from sys.hist_head$ h,sys.obj$ o, sys.col$ c
where h.obj#=o.obj#
and o.obj# = c.obj#
and h.intcol#=c.intcol#
and o.name in (‘CS_SALESTRANSACTION’,
‘CS_CLASSIFICATION’,
‘CS_CREDIT’,
‘CS_SALESORDER’)
and o.subname is null;

===================== LOB Retention======
show parameter undo_retention; ===> Undo
•Specifiy PCTVERSION when creating a table with a lob segment:
The Oracle documentation states the following here:
PCTVERSION is the percentage of all used LOB data space that can be occupied by old versions of LOB data pages.
As soon as old versions of LOB data pages start to occupy more than the PCTVERSION amount of used LOB space, Oracle Database tries to reclaim the old versions and reuse them.
In other words, PCTVERSION is the percentage of used LOB data blocks that is kept available for versioning old LOB data.

As an alternative to the PCTVERSION parameter, you can specify the RETENTION parameter in the LOB storage clause of the CREATE TABLE or ALTER TABLE statement.
Doing so, configures the LOB column to store old versions of LOB data for a period of time, rather than using a percentage of the table space.

The two options are mutually exclusive: either PCTVERSION or RETENTION can be set on a LOB segment, not both.

select nvl(retention,0), count(*)
from dba_lobs
where owner = ‘SAPSR3’

group by retention;
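A sketch of switching an existing LOB column between the two models (table/column names are placeholders; check the LOB storage clause options for your release before running):

alter table my_tab modify lob (my_clob) (retention);      -- use undo_retention-based versioning
alter table my_tab modify lob (my_clob) (pctversion 10);  -- or reserve 10% of LOB space for old versions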

col sid form 999 trunc
col username form a8 trunc
col osuser form a12 trunc
col program form a15 trunc head “Client|Program”
col block_gets head “DB|Block|Gets” form 999999999
col consistent_gets head “Consi|stent|Gets” form 999999999
col Physical_reads head “Phys|ical|Reads” form 999999999
col block_changes head “Block|Changes” form 9999999
col consistent_changes head “Consi|stent|Chan|ges” form 9999999

select si.sid,substr(se.username,1,10) username,substr(osuser,1,12) osuser,
substr(program||module,1,15) program, si.block_gets,si.consistent_gets,
si.physical_reads,si.block_changes, si.consistent_changes
from v$sess_io si, v$session se
where si.sid=se.sid
and si.physical_reads > 1000
and si.block_changes > 100
order by 7,6,5

================ Hanganalyze =========
Hanganalyze
sqlplus ‘/ as sysdba’
oradebug setmypid
oradebug unlimit
oradebug hanganalyze 3
— Wait one minute before getting the second hanganalyze
oradebug hanganalyze 3
oradebug tracefile_name
exit
Systemstate
sqlplus ‘/ as sysdba’
oradebug setmypid
oradebug unlimit
oradebug dump systemstate 258
oradebug dump systemstate 258
oradebug tracefile_name
exit

[‎11/‎1/‎2018 6:27 PM] Sunil Potluri:
1-Form of Execution for Single Instance Environments:

sqlplus / as sysdba

oradebug setmypid

oradebug unlimit

oradebug hanganalyze 3

oradebug dump systemstate 258

2 - Form of execution for RAC environments:

oradebug setmypid

oradebug unlimit

oradebug -g all hanganalyze 3

oradebug -g all dump systemstate 258
========================================== Shrink table progress =====================
select
b.sid
,a.event
,a.WAIT_TIME
,round(d.read_value/1024/1024/1024,2) read_gb
,round(d.write_value/1024/1024/1024,2) write_gb
,round(d.undo_value/1024/1024/1024,2) undo_gb
,e.current_undo
,c.SQL_TEXT
from v$session_wait a
join v$session b on a.sid = b.sid
join v$sqlarea c on b.SQL_ID = c.SQL_ID
join (select * from (select s.sid, t.name, s.value from v$sesstat s inner join v$statname t on s.statistic#=t.statistic#)
pivot (sum(value) as value for name in (
‘undo change vector size’ UNDO
,’physical read total bytes’ READ
,’physical write total bytes’ WRITE
))) d on a.sid=d.sid
join (select b.sid, sum(a.USED_UBLK) current_undo from V$TRANSACTION a join v$session b on a.SES_ADDR=b.saddr group by b.sid) e on e.sid=a.sid
where upper(c.sql_text) like ‘ALTER TABLE%SHRINK SPACE%’
and b.sid != (select sys_context(‘USERENV’,’SID’) from dual)
;

select a.event , a.WAIT_TIME , c.SQL_TEXT , c.PHYSICAL_READ_BYTES / 1024 / 1024 / 1024 “GB_READ” , c.PHYSICAL_WRITE_BYTES / 1024 / 1024 / 1024 “GB_WRITE”
from v$session_wait a , v$session b , v$sql c
where a.SID = &SID
and a.sid = b.sid
and b.SQL_ID = c.SQL_ID;

EVENT WAIT_TIME SQL_TEXT GB_READ GB_WRITE


db file sequential read 0 alter table USLAM_SE 11.1685791 0

EVENT WAIT_TIME SQL_TEXT GB_READ GB_WRITE


log file switch (checkpoint in 0 alter table USLAM_SE 6.09918213 0
complete)

EVENT WAIT_TIME SQL_TEXT GB_READ GB_WRITE


enq: CR – block range reuse ck 0 alter table USLAM_SE 6.16873932 0
pt
EVENT WAIT_TIME SQL_TEXT GB_READ GB_WRITE


reliable message -1 alter table USLAM_SE 6.52017975 0

=======================================================================================

rem
rem Script: lock_types.sql
rem Author: Jonathan Lewis
rem Dated: Mar 2018
rem Usage: start lock_types {lock type}
rem

define m_lock_type=’&1′

column display new_value m_display

select
case when substr(version,1,2) = ’12’
then
‘display_name’
else
‘name’
end display
from
v$instance
;

set linesize 160
set pagesize 60
set trimspool on

column type format a4
column name format a32
column description format a132
column id1_tag format a32
column id2_tag format a32
column is_user format a4 heading “User”
column is_recycle format a4 heading “Rcyc”

set feedback off
break on report skip 1

spool lock_types

select *
from V$lock_type
where type = upper(‘&m_lock_type’)
order by
type
;

column name format a42
column parameter1 format a9
column parameter2 format a24
column parameter3 format a22
column wait_class format a14
column display_name format a42

select
eve.name,
eve.parameter1,
eve.parameter2,
eve.parameter3,
eve.wait_class,
nullif(eve.&m_display, eve.name) display_name
from
v$event_name eve
where
eve.name like ‘enq: ‘ || upper(‘&m_lock_type’) || ‘%’
order by
nullif(eve.wait_class,’Other’),
eve.name
;

set feedback on

Removing a file will not be enough to clear the space. The process that was writing to that file also needs to be stopped (or restarted) to release the space. In future, truncate large files first (e.g. cat /dev/null > bigfile) before removing them; that shrinks the file in place while the writing process keeps a valid file handle.

A restart of your process should fix your issue.

File removal fails to free up disk space (Also df and du show different output)(1001624.1)

CURRENT Issue Clarification:
removing a file doesn’t clear the space

CURRENT Business Impact:

customer to provide business impact, if any
================
Delta changes for a table:
SELECT *
FROM ( SELECT m.table_owner
, m.table_name
, t.last_analyzed
, m.inserts
, m.updates
, m.deletes
, t.num_rows
, ( m.inserts + m.updates + m.deletes ) / CASE WHEN t.num_rows IS NULL OR t.num_rows = 0 THEN 1 ELSE t.num_rows END “Change Factor”
FROM dba_tab_modifications m
, dba_tables t
WHERE t.owner = m.table_owner
AND t.table_name = m.table_name
AND m.inserts + m.updates + m.deletes > 1
AND m.table_owner=’&Enter_Table_Owner’
AND m.table_name like ‘&Enter_Table_Name’
ORDER BY “Change Factor” DESC
)
/
===================== expdp in 12c with pdb ===========
userid="sunilp/summer#1234@ncmajor"   (sunilp was created with DBA privileges; ncmajor is a tnsnames.ora entry)
directory=rdb_exp
dumpfile=oaim_major_exp_%U.dmp
logfile=oaim_major_exp.log
schemas=OAIM_MAJOR
exclude=statistics
metrics=y
logtime=all
parallel=4
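The parfile assumes the rdb_exp directory object already exists; a sketch of creating it inside the PDB (the filesystem path is a placeholder):

create directory rdb_exp as '/u01/app/oracle/exports/rdb_exp';
grant read, write on directory rdb_exp to sunilp;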
================ Time between redolog switch=========
select b.recid,
to_char(b.first_time, 'dd-mon-yy hh24:mi:ss') start_time,
a.recid,
to_char(a.first_time, 'dd-mon-yy hh24:mi:ss') end_time,
round((a.first_time-b.first_time)*24*60,2) minutes
from v$log_history a, v$log_history b
where a.recid = b.recid + 1
order by a.first_time asc
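Related sanity check: log switches per hour over the last day (a high rate per hour usually means the logs are undersized):

select to_char(first_time,'YYYY-MM-DD HH24') hour, count(*) switches
from v$log_history
where first_time > sysdate - 1
group by to_char(first_time,'YYYY-MM-DD HH24')
order by 1;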

============== Optimal redo log size =========
SELECT
(SELECT ROUND(AVG(BYTES) / 1024 / 1024, 2) FROM V$LOG) AS “Redo size (MB)”,
ROUND((20 / AVERAGE_PERIOD) * (SELECT AVG(BYTES)
FROM V$LOG) / 1024 / 1024, 2) AS “Recommended Size (MB)”
FROM (SELECT AVG((NEXT_TIME – FIRST_TIME) * 24 * 60) AS AVERAGE_PERIOD
FROM V$ARCHIVED_LOG

WHERE FIRST_TIME > SYSDATE – 300);

** Redo logs size
column REDOLOG_FILE_NAME format a50
SELECT
a.GROUP#,
a.THREAD#,
a.SEQUENCE#,
a.ARCHIVED,
a.STATUS,
b.MEMBER AS REDOLOG_FILE_NAME,
(a.BYTES/1024/1024) AS SIZE_MB
FROM v$log a
JOIN v$logfile b ON a.Group#=b.Group#
ORDER BY a.GROUP# ;
============================ Tablespace Free space =============
select
t1.tablespace_name,
(nvl(t1.Total_size_mb1,0)+nvl(t1.Total_size_mb2,0)+nvl(t1.Total_size_mb3,0)) as tot_size_mb,
(nvl(t1.free_space_mb1,0) + nvl(t2.free_space_mb2,0)) free_size_mb,
round(((nvl(t1.free_space_mb1,0) + nvl(t2.free_space_mb2,0))/(nvl(t1.Total_size_mb1,0)+nvl(t1.Total_size_mb2,0)
+nvl(t1.Total_size_mb3,0)))100)as perecnt_free from ( select tablespace_name, round(sum((case when autoextensible=’YES’ and bytes<=maxbytes then maxbytes else 0 end)/1048576)) as Total_size_mb1, round(sum((case when autoextensible=’YES’ and bytes>maxbytes then bytes else 0 end)/1048576)) as Total_size_mb2, round(sum((case when autoextensible=’NO’ then bytes else 0 end)/1048576)) as Total_size_mb3, round(sum((case when autoextensible=’YES’ and bytes100)<=13 and round(nvl(t1.free_space_mb1,0) + nvl(t2.free_space_mb2,0))<=30000

);

=============================Script to find Table and its partition statistics=====================
select
all_part.owner as schema_name,
all_part.table_name,
NVL(all_tab.partition_name,’N/A’),
all_tab.num_rows,
all_tab.last_analyzed
from
dba_part_tables all_part,
dba_tab_partitions all_tab
where all_part.table_name = all_tab.table_name and
all_part.owner = all_tab.table_owner and
all_part.owner =’TCDBPRD’ and all_part.table_name=’CS_MEASUREMENT’

order by all_part.table_name,all_tab.partition_name;

Truncate HTML FILE and generate new html file
On meyicmlvdb1 create a new HTML file which contains only the first 10,000 lines of the SQLT XTRACT report:

sed -n -e ‘1,10000p’ sqlt_s95404_main.html > mini_main.html

====================== Max partition dynamically==============
select
‘exec sys.dbms_stats.gather_table_stats(ownname=> ‘ ||chr(39)||TABLE_owner||chr(39)||’,tabname=> ‘ ||chr(39)||TABLE_NAME||chr(39)||’,partname=> ‘ ||chr(39)||PARTITION_NAME||chr(39)||’,degree=> 8,cascade=>true,force=>true);’
FROM DBA_TAB_partitions WHERE TABLE_OWNER IN (‘ICSPRDADM’) AND table_name in (‘ICS_CR_E_INT_STATUS_RET’)
and
substr(partition_name,25) = to_char(sysdate,’yyyymm’);

====================== Optimized RMAN backups ==================
https://docs.oracle.com/cd/B28359_01/backup.111/b28270/rcmbckad.htm#BRADV89581

===========================RMAN Log data from database=========================
select output from v$rman_output where session_recid >=(select max(session_recid)-4 from v$rman_status) order by recid;

select object_type,mbytes_processed, start_time, end_time,status
from v$rman_status
where session_recid = (select max(session_recid) from v$rman_status)
and operation !=’RMAN’
order by recid;

=============== PGA and SGA usage by hourwise============
select sn.INSTANCE_NUMBER, sga.allo sga, pga.allo pga,(sga.allo+pga.allo) tot,trunc(SN.END_INTERVAL_TIME,’mi’) time
from
(select snap_id,INSTANCE_NUMBER,round(sum(bytes)/1024/1024/1024,3) allo
from DBA_HIST_SGASTAT
group by snap_id,INSTANCE_NUMBER) sga
,(select snap_id,INSTANCE_NUMBER,round(sum(value)/1024/1024/1024,3) allo
from DBA_HIST_PGASTAT where name = ‘total PGA allocated’
group by snap_id,INSTANCE_NUMBER) pga
, dba_hist_snapshot sn
where sn.snap_id=sga.snap_id
and sn.INSTANCE_NUMBER=sga.INSTANCE_NUMBER
and sn.snap_id=pga.snap_id
and sn.INSTANCE_NUMBER=pga.INSTANCE_NUMBER
order by sn.snap_id desc, sn.INSTANCE_NUMBER
;


WITH
pga_data as
(
SELECT /*+ MATERIALIZE */
sample_time,
nvl(sum(ash.pga_allocated/1024/1024),0) AS sum_pga_mb
FROM
dba_hist_active_sess_history ash,
dba_users u
WHERE ash.user_id = u.user_id
AND u.username LIKE '&Username'
AND sample_time > SYSDATE-&Days
AND sample_time < SYSDATE
GROUP BY action, sample_time
),
cal_data AS
(
SELECT trunc(SYSDATE, 'MI') - (LEVEL/(24*60)*&Mins) AS date_min,
trunc(SYSDATE, 'MI') - ((LEVEL-1)/(24*60)*&Mins) AS date_max
FROM dual
CONNECT BY LEVEL < (24*60*&&Days/&Mins)+1
ORDER BY date_min
)
SELECT /*+ NO_MERGE(h) NO_MERGE(c) */
to_char(c.date_min, 'YYYY-MM-DD HH24:MI:SS') date_min,
trunc(nvl(avg(sum_pga_mb),0), 2) avg_pga_mb,
trunc(nvl(min(sum_pga_mb),0), 2) min_pga_mb,
trunc(nvl(max(sum_pga_mb),0), 2) max_pga_mb
FROM pga_data h, cal_data c
WHERE h.sample_time (+) >= c.date_min
AND h.sample_time (+) < c.date_max
GROUP BY c.date_min order by 1;

===================== Tablespace Name for Indexes related to a table ===============
select distinct s.tablespace_name,segment_name from dba_segments s,dba_indexes i where i.table_name=’ICS_CR_T_EXEMPTION’ and i.index_name=s.segment_name and segment_type like ‘INDEX%’;

nohup split -b1024m expdp_schemas_SDE_NE_LB.dmp.gz expdp_schemas_SDE_NE_LB_ & --> Split file expdp_schemas_SDE_NE_LB.dmp.gz into 1GB pieces with prefix "expdp_schemas_SDE_NE_LB_". The original file remains intact.

===================== cpu utilization in linux ===========
mpstat -P ALL
mpstat 1 5
ps -auxf | sort -nr -k 3 | head -10

============

https://coskan.wordpress.com/2007/09/14/what-i-learned-about-shared-pool-management/
COLUMN component FORMAT A30

SELECT component,
ROUND(current_size/1024/1024) AS current_size_mb,
ROUND(min_size/1024/1024) AS min_size_mb,
ROUND(max_size/1024/1024) AS max_size_mb
FROM v$sga_dynamic_components
WHERE current_size != 0
ORDER BY component;

column oper_type format a12
column component format a25
column parameter format a22
column timed_at format a10

select
to_char(start_time,’hh24:mi:ss’) timed_at,
oper_type,
component,
parameter,
oper_mode,
initial_size,
final_size
from
v$sga_resize_ops
where
start_time between trunc(sysdate-1) and trunc(sysdate)
order by
start_time, component
;

================ BSCS Hanganalyze ================
Generate Hanganalyze :–

sqlplus / as sysdba
oradebug setmypid
oradebug unlimit
oradebug tracefile_name

oradebug hanganalyze 3
–wait 1 minute
oradebug hanganalyze 3
–wait 1 minute
oradebug hanganalyze 3

systemstate dump :-

sqlplus / as sysdba

alter session set max_dump_file_size = unlimited;

oradebug setmypid
oradebug unlimit
oradebug tracefile_name

oradebug dump systemstate 266; --------------> this command can run for a long time and generate a very large trace file. If the file grows beyond ~5 GB, break out of the command with Ctrl+C, find the sid,serial# of the shadow/server process, and kill that session. The kill may need to be retried and can take some time to complete.

============================================ Solaris FIlesystem alert based on threshold==============
df -h /oracle|sed 1d
oraclepool 20G 4.5G 15G 23% /oracle
bash-3.2$

for line in $(df -h /oracle|grep -v 'Mounted'|awk '{print$6"-"$5"-"$4}')
do
echo $line|awk -F- ‘{print$2}’|cut -d % -f 1
done

for fs in $(df -hk | awk ‘{print $6}’ | sed ‘1 d’); do
chk=$(df -hk ${fs} | sed ‘1 d’ | awk ‘{print $5}’ | awk -F\% ‘{print $1}’)
if [ ${chk} -gt 10 ]; then
echo “$(hostname): Alert Fileystem ${fs} is above 10%.”
fi
done

df -h /oracle|sed ‘1d’|grep -v ‘oracle’|awk ‘{print $6 ” ” $5}’|cut -d% -f1
/oracle 23

$ORACLE_HOME/bin/adrci << CMD
set home diag/rdbms/piap1/piap1
set homepath diag/rdbms/piap1/piap1
purge -age 43200 -type ALERT;
purge -age 43200 -type INCIDENT;
purge -age 43200 -type TRACE;
purge -age 43200 -type CDUMP;
purge -age 43200 -type HM;
purge -age 43200 -type UTSCDMP;
CMD
===================================== Modified DDL Timestamp Check ====================
set pagesize 10000
set linesize 140
column d_name format a20
column p_name format a20
select do.obj# d_obj,do.name d_name, u.name owner, po.obj# p_obj,po.name p_name,
to_char(p_timestamp,’DD-MON-YYYY HH24:MI:SS’) “P_Timestamp”,
to_char(po.stime ,’DD-MON-YYYY HH24:MI:SS’) “STIME”,
decode(sign(po.stime-p_timestamp),0,’SAME’,’DIFFER‘) X
from sys.obj$ do, sys.dependency$ d, sys.obj$ po, sys.user$ u
where P_OBJ#=po.obj#(+)
and D_OBJ#=do.obj#
and do.status=1 /* dependent is valid */
and po.status=1 /* parent is valid */
and po.stime!=p_timestamp /* parent timestamp does not match */
and do.owner#=u.user#
and do.type# = 5
order by 2,1;
======================================================== BLOCK CHANGE TRACKING ===========
select COMPLETION_TIME, USED_CHANGE_TRACKING, BLOCKS, BLOCKS_READ
from v$backup_datafile
where file# = 1
order by 1
/

select sum(BLOCKS_READ)/sum(DATAFILE_BLOCKS)
from v$backup_datafile
where USED_CHANGE_TRACKING = ‘YES’
/

===============SQL_BIND_CAPTURE=================
SET PAGESIZE 60
SET LINESIZE 300

COLUMN sql_text FORMAT A120
COLUMN sql_id FORMAT A13
COLUMN bind_name FORMAT A10
COLUMN bind_value FORMAT A26

SELECT
sql_id,
--t.sql_text sql_text,
b.name bind_name,
b.value_string bind_value,
last_captured
FROM
v$sql t
JOIN
v$sql_bind_capture b using (sql_id)
WHERE
b.value_string is not null
AND
sql_id=’&sqlid’ order by last_captured
/

===================== Database Growth ==================
SEt linesize 300 pages 40
compute sum of “Growth in GBytes” on report
break on report
SELECT TO_CHAR(creation_time, ‘RRRR Month’) “Month”,
round(SUM(bytes)/1024/1024/1024) “Growth in GBytes”
FROM sys.v_$datafile
WHERE creation_time >= SYSDATE-365
GROUP BY TO_CHAR(creation_time, ‘RRRR Month’);

alter session set nls_date_format=’YYYY-MON’;
compute sum of “Growth in GBytes” on report
break on report
SELECT trunc(creation_time,’MONTH’),
round(SUM(bytes)/1024/1024/1024) “Growth in GBytes”
FROM sys.v_$datafile
WHERE creation_time >= SYSDATE-365
GROUP BY Trunc(creation_time,’MONTH’) order by Trunc(creation_time, ‘MONTH’);

select sum(bytes)/1024/1024/1024 from dba_free_space;

prompt Report Current Wait Events.

set linesize 150
set verify off
set serveroutput on size 1000000
set feedback off

declare
cursor c_waits is
select w.sid
,decode (s.username,s.schemaname,s.username,s.username||’/’||s.schemaname) user_schema
,w.event
,w.p1, w.p1text, w.p1raw
,w.p2, w.p2text
,w.p3, w.p3text
— ,w.wait_time
,w.seconds_in_wait
,decode (substr (w.state, 1, 7)
,’WAITING’,’Y’
,’WAITED ‘,’N’
,w.state) as state
from v$session_wait w
,v$session s
where w.event <> 'SQL*Net message from client' and w.event <> 'SQL*Net message to client'
and w.event <> 'rdbms ipc message'
and w.event <> 'smon timer'
and w.event <> 'pmon timer'
and w.sid = s.sid
order by w.p1text, w.p1, w.sid;
str varchar2(500);
s_desc varchar2(500);
n_db_files number default 1024;
begin
dbms_output.put_line (‘Sid User Event W Scnds Details’);
dbms_output.put_line (‘— ———- ——————————- – —– —————————–‘);
for r_wait in c_waits
loop
str := rpad (to_char (r_wait.sid),4);
str := str|| rpad (substr (r_wait.user_schema,1,10),11);
str := str|| rpad (substr (r_wait.event,1,30),31);
str := str|| ‘ ‘||r_wait.state;
str := str|| lpad (r_wait.seconds_in_wait,6);
str := str|| lpad (substr (r_wait.p1text,1,15),16)||’: ‘;
str := str|| ‘(‘||r_wait.p1raw||’)’;
— decode parameter 1 of wait event if we know how
if (r_wait.p1text = ‘files’) then
str := str||’ Files:’||r_wait.p1||’ Blks:’||r_wait.p2||’ Reqs:’||r_wait.p3;
elsif (r_wait.p1text = ‘file#’)
or (r_wait.p1text = ‘file number’) then
begin
select to_number (value)
into n_db_files
from v$parameter
where name = ‘db_files’;
exception
when others then null;
end;
if r_wait.p1 > n_db_files then
begin /* temporary file */
select substr (file_name, instr (file_name,'/',-2) + 1)
into s_desc
from dba_temp_files
where to_char (file_id) = to_char(r_wait.p1 - n_db_files);
exception
when no_data_found then s_desc := to_char(null);
end;
else
begin /* data file */
select substr (file_name, instr (file_name,’/’,-2) + 1)
into s_desc
from dba_data_files
where file_id = r_wait.p1;
exception
when no_data_found then s_desc := to_char(null);
end;
end if;
str := str||’ ”’||s_desc||””;
elsif r_wait.p1text=’name|mode’ then
begin
select chr(bitand(r_wait.p1,-16777216)/16777215) ||
chr(bitand(r_wait.p1,16711680)/65535)||’|’||
bitand (r_wait.p1,65536)
into s_desc
from dual;
exception
when no_data_found then s_desc := to_char(null);
end;
str := str||’ (‘||s_desc||’)’;
end if;
— decode parameter 2 of wait event if we know how
if (r_wait.event = ‘latch free’)
or (r_wait.event = ‘latch activity’) then
begin
select name
into s_desc
from v$latch
where latch# = r_wait.p2;
exception
when no_data_found then s_desc := to_char(null);
end;
str := str||’ ‘||s_desc;
end if;
dbms_output.put_line (str);
end loop;
end;
/
================ ERP concurrent Managers ============
select decode(CONCURRENT_QUEUE_NAME,’FNDICM’,’Internal Manager’,’FNDCRM’,’Conflict Resolution Manager’,’AMSDMIN’,’Marketing Data Mining Manager’,’C_AQCT_SVC’,’C AQCART Service’,’FFTM’,’FastFormula Transaction Manager’,’FNDCPOPP’,
‘Output Post Processor’,’FNDSCH’,’Scheduler/Prereleaser Manager’,’FNDSM_AQHERP’,’Service Manager: AQHERP’,’FTE_TXN_MANAGER’,’Transportation Manager’,’IEU_SH_CS’,’Session History Cleanup’,’IEU_WL_CS’,’UWQ Worklist Items Release for
Crashed session’,’INVMGR’,’Inventory Manager’,’INVTMRPM’,’INV Remote Procedure Manager’,’OAMCOLMGR’,’OAM Metrics Collection Manager’,’PASMGR’,’PA Streamline Manager’,’PODAMGR’,’PO Document Approval Manager’,’RCVOLTM’,’Receiving
Transaction Manager’,’STANDARD’,’Standard Manager’,’WFALSNRSVC’,’Workflow Agent Listener Service’,’WFMLRSVC’,’Workflow Mailer Service’,’WFWSSVC’,’Workflow Document Web Services Service’,’WMSTAMGR’,’WMS Task Archiving
Manager’,’XDP_APPL_SVC’,’SFM Application Monitoring Service’,’XDP_CTRL_SVC’,’SFM Controller Service’,’XDP_Q_EVENT_SVC’,’SFM Event Manager Queue Service’,’XDP_Q_FA_SVC’,’SFM Fulfillment Actions Queue Service’,’XDP_Q_FE_READY_SVC’,’SFM
Fulfillment Element Ready Queue Service’,’XDP_Q_IN_MSG_SVC’,’SFM Inbound Messages Queue Service’,’XDP_Q_ORDER_SVC’,’SFM Order Queue Service’,’XDP_Q_TIMER_SVC’,’SFM Timer Queue Service’,’XDP_Q_WI_SVC’,’SFM Work Item Queue
Service’,’XDP_SMIT_SVC’,’SFM SM Interface Test Service’) as “Concurrent Manager’s Name”, max_processes as “TARGET Processes”, running_processes as “ACTUAL Processes” from apps.fnd_concurrent_queues where CONCURRENT_QUEUE_NAME in
(‘FNDICM’,’FNDCRM’,’AMSDMIN’,’C_AQCT_SVC’,’FFTM’,’FNDCPOPP’,’FNDSCH’,’FNDSM_AQHERP’,’FTE_TXN_MANAGER’,’IEU_SH_CS’,’IEU_WL_CS’,’INVMGR’,’INVTMRPM’,’OAMCOLMGR’,’PASMGR’,’PODAMGR’,’RCVOLTM’,’STANDARD’,’WFALSNRSVC’,’WFMLRSVC’,’WFWSSVC’,’WMST
AMGR’,’XDP_APPL_SVC’,’XDP_CTRL_SVC’,’XDP_Q_EVENT_SVC’,’XDP_Q_FA_SVC’,’XDP_Q_FE_READY_SVC’,’XDP_Q_IN_MSG_SVC’,’XDP_Q_ORDER_SVC’,’XDP_Q_TIMER_SVC’,’XDP_Q_WI_SVC’,’XDP_SMIT_SVC’);

===================== Oracle RAC giving out of memory while starting db troubleshoot ==================
on Linux:
getconf PAGE_SIZE ====> gives the default OS page size; the HugePage size is in /proc/meminfo (Hugepagesize)
cat /proc/meminfo ---> check the AnonHugePages entry. If HugePages is not being used, Transparent HugePages (anonhugepages) should be disabled
ipcs ---> shows the shared memory segments currently allocated; run this while the issue is occurring

  1. ipcs
  2. collect strace $strace -f -o /tmp/strace.txt sqlplus /nolog
    SQL> startup nomount
  3. cat /etc/sysctl.conf
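To compare against the HugePages / kernel.shmmax settings, the SGA figures as seen from inside the instance:

select name, round(bytes/1024/1024) mb
from v$sgainfo
where name in ('Maximum SGA Size','Granule Size','Free SGA Memory Available');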

================ Recycle Bin Space uSage ==============
select Tablespace_size_MB, free_space_KB, recyclebin_size_KB, Segment_size_KB
from
( select sum(bytes)/1024/1024 Tablespace_size_MB from dba_data_files
where tablespace_name = ‘&&Tablespace_name’),
( select nvl(sum(bytes)/1024,0) free_space_KB from dba_free_space
where tablespace_name = ‘&&Tablespace_name’),
( select nvl(sum(space) * 8,0) recyclebin_size_KB from dba_recyclebin
where ts_name = ‘&&Tablespace_name’),
(select sum(bytes)/1024 Segment_size_KB from dba_segments where tablespace_name = ‘&&Tablespace_name’)
/
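If the recyclebin portion is what is filling the tablespace, it can be released (needs the appropriate privileges; PURGE is irreversible):

purge tablespace &&Tablespace_name;   -- recyclebin objects in this tablespace only
-- purge dba_recyclebin;              -- everything, run as SYSDBA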
======================== Transaction rollback progress ===============
Select decode(sl.totalwork, 0, 0, round(100 * sl.sofar/sl.totalwork, 2)) “Percent”,
sl.message “Message”,
sl.start_time,
sl.elapsed_seconds,
sl.time_remaining,
s.username, s.machine, s.program
from v$Session_longops sl, v$session s
where sl.sid = s.sid
and sl.serial# = s.serial#
and (sl.totalwork > sl.sofar)
ORDER BY s.SID;

SELECT usn, state, undoblockstotal “Total”,
undoblocksdone “Done”,
undoblockstotal-undoblocksdone “ToDo”,
DECODE(cputime,0,’unknown’,SYSDATE+(((undoblockstotal-undoblocksdone) / (undoblocksdone / cputime)) / 86400)) “Finish at”
FROM v$fast_start_transactions;


set serveroutput on
DECLARE
type t_undoblocks is table of number index by varchar2(100);
type t_ublk is table of number index by varchar2(100);
v_undoblocks t_undoblocks;
v_ublk t_ublk;
v_eta number;
v_sleep number := 3;
BEGIN
for r in (SELECT cast(b.XID as varchar2(100)) xid, b.used_urec FROM v$transaction b)
LOOP
v_ublk(r.xid) := r.used_urec;
end loop;
dbms_output.put_line(‘Checking if SMON is recovering any transactions’);
for r in (select cast(XID as varchar2(100)) xid, state,undoblocksdone,undoblockstotal,RCVSERVERS from V$FAST_START_TRANSACTIONS where state<>’RECOVERED’)
LOOP
v_undoblocks(r.xid) := r.undoblocksdone;
dbms_output.put_line(rpad(‘TransactionID’,25) || rpad(‘state’,15) || rpad(‘recover_servers’,20) || rpad(‘undo_blocks_total’,20) || rpad(‘undo_blocks_done’,20));
dbms_output.put_line(rpad(r.XID,25) || rpad(r.state,25) || rpad(to_char(r.RCVSERVERS),20) || rpad(to_char(r.undoblockstotal),20) || rpad(to_char(r.undoblocksdone),20));
end loop;

dbms_output.put_line(chr(10) ||’Sleep ‘||v_sleep||’ seconds to check again…’);
dbms_lock.sleep(v_sleep);

for r in (select cast(XID as varchar2(100)) xid, state,undoblocksdone,undoblockstotal,RCVSERVERS from V$FAST_START_TRANSACTIONS where state<>’RECOVERED’)
LOOP
if v_undoblocks.exists(r.xid) then
if r.undoblocksdone > v_undoblocks(r.xid) then
v_eta := round((r.undoblockstotal-r.undoblocksdone)*v_sleep/60/(r.undoblocksdone-v_undoblocks(r.xid)),1);
dbms_output.put_line('SMON is rolling back '||r.xid||'...'||r.undoblocksdone||' out of '||r.undoblockstotal||' blocks are done...ETA is '||v_eta||' minutes');
else
dbms_output.put_line('SMON is rolling back '||r.xid||'...'||r.undoblocksdone||' out of '||r.undoblockstotal||' blocks are done...ETA is unknown, pls try again');
end if;
end if;
end loop;

dbms_output.put_line(chr(10) ||’Checking if any transaction is rolling back by itself’);
for r in (SELECT a.sid, cast(b.XID as varchar2(100)) xid, b.used_urec FROM v$session a, v$transaction b WHERE a.saddr = b.ses_addr)
LOOP
if v_ublk.exists(r.xid) then
if v_ublk(r.xid) > r.used_urec THEN
v_eta := round(r.used_urec * v_sleep/60/(v_ublk(r.xid) - r.used_urec), 1);
dbms_output.put_line(‘SID,XID : ‘||r.sid||’,’||r.xid||’ is rolling back…’||r.used_urec||’ blocks to go…ETA is ‘||v_eta||’ minutes’);
end if;
end if;
end loop;
end;
/

========================
SQL> select value - count(file_id) "No.of Files Can be Added",(value - count(file_id))/value*100 "Free % Available" from v$parameter,dba_data_files where name='db_files' having (value - count(file_id))/value*100 >10 group by value;

No.of Files Can be Added Free % Available


                 294       28.7109375

SQL> select filestoadd,percent_free from (select value – count(file_id) filestoadd,(value – count(file_id))/value*100 percent_free from v$parameter,dba_data_files where name=’db_files’ group by value) T where percent_free>10;

FILESTOADD PERCENT_FREE


   294   28.7109375

select 'Critical: Current number of DB files ('|| files_to_add ||') is less than or equal to 15% ('|| trunc(percent_free) ||'% ) of configured DB_FILES ('||value||').
Please contact DBA' from (select value - count(file_id) files_to_add,(value - count(file_id))/value*100 percent_free,value from v$parameter,dba_data_files where name='db_files' group by value)

where percent_free <=15;
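If the alert fires, the limit can be raised (db_files is not dynamic; the change needs an instance restart):

alter system set db_files=400 scope=spfile;   -- 400 is an example value
-- restart the instance for the new value to take effect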

CREATE OR REPLACE TRIGGER SYS.TRACE_EMCDU
AFTER LOGON
ON DATABASE
DECLARE

  v_sid       NUMBER(10);
  v_serial    NUMBER(10);
  v_username  VARCHAR2(30);
  v_osuser    VARCHAR2(30);
  v_PROXYUSER VARCHAR2(30);
  v_IPADDR    VARCHAR2(20);
  V_machine   VARCHAR2(64);
  V_terminal  VARCHAR2(30);
  v_program   VARCHAR2(48);
  v_module    VARCHAR2(48);
  v_status    VARCHAR2(10);
  v_count     NUMBER(10);


CURSOR cur_logon IS
  SELECT SID,
     serial#,
     upper(USER),
     upper(osuser),
     SYS_CONTEXT('userenv','proxy_user'),
     SYS_CONTEXT ('userenv','IP_address'),
     upper(machine),
     upper(terminal),
     upper(program),
     upper(MODULE)
    FROM v$session
  WHERE audsid = USERENV('sessionid');

BEGIN

  v_status := 'ACCEPT';
  v_count  := 0;

  OPEN cur_logon;
  FETCH cur_logon INTO
    v_sid,
    v_serial,
    v_username,
      V_osuser,
    v_proxyuser,
    v_ipaddr,
    v_machine,
    v_terminal,
    v_program,
    v_module;

  IF
(upper(V_osuser)='JBPRDADMN' AND  upper(v_machine)='MEYLVJB01' AND  upper(v_username)='EMCDU')
   THEN
      EXECUTE IMMEDIATE 'ALTER SESSION SET EVENTS ''10046 trace name context forever, level 12''';
      EXECUTE IMMEDIATE 'alter session set max_dump_file_size=unlimited';
      INSERT INTO EMCDU_AUDIT VALUES(v_sid,v_serial,v_username);
  END IF;

CLOSE cur_logon;

END;

/

ALTER TRIGGER SYS.TRACE_EMCDU DISABLE;
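The trigger assumes the EMCDU_AUDIT table already exists; a minimal sketch matching the three values the INSERT supplies (column names/sizes are assumptions):

create table sys.emcdu_audit
( sid      number(10),    -- assumed layout, matching the INSERT in the trigger
  serial#  number(10),
  username varchar2(30)
);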
======================================== Apigee Script =================
[cbdik7bq@meylvaedb06 ~]$ cat apigee_status.sh

set -x

export apigee_ser=""
export apigee_stat=""
/opt/apigee/apigee-service/bin/apigee-all status |& tee apigee-status.txt
sed -i -e 's/ //gi;/^+/d' apigee-status.txt

echo “Contents of file …. “

cat apigee-status.txt

for i in $(cat apigee-status.txt)
do
apigee_ser=$(echo $i|cut -d: -f 2)
apigee_stat=$(echo $i|cut -d: -f 3)
if [ "${apigee_stat}" != "OK" ]; then
echo "Apigee Service ${apigee_ser} Is Down....Bringing it up $(date)"
/opt/apigee/apigee-service-4.17.01-0.0.535/bin/apigee-service ${apigee_ser} status
/opt/apigee/apigee-service-4.17.01-0.0.535/bin/apigee-service ${apigee_ser} start
/opt/apigee/apigee-service-4.17.01-0.0.535/bin/apigee-service ${apigee_ser} status
fi
done

======================================
CREATE OR REPLACE TRIGGER USER_TRACE_TRG
AFTER LOGON ON DATABASE
BEGIN
IF USER = '&USER_ID'
THEN
-- execute immediate 'alter session set events ''10046 trace name context forever, level 12''';
-- execute immediate 'alter session set events ''10046 level 1'''; -- from 11g onwards a simpler syntax is available.
-- execute immediate 'alter session set events ''8103 trace name errorstack level 3''';
-- execute immediate 'alter session set events ''10236 trace name context forever, level 1''';
-- execute immediate 'alter session set max_dump_file_size=''UNLIMITED''';
-- execute immediate 'alter session set db_file_multiblock_read_count=1';
-- execute immediate 'alter session set tracefile_identifier=''ORA8103''';
END IF;
EXCEPTION
WHEN OTHERS THEN
NULL;
END;
/

Catalog and Catproc – How to find what Objects are keeping them Invalid in the Registry (dba_registry) (Doc ID 578841.1)

set heading off;
set echo off;
Set pages 999;
set long 90000;
spool ddl_list.sql
select dbms_metadata.get_ddl(‘TABLESPACE’,tb.tablespace_name) from dba_tablespaces tb;
spool off
================== DDL of reference key constraints ===========
SET LONG 20000 LONGCHUNKSIZE 20000 PAGESIZE 0 LINESIZE 1000 FEEDBACK OFF VERIFY OFF TRIMSPOOL ON

BEGIN
DBMS_METADATA.set_transform_param (DBMS_METADATA.session_transform, ‘SQLTERMINATOR’, true);
DBMS_METADATA.set_transform_param (DBMS_METADATA.session_transform, ‘PRETTY’, true);
END;
/
spool ref_constraints.log;
SELECT DBMS_METADATA.get_ddl (‘REF_CONSTRAINT’, ac1.constraint_name, ac1.owner)
FROM dba_constraints ac1
JOIN all_constraints ac2 ON ac1.r_owner = ac2.owner AND ac1.r_constraint_name = ac2.constraint_name
WHERE ac2.owner = UPPER(‘&1’)
AND ac2.table_name = UPPER(‘&2’)
AND ac2.constraint_type IN (‘P’,’U’)
AND ac1.constraint_type = ‘R’;
spool off;
SET PAGESIZE 14 LINESIZE 100 FEEDBACK ON VERIFY ON

SELECT DBMS_METADATA.get_ddl (‘REF_CONSTRAINT’, ac1.constraint_name, ac1.owner)
FROM dba_constraints ac1
WHERE ac1.r_owner = ‘SYSADM’
AND ac1.R_CONSTRAINT_NAME=’PKCUSTOMER_ALL’

;

http://www.oaktable.net/category/tags/arup-nanda-ace-oracle-materialized-view-alter-modify-prebuilt-table –> Altering columns of prebuilt Materialized View

============ To bring all databases on the server =================

bash-3.2$ cat dbshut_sunil.sh

#!/bin/ksh

ORACLE_HOME=/oracle/product/10.2.0; export ORACLE_HOME
PATH=$PATH:$ORACLE_HOME/bin; export PATH
for i in $(ps -ef|grep pmon|grep -v grep|awk '{print $9}'|cut -f3 -d _)
do
ORACLE_SID=$i; export ORACLE_SID
/oracle/product/10.2.0/bin/sqlplus -s /nolog <<EOF
connect / as sysdba
select name,open_mode from v\$database;
shut immediate;
EOF

done

create table invalid_obj2 as
(select owner,object_name,object_type,status from dba_objects where status=’INVALID’
minus
select owner,object_name,object_type,status from objects_may2019 where status=’INVALID’);

select ‘alter ‘||object_type||’ ‘||owner||’.’||object_name||
‘ compile;’
from sys.invalid_obj2
where status=’INVALID’ and
object_type <> ‘PACKAGE BODY’
union
select ‘alter package ‘||owner||’.’||object_name||’ compile body;’
from sys.invalid_obj2
where status=’INVALID’ and
object_type = ‘PACKAGE BODY’;
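Instead of spooling and running the generated ALTER statements, the standard UTL_RECOMP package (run as SYS, ideally with no other DDL active) can recompile everything invalid:

exec utl_recomp.recomp_parallel(4);              -- 4 job slaves, all schemas
-- exec utl_recomp.recomp_serial('EMAGINEROOT'); -- or one schema at a time (schema name is an example)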

column object_name format a30
column tablespace_name format a30
column object_type format a20
column status format a1

break on object_type skip 1

select object_type, object_name,
decode( status, ‘INVALID’, ‘*’, ” ) status,
decode( object_type,
‘TABLE’,
(select tablespace_name
from user_tables
where table_name = object_name),
‘TABLE PARTITION’,
(select tablespace_name
from user_tab_partitions
where partition_name = subobject_name),
‘INDEX’,
(select tablespace_name
from user_indexes
where index_name = object_name),
‘INDEX PARTITION’,
(select tablespace_name
from user_ind_partitions
where partition_name = subobject_name),
‘LOB’,
(select tablespace_name
from user_segments
where segment_name = object_name),
null ) tablespace_name
from user_objects a
order by object_type, object_name

/

col STATUS format a30;
col INPUT_BYTES format 999999;
col OUTPUT_BYTES format 999999;
col END_TIME format a20;
col START_TIME format a20;
Set pagesize 1000
set linesize 300
select SESSION_KEY, INPUT_TYPE, STATUS,to_char(START_TIME,’mm/dd/yy hh24:mi’) start_time,to_char(END_TIME,’mm/dd/yy hh24:mi’) end_time,
round(elapsed_seconds/(60*60)) hrs, STATUS,round(INPUT_BYTES/1024/1024/1024) INPUT_GB,round(OUTPUT_BYTES/1024/1024/1024) OUTPUT_GB from V$RMAN_BACKUP_JOB_DETAILS
where START_TIME >= to_timestamp(‘04.10.2018:01:00:00′,’DD.MM.YYYY:HH24:MI:SS’) and INPUT_TYPE not in (‘ARCHIVELOG’) order by session_key;

============ ACMCMDDU.sh============

#!/bin/bash
#
# du of each subdirectory in a directory for ASM
#
D=$1

if [[ -z $D ]]
then
echo "Please provide a directory !"
exit 1
fi

(for DIR in $(asmcmd ls ${D})
do
echo ${DIR} $(asmcmd du ${D}/${DIR} | tail -1)
done) | awk -v D="$D" ' BEGIN { printf("\n\t\t%40s\n\n", D " subdirectories size") ;
printf("%25s%16s%16s\n", "Subdir", "Used MB", "Mirror MB") ;
printf("%25s%16s%16s\n", "------", "-------", "---------") ;}
{
printf("%25s%16s%16s\n", $1, $2, $3) ;
use += $2 ;
mir += $3 ;
}
END { printf("\n\n%25s%16s%16s\n", "------", "-------", "---------");

printf("%25s%16s%16s\n\n", "Total", use, mir) ;} '

-bash-4.2$ cat db_audit.sh

#!/bin/bash

SID=$(ps -ef | grep pmon | grep -v grep | cut -d_ -f3 | grep -v "+" | grep -v "-" | head -1)

SID=$(ps -ef | grep pmon | grep -v grep | cut -d_ -f3 | sort | grep RWDB)

flavor=$(uname -s)

if [ $flavor == 'Linux' ]

then

export ORACLE_HOME=$(cat /etc/oratab|grep -v "^#" | grep $SID |cut -d: -f2| uniq)

#

else

export ORACLE_HOME=$(cat /var/opt/oracle/oratab|grep -v "^#"|grep $SID|cut -d: -f2|uniq)

#

fi

export PATH=$ORACLE_HOME/bin:$ORACLE_HOME/OPatch:$PATH
touch b1.sql

db_audit_more_user.html
echo ” alter session set nls_date_format= ‘dd-mon-yy hh24:mi:ss’;
set lines 180 pages 20000;
set feedback on;
set termout off;
col username for a10
col machine for a10
col program for a35
col event for a35
col logon_time for a30
col file_name for a40
col profile for a30
col limit for a30
set markup HTML ON SPOOL ON ENTMAP ON PREFORMAT OFF
spool db_audit_more_user.html
col IS_PUBLIC for a10
set echo on;
show user
select * from gv\$instance;
select NAME,CREATED,LOG_MODE,open_mode from v\$database;
select * from gv\$version;
select INST_ID,GROUP_NUMBER,DISK_NUMBER,path,NAME from gv\$asm_disk where name is not null order by 1,4;
select INST_ID,GROUP_NUMBER,NAME,STATE,TYPE,TOTAL_MB,FREE_MB,USABLE_FILE_MB,VOTING_FILES from gv\$asm_diskgroup where name is not null order by 1,3;
select * from Gv\$CONFIGURED_INTERCONNECTS;
select * from Gv\$cluster_interconnects;
select name from v\$controlfile;
select * from v\$log;
select member from v\$logfile;
select inst_id,username,status,machine,program,last_call_et,to_char(logon_time,'dd-mm-yy hh24:mi'),count(*) from gv\$session where username is not null group by inst_id,username,status,machine,program,last_call_et,to_char(logon_time,'dd-mm-yy hh24:mi');
select inst_id,username,machine,program,module,status,count(*) from gv\$session group by inst_id,username,machine,program,module,status order by 2 desc;
select username,inst_id,count(*) from gv\$session where username is not null group by username,inst_id;
select distinct server from v\$session;
select s1.inst_id,l1.sid,s1.serial#,’ IS BLOCKING ‘,s2.inst_id,l2.sid,s2.serial#
from gv\$lock l1, gv\$lock l2, gv\$session s1,gv\$session s2
where l1.block >0 and l2.request > 0
and l1.id1 = l2.id1
and l1.id2 = l2.id2
and l1.sid = s1.sid
and l1.inst_id = s1.inst_id
and l2.sid = s2.sid
and l2.inst_id = s2.inst_id
order by l1.inst_id;
select * from gv\$resource_limit;
select profile,resource_name,limit from dba_profiles order by profile;
select username,default_tablespace,temporary_tablespace,profile from dba_users;
SELECT job_name,STATE FROM DBA_SCHEDULER_JOBS WHERE JOB_NAME = ‘GATHER_STATS_JOB’;
SELECT client_name, status FROM DBA_AUTOTASK_CLIENT;
select job, last_date, last_sec, next_date, next_sec, this_date, this_sec,to_char(next_date,’dd/mm/yyyy hh24:mi:ss’),broken, failures, interval, what,INSTANCE from dba_jobs;
archive log list;
show parameter dump
show parameter spfile
show parameter pfile
show parameter flash
show parameter db_recovery
show parameter sga
show parameter pga
show parameter streams
show parameter db
show parameter retention
show parameter job_queue_processes
show parameter dump
show parameter shared
show parameter log
show parameter undo
show parameter _fix
show parameter remote
show parameter local
show parameter target
show parameter audit
show parameter control
select name,VALUE,DESCRIPTION from v\$parameter where ISDEFAULT=’FALSE’;
select name,bytes/1024/1024,status from v\$datafile;
select name,bytes/1024/1024,status from v\$tempfile;
select sum(bytes)/1024/1024 “sum_sga” from v\$sgastat;
select sum(bytes)/1024/1024 from dba_data_files;
select sum(bytes)/1024/1024 from dba_temp_files;
select sum(bytes)/1024/1024 from dba_free_space;
select tablespace_name,file_name from dba_data_files where file_id in(select file# from v\$recover_file);
select name,status from v\$datafile where status not in(‘ONLINE’,’SYSTEM’);
select owner,SEGMENT_NAME,TABLESPACE_NAME,initial_extent,next_extent,STATUS from dba_rollback_segs;
select TABLESPACE_NAME,BLOCK_SIZE,contents,EXTENT_MANAGEMENT,SEGMENT_SPACE_MANAGEMENT,status from dba_tablespaces;
select substr(tablespace_name,1,40), file_id, file_name, round(bytes/(1024*1024),0) total_space,AUTOEXTENSIBLE,status, (maxbytes)/1024/1024 from dba_data_files order by tablespace_name;
SELECT (SELECT tablespace_name FROM dba_tablespaces WHERE tablespace_name = b.tablespace_name ) name,
ROUND(kbytes_alloc/1024, 2) size_mb ,
ROUND((kbytes_alloc-NVL(kbytes_free,0))/1024, 2) used_mb ,
ROUND(NVL(kbytes_free,0)/1024, 2) free_mb,
ROUND(((kbytes_alloc-NVL(kbytes_free,0))/ kbytes_alloc)*100, 2) pct_used ,
--ROUND(NVL(largest,0)/1024, 2) largest ,
ROUND(NVL(kbytes_max,kbytes_alloc)/1024, 2) maxsize_mb ,
ROUND(DECODE(kbytes_max,0,0,((kbytes_alloc-NVL(kbytes_free,0))/kbytes_max)*100),2) pct_max_used ,
(SELECT extent_management FROM dba_tablespaces WHERE tablespace_name = b.tablespace_name ) extent_management ,
(SELECT segment_space_management FROM dba_tablespaces WHERE tablespace_name = b.tablespace_name ) segment_space_management
FROM (SELECT SUM(bytes)/1024 Kbytes_free,
MAX(bytes)/1024 largest,
tablespace_name
From Sys.Dba_Free_Space
where tablespace_name not in ('TEMP')
Group By Tablespace_Name
Union
Select (free_blocks*VALUE)/1024 Kbytes_Free,
null largest,
Tablespace_Name
From v\$sort_Segment,v\$parameter
where name=’db_block_size’
) a ,
(SELECT SUM(bytes)/1024 Kbytes_alloc,
SUM(DECODE(MAXBYTES,0,BYTES,maxbytes))/1024 Kbytes_max,
tablespace_name
FROM sys.dba_data_files
GROUP BY tablespace_name
UNION all
select SUM(bytes)/1024 Kbytes_alloc,
SUM(DECODE(MAXBYTES,0,BYTES,maxbytes))/1024 Kbytes_max,
tablespace_name
FROM sys.dba_temp_files
GROUP BY tablespace_name
)b
Where A.Tablespace_Name (+) = B.Tablespace_Name
ORDER BY 2 desc;
select file_name,tot_allocated,current_use,tot_allocated-current_use total_free from
(select file_id,max(block_id+blocks+1)*8192/(1024*1024) current_use from dba_extents group by file_id) a,
(select file_id,file_name,bytes/(1024*1024) tot_allocated from dba_data_files) b
where a.file_id=b.file_id order by tot_allocated-current_use;

select owner,table_name,initial_extent,next_extent,last_analyzed,tablespace_name from dba_tables
where owner not in (‘SYS’,’SYSTEM’,’DBSNMP’,’SYSMAN’,’PERFSTAT’,’TSMSYS’,’OUTLN’,’RWMONITOR’,’ORACLE_OCM’,’DIP’,’XDB’,’APPQOSSYS’,’GSMADMIN_INTERNAL’,’WMSYS’,’AUDSYS’);

select owner,index_name,initial_extent,next_extent,last_analyzed,tablespace_name from dba_indexes where
owner not in (‘SYS’,’SYSTEM’,’DBSNMP’,’SYSMAN’,’PERFSTAT’,’TSMSYS’,’OUTLN’,’RWMONITOR’,’ORACLE_OCM’,’DIP’,’XDB’,’APPQOSSYS’,’GSMADMIN_INTERNAL’,’WMSYS’,’AUDSYS’);

select owner,table_name,tablespace_name from dba_tables where tablespace_name=’SYSTEM’ and
owner not in (‘SYS’,’SYSTEM’,’DBSNMP’,’SYSMAN’,’PERFSTAT’,’TSMSYS’,’OUTLN’,’RWMONITOR’,’ORACLE_OCM’,’DIP’,’XDB’,’APPQOSSYS’,’GSMADMIN_INTERNAL’,’WMSYS’,’AUDSYS’);

select owner,index_name,tablespace_name from dba_indexes where tablespace_name=’SYSTEM’ and
owner not in (‘SYS’,’SYSTEM’,’DBSNMP’,’SYSMAN’,’PERFSTAT’,’TSMSYS’,’OUTLN’,’RWMONITOR’,’ORACLE_OCM’,’DIP’,’XDB’,’APPQOSSYS’,’GSMADMIN_INTERNAL’,’WMSYS’,’AUDSYS’);
select owner,object_name,object_type,status from dba_objects where status =’INVALID’ order by 1;

select owner,segment_name,SEGMENT_TYPE,TABLESPACE_NAME,sum(bytes)/1024/1024 size_in_mb from dba_segments where
owner not in (‘SYS’,’SYSTEM’,’DBSNMP’,’SYSMAN’,’PERFSTAT’,’TSMSYS’,’OUTLN’,’RWMONITOR’,’ORACLE_OCM’,’DIP’,’XDB’,’APPQOSSYS’,’GSMADMIN_INTERNAL’,’WMSYS’,’AUDSYS’) group by owner,segment_name,SEGMENT_TYPE,TABLESPACE_NAME
order by 5;

select t.owner owner,t.table_name table_name,t.tablespace_name tablespace,
t.last_analyzed last_analyzed,round((t.NUM_ROWS*t.AVG_ROW_LEN)/1024/1024) USED_MB, round((st.bytes)/1024/1024) ALLOC_MB, round((st.bytes)/1024/1024 - (t.NUM_ROWS*t.AVG_ROW_LEN)/1024/1024 ) frg_mb,
(((1 - ROUND (( ((t.NUM_ROWS*t.AVG_ROW_LEN)/1024/1024) / round((st.bytes)/1024/1024)), 2)) * 100)) frg_pct
FROM dba_tables t,dba_segments st
WHERE t.owner like ‘%’ and t.partitioned=’NO’
and t.owner=st.owner and t.table_name=st.segment_name
and round((st.bytes)/1024/1024) > 10
AND ( round((st.bytes)/1024/1024 - (t.NUM_ROWS*t.AVG_ROW_LEN)/1024/1024 ) > 50 AND (((1 - ROUND ((round((t.NUM_ROWS*t.AVG_ROW_LEN)/1024/1024) / round((st.bytes)/1024/1024)), 2)) * 100)) > 20 )
ORDER BY frg_pct desc;

select owner,tablespace_name,sum(bytes)/1024/1024 from dba_segments group by owner,tablespace_name order by 3;

select index_owner,table_name,INDEX_NAME,COLUMN_EXPRESSION from dba_ind_expressions ;

select owner,TABLESPACE_NAME,table_name,AVG_ROW_LEN,NUM_ROWS,LAST_ANALYZED,PARTITIONED,IOT_TYPE,
TEMPORARY,BUFFER_POOL,ROW_MOVEMENT from dba_tables where num_rows >0 and
owner not in (‘SYS’,’SYSTEM’,’DBSNMP’,’SYSMAN’,’PERFSTAT’,’TSMSYS’,’OUTLN’,’RWMONITOR’,’ORACLE_OCM’,’DIP’,’XDB’,’APPQOSSYS’,’GSMADMIN_INTERNAL’,’WMSYS’,’AUDSYS’) order by 1,2,3 ;

select owner,TABLESPACE_NAME,TABLE_NAME,INDEX_NAME,INDEX_TYPE,NUM_ROWS,BLEVEL,CLUSTERING_FACTOR,
LAST_ANALYZED,PARTITIONED,TEMPORARY,BUFFER_POOL,AVG_LEAF_BLOCKS_PER_KEY ,AVG_DATA_BLOCKS_PER_KEY
from dba_indexes where owner not in (‘SYS’,’SYSTEM’,’DBSNMP’,’SYSMAN’,’PERFSTAT’,’TSMSYS’,’OUTLN’,’RWMONITOR’,’ORACLE_OCM’,’DIP’,’XDB’,’APPQOSSYS’,’GSMADMIN_INTERNAL’,’WMSYS’,’AUDSYS’) order by 1,2,3,4;

SELECT t.owner,t.table_name, i.index_name, t.blocks, t.num_rows, i.clustering_factor FROM dba_tables t, dba_indexes i
WHERE t.table_name = i.table_name and t.owner not in (‘SYS’,’SYSTEM’,’DBSNMP’,’SYSMAN’,’PERFSTAT’,’TSMSYS’,’OUTLN’,’RWMONITOR’,’ORACLE_OCM’,’DIP’,’XDB’,’APPQOSSYS’,’GSMADMIN_INTERNAL’,’WMSYS’,’AUDSYS’) order by 1,2 ;

select * from (select distinct s.sid, w.event, w.wait_time, w.seq#, q.sql_text
from v\$session_wait w, v\$session s, v\$process p, v\$sqlarea q
where s.paddr=p.addr and
s.sql_address=q.address and
s.sid in (select ss.sid from v\$sesstat ss, v\$session se where ss.statistic# in
(select statistic# from v\$statname where name = ‘CPU used by this session’) and
ss.value >100000 and se.sid=ss.sid)) where rownum <21;

SELECT *
FROM (SELECT s.sid,s.serial#,s.username,Substr(a.sql_text,1,300) sql_text,TO_CHAR(s.logon_Time,’DD-MON-YYYY HH24:MI:SS’) AS logon_time
,Trunc(a.disk_reads/Decode(a.executions,0,1,a.executions)) reads_per_execution,
a.buffer_gets,
a.disk_reads,
a.executions,
a.sorts,
a.address
FROM v\$sqlarea a,v\$session s where
s.sql_address=a.address
ORDER BY 6 DESC)
WHERE rownum <= 10;

Select a.execution_end, b.type, b.impact, d.rank, d.type, ‘Message : ‘||b.message MESSAGE,
‘Command To correct: ‘||c.command COMMAND,
‘Action Message : ‘||c.message ACTION_MESSAGE From dba_advisor_tasks a,
dba_advisor_findings b,Dba_advisor_actions c, dba_advisor_recommendations d
Where a.owner=b.owner and a.task_id=b.task_id And b.task_id=d.task_id
and b.finding_id=d.finding_id And a.task_id=c.task_id and d.rec_id=c.rec_Id
And a.task_name like ‘ADDM%’ and a.execution_end > sysdate -2 Order by b.impact, d.rank;

select round(((1-(sum(decode(name,
‘physical reads’, value,0))/
(sum(decode(name, ‘db block gets’, value,0))+
(sum(decode(name, ‘consistent gets’,
value, 0))))))
*100),2) || ‘%’ \”Buffer Cache Hit Ratio\”
from v\$sysstat;

select 100*sum(pins-reloads)/sum(pins) lc_hit_ratio from v\$librarycache;

select 100*sum(gets-getmisses-usage-fixed)/sum(gets) dc_ht_ratio from v\$rowcache;

select name,value from v\$sysstat where name=’redo buffer allocation retries’;

select * from (
select a.sql_text,a.first_load_time,a.disk_reads,a.executions,a.hash_value,a.cpu_time,a.elapsed_time,
b.sid,b.serial#,b.username,b.status,to_char(b.logon_time,’dd-mm-yyyy hh24:mi:ss’),b.process,b.machine,
b.terminal,b.program,b.module,b.server
from v\$sqlarea a,v\$session b where a.address=b.sql_address
and a.cpu_time >10000000 order by cpu_time desc) where rownum <21;

select nvl(S.USERNAME,’Internal’) username,
nvl(S.TERMINAL,’None’) terminal,
L.SID||’,’||S.SERIAL# Kill,
U1.NAME||’.’||substr(T1.NAME,1,20) tab, q.sql_text,to_char(S.logon_time,’dd-mm-yyyy hh24:mi:ss’),
decode(L.LMODE,1,’No Lock’,
2,’Row Share’,
3,’Row Exclusive’,
4,’Share’,
5,’Share Row Exclusive’,
6,’Exclusive’,null) lmode,
decode(L.REQUEST,1,’No Lock’,
2,’Row Share’,
3,’Row Exclusive’,
4,’Share’,
5,’Share Row Exclusive’,
6,’Exclusive’,null) request
from v\$LOCK L,
v\$SESSION S, v\$sqlarea q,
SYS.USER$ U1,
SYS.OBJ$ T1
where L.SID = S.SID
and S.sql_address=q.address
and T1.OBJ# = decode(L.ID2,0,L.ID1,L.ID2)
and U1.USER# = T1.OWNER#
and S.TYPE != ‘BACKGROUND’
order by 1,2,5 ;
select sum(bytes/1024/1024),STATUS from dba_undo_extents GROUP BY STATUS;
select file_name,phyrds,phywrts,readtim,writetim from v\$filestat a,dba_data_files b where a.file#=b.file_id order by 2;
select file_name,phyrds,phywrts,readtim,writetim from v\$filestat a,dba_temp_files b where a.file#(+)=b.file_id order by 2;

SET LONG 20000 LONGCHUNKSIZE 20000 PAGESIZE 0 LINESIZE 1000 FEEDBACK OFF VERIFY OFF TRIMSPOOL ON

BEGIN
DBMS_METADATA.set_transform_param (DBMS_METADATA.session_transform, ‘SQLTERMINATOR’, true);
DBMS_METADATA.set_transform_param (DBMS_METADATA.session_transform, ‘PRETTY’, true);
END;
/

SELECT DBMS_METADATA.get_ddl (‘TABLESPACE’, tablespace_name) FROM dba_tablespaces;
spool off;
exit; " > b1.sql
sqlplus / as sysdba @b1.sql
====================== DB Audit Purge (Roamware) =============
-bash-4.2$ cat /oracle/app/alerts/db_audit_purge.sql
spool /oracle/app/alerts/spool_purge_audit.txt
host date
set linesize 300
set echo on
set pagesize 1000
col PARAMETER_VALUE FOR a15
col AUDIT_TRAIL FOR a20
set time on
set timing on
show parameter instance_name
Alter session set current_schema=SYS;
select sysdate,sysdate-90 from dual;
select min(EVENT_TIMESTAMP) from unified_audit_trail;
SELECT * FROM dba_audit_mgmt_last_arch_ts where AUDIT_TRAIL=’UNIFIED AUDIT TRAIL’;

— Set Archive Policy of Number of Days data need to keep
BEGIN
DBMS_AUDIT_MGMT.SET_LAST_ARCHIVE_TIMESTAMP(
audit_trail_type => DBMS_AUDIT_MGMT.AUDIT_TRAIL_UNIFIED,
last_archive_time =>SYSDATE-1,
CONTAINER => DBMS_AUDIT_MGMT.CONTAINER_CURRENT);
END;
/

— Delete actual Audit records from database using below query
BEGIN
DBMS_AUDIT_MGMT.CLEAN_AUDIT_TRAIL(
audit_trail_type=> DBMS_AUDIT_MGMT.AUDIT_TRAIL_UNIFIED,
use_last_arch_timestamp => TRUE);
END;
/

select min(EVENT_TIMESTAMP) from unified_audit_trail;

Prompt ” End UNIFIED AUDIT TRAIL TABLE QUERY “
— *

— Check Configuration
col PARAMETER_NAME format a30;
col PARAMETER_VALUE format a30;
SELECT * FROM dba_audit_mgmt_config_params ;

— *

— Delete FGA records
— Set Archive Policy of Number of Days data need to keep
/

— *
— Delete OS Audit Files
BEGIN
DBMS_AUDIT_MGMT.SET_LAST_ARCHIVE_TIMESTAMP(
audit_trail_type => DBMS_AUDIT_MGMT.AUDIT_TRAIL_OS,
last_archive_time => sysdate-5,
rac_instance_number => 1);
end;
/

— OS Audit File removal
BEGIN
DBMS_AUDIT_MGMT.SET_LAST_ARCHIVE_TIMESTAMP(
audit_trail_type => DBMS_AUDIT_MGMT.AUDIT_TRAIL_OS,
last_archive_time => sysdate-5,
rac_instance_number => 2);
end;
/

— Delete actual Audit records from database using below query
BEGIN
DBMS_AUDIT_MGMT.CLEAN_AUDIT_TRAIL(
audit_trail_type => DBMS_AUDIT_MGMT.AUDIT_TRAIL_OS,
use_last_arch_timestamp => TRUE);
END;
/

— *
— Delete STD records
— Set Archive Policy of Number of Days data need to keep
— *
BEGIN
SYS.DBMS_AUDIT_MGMT.SET_LAST_ARCHIVE_TIMESTAMP(
audit_trail_type => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD,
last_archive_time => (sysdate – 5),
rac_instance_number => 1);
END;
/

BEGIN
SYS.DBMS_AUDIT_MGMT.SET_LAST_ARCHIVE_TIMESTAMP(
audit_trail_type => DBMS_AUDIT_MGMT.AUDIT_TRAIL_FGA_STD,
last_archive_time => (sysdate – 5),
rac_instance_number => 2);
END;
/

— purge DB audit logs
BEGIN
SYS.DBMS_AUDIT_MGMT.CLEAN_AUDIT_TRAIL(
audit_trail_type => dbms_audit_mgmt.AUDIT_TRAIL_DB_STD,
use_last_arch_timestamp => TRUE);
END;
/

— *

— Verify the Configuration
set linesize 300
set pagesize 1000
col PARAMETER_VALUE FOR a15
col AUDIT_TRAIL FOR a20
select sysdate,sysdate-90 from dual;
select min(EVENT_TIMESTAMP) from unified_audit_trail;
SELECT * FROM dba_audit_mgmt_last_arch_ts where AUDIT_TRAIL=’UNIFIED AUDIT TRAIL’;
host date

exit
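To run the purge regularly, the script can be driven from cron. A minimal sketch (the schedule, profile path and cron log location below are assumptions, not from the original notes; the spool file is still produced inside db_audit_purge.sql):

# m h dom mon dow command
30 01 * * * . /oracle/.profile; sqlplus -S "/ as sysdba" @/oracle/app/alerts/db_audit_purge.sql > /oracle/app/alerts/db_audit_purge_cron.log 2>&1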

[bilprodb4|BSCS_PROD_NEW] $ cat adump_trace_purge.sh

#!/bin/ksh

set -x

PATH=$PATH:/oracle/bscs/product/10.2.0/bin
export PATH

cd /oracle/BSCS/admin/BSCSPR/adump

find . -name “*.aud” -mtime +15 -type f -exec rm {} \;

adump_dir=""

export adump_dir

adump_dir=$(sqlplus -S /nolog <<EOF
conn / as sysdba
set heading off feedback off
set pagesize 0
set linesize 50
select value from v\$parameter where name='audit_file_dest';
exit;
EOF
)

cd /bscsarchive/adump
pwd

ls *.aud > audlist.txt
tar -cvf aud_$(date +%d%m%Y-%H%M%S).tar -I audlist.txt
ls -ltr aud*.tar
for i in $(cat audlist.txt)
do
rm $i
done
find . -name “*.tar” -mtime +15 -type f -exec rm {} \;
======================= Trace =========
--alter session set tracefile_identifier='10046';
--alter session set timed_statistics = true;
--alter session set statistics_level=all;
--alter session set max_dump_file_size = unlimited;
--alter session set events '10046 trace name context forever,level 12';
EXEC DBMS_MVIEW.REFRESH('IDASHBOARD.MV_EXT_CONT_HIS_RATEPLAN',ATOMIC_REFRESH=>FALSE);
--alter session set events '10046 trace name context off';

alter session set tracefile_identifier='10046';
alter session set timed_statistics = true;
alter session set statistics_level=all;
alter session set max_dump_file_size = unlimited;
alter session set events '10046 trace name context forever,level 12';

-- Execute the queries or operations to be traced here --

alter session set events '10046 trace name context off';
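Once tracing is switched off, the raw trace can be summarised with tkprof. A sketch, assuming the standard diag trace directory layout and an illustrative trace file name (match it on the tracefile_identifier set above):

cd $ORACLE_BASE/diag/rdbms/$ORACLE_SID/$ORACLE_SID/trace   # adjust to your diagnostic_dest / db_name / instance_name
tkprof ORCL_ora_12345_10046.trc 10046_report.txt sys=no sort=exeela,fchela,prsela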
=============
select 'alter database register logfile '||''''||name||''' for '||'''ACTEVENT_CAPTURE'';' from v$archived_log where sequence#>=26503;
=============

for i in $(ps -ef|grep pmon|grep -v grep|awk '{print $9}'|cut -f3 -d _)
do
export ORACLE_SID=$i
sqlplus -S "/ as sysdba" <<EOF
select name,open_mode from v\$database;
exit
EOF
done
========= get alert log location from sqlplus ===============

alert_log_path=$(sqlplus -s <<EOF
/ as sysdba
set lines 180
set pages 500
set feedback off
set echo off
set head off
--select value from v\$parameter where name='background_dump_dest';
with DD (DDV) as (SELECT VALUE DDV FROM V\$parameter WHERE NAME='diagnostic_dest'),
LCD (LCDV) as (select value LCDV FROM V\$parameter WHERE NAME='db_name'),
INST (INSTV) as (select value INSTV FROM V\$parameter WHERE NAME='instance_name')
select * from (select DDV||'/diag/rdbms/'||LCDV||'/'||INSTV||'/trace' from DD,LCD,INST) where rownum=1;
exit
EOF
)
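A follow-up sketch using the variable captured above (assumes ORACLE_SID is exported and the standard alert_<SID>.log file name inside the trace directory):

echo "Alert log directory: $alert_log_path"
tail -100 $alert_log_path/alert_${ORACLE_SID}.log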

============= io stats========
select filename, file#, snap_id, end_interval_time
,round(phyrds_d) "Reads"
,round(phyrds_d/interval_seconds) "Av Reads/s"
,round(readtim_d*10/nullif(phyrds_d, 0)) "Av Rd(ms)"
,round(phyblkrd_d/nullif(phyrds_d, 0)) "Av Blks/Rd"
,round(singleblkrds_d/interval_seconds) "1-bk Rds/s"
,round(singleblkrdtim_d*10/nullif(singleblkrds_d, 0)) "Av 1-bk Rd(ms)"
,round(phywrts_d) "Writes"
,round(phywrts_d/interval_seconds) "Av Writes/s"
,round(writetim_d*10/nullif(phywrts_d, 0)) "Av Wr(ms)" -- * Not in AWR
,round(phyblkwrt_d/nullif(phywrts_d, 0)) "*Av Blks/Wr" -- * Not in AWR
,round(wait_count_d) "Buffer Waits"
,round(time_d*10/nullif(wait_count_d, 0)) "Av Buf Wt(ms)" -- time column is in centiseconds
from (
select phyrds - lag(phyrds) over(partition by file# order by snap_id) phyrds_d
,phywrts - lag(phywrts) over(partition by file# order by snap_id) phywrts_d
,singleblkrds - lag(singleblkrds) over(partition by file# order by snap_id) singleblkrds_d
,readtim - lag(readtim) over(partition by file# order by snap_id) readtim_d
,writetim - lag(writetim) over(partition by file# order by snap_id) writetim_d
,singleblkrdtim - lag(singleblkrdtim) over(partition by file# order by snap_id) singleblkrdtim_d
,phyblkrd - lag(phyblkrd) over(partition by file# order by snap_id) phyblkrd_d
,phyblkwrt - lag(phyblkwrt) over(partition by file# order by snap_id) phyblkwrt_d
,wait_count - lag(wait_count) over(partition by file# order by snap_id) wait_count_d
,time - lag(time) over(partition by file# order by snap_id) time_d
,end_interval_time
,interval_seconds
,t.*
from dba_hist_filestatxs t
,(select snap_id s_snap_id, end_interval_time
,((sysdate + (end_interval_time - begin_interval_time)) - sysdate)*86400 interval_seconds
from dba_hist_snapshot)
where t.snap_id = s_snap_id
and tsname = 'TEST_TBS'
and snap_id >= (select max(snap_id) from dba_hist_snapshot) - 6
-- and end_interval_time > timestamp'2015-04-22 06:00:00'
);
============================== Solaris ASM =====
oracle@backup02:/dev/rdsk$ ls -lhL emcpower93a
crw——- 1 oracle dba 287, 744 Jul 28 15:02 emcpower93a
======================= SYSAUX tablespace huge growth================
COLUMN “Item” FORMAT A25
COLUMN “Space Used (GB)” FORMAT 999.99
COLUMN “Schema” FORMAT A25
COLUMN “Move Procedure” FORMAT A40

SELECT  occupant_name "Item",
space_usage_kbytes/1048576 "Space Used (GB)",
schema_name "Schema",
move_procedure "Move Procedure"
FROM v$sysaux_occupants
WHERE occupant_name in  ('SM/AWR','SM/OPTSTAT')
ORDER BY 1
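When SM/AWR or SM/OPTSTAT dominate SYSAUX, the usual follow-up is to drop old AWR snapshots and purge optimizer statistics history. A sketch (the snapshot IDs and the 31-day cut-off are illustrative; check dba_hist_snapshot first):

sqlplus -S "/ as sysdba" <<EOF
exec DBMS_WORKLOAD_REPOSITORY.DROP_SNAPSHOT_RANGE(low_snap_id => 1000, high_snap_id => 1200);
exec DBMS_STATS.PURGE_STATS(SYSDATE - 31);
exit
EOF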

=============================== Data from subpartition========
select * from idashboard.mobile_activation subpartition(SYS_SUBP965);
select * from idashboard.mobile_activation subpartition(SYS_SUBP966);
select * from idashboard.mobile_activation subpartition(&SUBPARTITION);

UPDATE IDASHBOARD.DATA_REFRESH_TIMINGS SET STATUS=’COMPLETED’;

Note: some trace files stay open for as long as the database is OPEN, so deleting them does not free the space. One example is the LMD trace file on a RAC cluster.

In that case use oradebug to close and flush the trace file, as described here: http://blog.fatalmind.com/2010/02/01/oracle-trace-file-rotation/ or here: http://agstamy.blogspot.de/2010/11/flushing-and-closing-trace-files-using.html
SELECT s.sid, s.serial#, p.pid FROM v$session s, v$process p
WHERE s.paddr=p.addr and s.program like ‘%LMD%’;

SQL> oradebug setorapid 6
Unix process pid: 17652, image: oracle@labdb01 (LMD0)
SQL> oradebug close_trace
Statement processed.
SQL> oradebug flush
Statement processed.
========================== AWR Snapshot creation ===================
col startup_time format a15 heading ‘Startup’
col begin_interval_time format a15 heading ‘Begin snap’
col end_interval_time format a15 heading ‘End Snap’
col flush_elapsed format a20 heading ‘flush elapsed’
col error_count format 9999 heading ‘Err#’

SELECT *
FROM
(SELECT instance_number, startup_time, begin_interval_time,
end_interval_time, flush_elapsed, error_count
FROM dba_hist_snapshot
ORDER BY begin_interval_time DESC
)
WHERE rownum < 5;
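The heading mentions snapshot creation; to force an out-of-cycle snapshot, a minimal sketch (run as a suitably privileged user):

sqlplus -S "/ as sysdba" <<EOF
exec DBMS_WORKLOAD_REPOSITORY.CREATE_SNAPSHOT;
exit
EOF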

==========
create or replace trigger sysdba_to_alert
after logon on database
declare
message varchar2(256);
IP varchar2(15);
v_os_user varchar2(80);
v_module varchar2(50);
v_action varchar2(50);
v_pid varchar2(10);
v_sid number;
v_program varchar2(48);
v_client_id VARCHAR2(64);
begin
IF user =’SYS’ THEN

— get IP for remote connections:
if sys_context(‘userenv’,’network_protocol’) = ‘TCP’ then
IP := sys_context(‘userenv’,’ip_address’);
end if;

select distinct sid into v_sid from sys.v_$mystat;
SELECT p.SPID, v.PROGRAM into v_pid, v_program
FROM V$PROCESS p, V$SESSION v
WHERE p.ADDR = v.PADDR AND v.sid = v_sid;

v_os_user := sys_context(‘userenv’,’os_user’);
dbms_application_info.READ_MODULE(v_module,v_action);

v_client_id := sys_context(‘userenv’,’client_identifier’);

message:= to_char(sysdate,’Dy Mon dd HH24:MI:SS YYYY’)||
‘ SYSDBA logon from ‘||nvl(IP,’localhost’)||’ ‘||v_pid||
‘ ‘||v_os_user||’ ‘||v_client_id||
‘ with ‘||v_program||’ ‘||v_module||’ ‘||v_action;

sys.dbms_system.ksdwrt(2,message);

end if;
end;
/
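A quick way to confirm the trigger fires (a sketch; assumes adrci sees a single diag home for this instance): open and close a SYSDBA session, then tail the alert log for the line written by ksdwrt.

sqlplus -S "/ as sysdba" <<EOF
exit
EOF
adrci exec="show alert -tail 20" | grep 'SYSDBA logon'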

https://asanga-pradeep.blogspot.com/2013/12/excessive-audit-file-aud-generation.html

AUDIT_TRAIL Set to DB (DB,EXTENDED) yet Some Audited Entries for non-Sysdba Users Are Created in the OS Trail. (Doc ID 1279934.1)

Max size on mount point
select (nvl(t1.Total_size_mb1,0)+nvl(t1.Total_size_mb2,0)+nvl(t1.Total_size_mb3,0))/1024 as tot_size_gb from
(select
round(sum((case when autoextensible=’YES’ and bytes<=maxbytes then maxbytes else 0 end)/1048576)) as Total_size_mb1, round(sum((case when autoextensible=’YES’ and bytes>maxbytes then bytes else 0 end)/1048576)) as Total_size_mb2,
round(sum((case when autoextensible=’NO’ then bytes else 0 end)/1048576)) as Total_size_mb3,
round(sum((case when autoextensible=’YES’ and bytes<maxbytes then maxbytes-bytes else 0 end)/1048576)) as free_space_mb1
from dba_data_files where file_name like ‘&a%’
)t1 ;

Max size on all mount point
select t1.abc, (nvl(t1.Total_size_mb1,0)+nvl(t1.Total_size_mb2,0)+nvl(t1.Total_size_mb3,0))/1024 as tot_size_gb
from
(
select substr (file_name,0,12) as abc ,
round(sum((case when autoextensible=’YES’ and bytes<=maxbytes then maxbytes else 0 end)/1048576)) as Total_size_mb1, round(sum((case when autoextensible=’YES’ and bytes>maxbytes then bytes else 0 end)/1048576)) as Total_size_mb2,
round(sum((case when autoextensible=’NO’ then bytes else 0 end)/1048576)) as Total_size_mb3,
round(sum((case when autoextensible=’YES’ and bytes<maxbytes then maxbytes-bytes else 0 end)/1048576)) as free_space_mb1
from dba_data_files
group by substr (file_name,0,12)
) t1 order by 1;
======================= List of Oracle Default/Non-Default accounts ==============
select created,username,oracle_maintained,common,no_exp,no_expdp,no_sby,default_password,sysaux,occupant_desc
from dba_users
left outer join
(select distinct name username,’Y’ no_expdp from sys.ku_noexp_tab where obj_type=’SCHEMA’)
using(username)
left outer join (select distinct name username,’Y’ no_exp from sys.exu8usr)
using(username)
left outer join (select distinct name username,’Y’ no_sby from system.logstdby$skip_support where action in (0,-1))
using(username)
left outer join (select distinct user_name username,’Y’ default_password from sys.default_pwd$)
using(username)
left outer join (select schema_name username,’Y’ sysaux,decode(count(*),1,min(occupant_desc)) occupant_desc from v$sysaux_occupants group by schema_name)
using(username)

order by created,username;

CREATE OR REPLACE FUNCTION part_hv_to_date (p_table_owner IN VARCHAR2,
p_table_name IN VARCHAR2,
p_partition_name IN VARCHAR2)
RETURN DATE


— File Name : https://oracle-base.com/dba/miscellaneous/part_hv_to_date.sql
— Author : Tim Hall
— Description : Create a function to turn partition HIGH_VALUE column to a date.
— Call Syntax : @part_hv_to_date
— Last Modified: 19/01/2012
— Notes : Has to re-select the value from the view as LONG cannot be passed as a parameter.

— Example call:

— SELECT a.partition_name,
— part_hv_to_date(a.table_owner, a.table_name, a.partition_name) as high_value

— FROM all_tab_partitions a;

— Does no error handling.


AS
l_high_value VARCHAR2(32767);
l_date DATE;
BEGIN
SELECT high_value
INTO l_high_value
FROM all_tab_partitions
WHERE table_owner = p_table_owner
AND table_name = p_table_name
AND partition_name = p_partition_name;

EXECUTE IMMEDIATE ‘SELECT ‘ || l_high_value || ‘ FROM dual’ INTO l_date;
RETURN l_date;
END;
/

SELECT table_name,PARTITION_NAME,high_value
FROM DBA_TAB_PARTITIONS P
–WHERE TABLE_OWNER = ‘ZEDDBA’
WHERE TABLE_NAME in (‘REPORT_DUAPP’,’LG_EVENT’,’LG_SOAP_REQUESTS’,’LG_REST_REQUESTS’)
AND PART_HV_TO_DATE(TABLE_OWNER, TABLE_NAME, PARTITION_NAME) = (
SELECT MAX(PART_HV_TO_DATE(TABLE_OWNER, TABLE_NAME, PARTITION_NAME))
FROM DBA_TAB_PARTITIONS
WHERE TABLE_OWNER = P.TABLE_OWNER
AND TABLE_NAME = P.TABLE_NAME);
================ Dictionary and Fixed Object stats =============
alter session set nls_date_format=’YYYY-Mon-DD’;
col last_analyzed for a13
set termout off
set trimspool off
set feedback off
spool dictionary_statistics

prompt ‘Statistics for SYS tables’
SELECT NVL(TO_CHAR(last_analyzed, ‘YYYY-Mon-DD’), ‘NO STATS’) last_analyzed, COUNT(*) dictionary_tables
FROM dba_tables
WHERE owner = ‘SYS’
GROUP BY TO_CHAR(last_analyzed, ‘YYYY-Mon-DD’)
ORDER BY 1 DESC;

prompt ‘Statistics for Fixed Objects’
select NVL(TO_CHAR(last_analyzed, ‘YYYY-Mon-DD’), ‘NO STATS’) last_analyzed, COUNT(*) fixed_objects
FROM dba_tab_statistics
WHERE object_type = ‘FIXED TABLE’
GROUP BY TO_CHAR(last_analyzed, ‘YYYY-Mon-DD’)
ORDER BY 1 DESC;

spool off
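If the dates above are stale, dictionary and fixed object statistics can be regathered. A sketch (gathering fixed object stats is normally done while the system is under a representative load):

sqlplus -S "/ as sysdba" <<EOF
exec DBMS_STATS.GATHER_DICTIONARY_STATS;
exec DBMS_STATS.GATHER_FIXED_OBJECTS_STATS;
exit
EOF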
============== RAC Master node====
-bash-4.2$ oclumon manage -get master

Master = meypvodsprdb02

===================== VIP ===================
-bash-4.2$ ip addr | grep “^ *inet “
inet 127.0.0.1/8 scope host lo
inet 10.95.187.90/26 brd 10.95.187.127 scope global ens161
inet 10.95.187.105/26 brd 10.95.187.127 scope global secondary ens161:0 –> .105 is VIP
inet 10.95.187.154/26 brd 10.95.187.191 scope global ens192
inet 10.95.200.26/23 brd 10.95.201.255 scope global ens193
inet 10.95.194.113/24 brd 10.95.194.255 scope global ens224
inet 10.95.205.168/28 brd 10.95.205.175 scope global ens225
inet 10.95.199.23/23 brd 10.95.199.255 scope global ens256

===================== IMPDP table partition with query ==================
USERID=TEST_RESTORE/test
DIRECTORY=RESTORE
dumpfile=LG_SOAPREQUESTS_229_18-JAN-19_%U.dmp
logfile=impdpLG_SOAPREQUESTS_229_Jan19.log
REMAP_SCHEMA=DSP_LOG:TEST_RESTORE
query=” DSP_LOG.LG_SOAPREQUESTS:SYS_P86156:where SOAPIN like \’\%971586100417\%\’ “

table_exists_action=append
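Assuming the directives above are saved to a parameter file (the file name below is illustrative), the import is launched as:

impdp parfile=lg_soaprequests_restore.par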

Connect to the RMAN catalog database to check whether block change tracking (BCT) is actually reducing the blocks read by incremental backups:
select
file# fno,
used_change_tracking BCT,
incremental_level INCR,
datafile_blocks BLKS,
block_size blksz,
blocks_read READ,
round((blocks_read/datafile_blocks) * 100,2) “%READ”,
blocks WRTN, round((blocks/datafile_blocks)*100,2) “%WRTN”
from rc_backup_datafile
where completion_time between
to_date(’02-18-2020 22:00:00′, ‘MM-DD-YYYY HH24:MI:SS’) and
to_date(’02-20-2020 08:00:00′, ‘MM-DD-YYYY HH24:MI:SS’)
and db_key=958182

order by file#;

cd /oracle/app/tools/oswbb
cd /oracle/app/tfa/repository/suptools/hostname/oswbb/oracle/oswbb

/oracle/app/product/12.2.0/dbhome_1/jdk/bin/java -jar oswbba.jar -i /oracle/app/tools/oswbb/archive -b Feb 27 16:40:00 2020 -e Feb 27 17:00:00 2020 -s

meylvdspdb01:~/dba_work $ cat sess_capture.sql
set time on timing on echo on feedback on
spo sess_capture.log

declare
T_COMMIT NUMBER:=0;
begin

for i in 1..7000 loop

insert into harinders.dsp_session_capture
select sysdate exec_date, inst_id, status, username, count(*) sess_count
from gv$session where type = ‘USER’
and username = ‘DSP_SERVICES’ group by sysdate, inst_id, status, username;

insert into harinders.dsp_session_blocking
select sysdate exec_date, blocking_session,event,count(*) blocked_count
from gv$session where
blocking_session is not null
and event like ‘enq%’ group by sysdate , blocking_session,event;

commit;

DBMS_lock.sleep(30);

end loop;
END;
/

exit;

select ‘alter database rename file ‘||””||member||”’ to ‘||”’/oradata02/oradata/DEVUTB2/’||SUBSTR(member,(INSTR(member,’/’,-1,1)+1),length(member))||”’;’ from v$logfile;

select ‘alter database rename file ‘||””||name||”’ to ‘||”’/oradata02/oradata/DEVUTB2/’||SUBSTR(name,(INSTR(name,’/’,-1,1)+1),length(name))||”’;’ from v$datafile order by bytes;

select ‘alter database rename file ‘||””||name||”’ to ‘||”’/oradata03/oradata/DEVUTB2/’||SUBSTR(name,(INSTR(name,’/’,-1,1)+1),length(name))||”’;’ from v$tempfile;

sqlplus -s "/as sysdba" << EOF
set linesize 300
set pagesize 1000
col NAME format a100;
select thread#,max(sequence#) from gv\$archived_log where applied='YES' group by thread#;
-- select FIRST_TIME,SEQUENCE#,NAME,DELETED,Applied from V\$ARCHIVED_LOG where SEQUENCE# between 310526 and 310550 order by 2;
select FIRST_TIME,SEQUENCE#,NAME,DELETED,Applied from V\$ARCHIVED_LOG where SEQUENCE# between (select max(sequence#) from gv\$archived_log where applied='YES' group by thread#) and (select max(sequence#)+20 from gv\$archived_log where applied='YES' group by thread#) order by SEQUENCE#;
archive log list
select process, status, sequence# from v\$managed_standby;
select * from v\$flash_recovery_area_usage where FILE_TYPE='ARCHIVELOG';
host df -h /bscsarchive01/
exit

EOF

============= System statistics ==============
set echo off
set linesize 200 pagesize 1000
column pname format a30
column sname format a20
column pval2 format a20
select pname,pval2 from sys.aux_stats$ where sname=’SYSSTATS_INFO’;
select pname,pval1,calculated,formula from sys.aux_stats$ where sname=’SYSSTATS_MAIN’
model
reference sga on (
select name,value from v$sga
) dimension by (name) measures(value)
reference parameter on (
select name,decode(type,3,to_number(value)) value from v$parameter where name=’db_file_multiblock_read_count’ and ismodified!=’FALSE’
union all
select name,decode(type,3,to_number(value)) value from v$parameter where name=’sessions’
union all
select name,decode(type,3,to_number(value)) value from v$parameter where name=’db_block_size’
) dimension by (name) measures(value)
partition by (sname) dimension by (pname) measures (pval1,pval2,cast(null as number) as calculated,cast(null as varchar2(60)) as formula) rules(
calculated[‘MBRC’]=coalesce(pval1[‘MBRC’],parameter.value[‘db_file_multiblock_read_count’],parameter.value[‘_db_file_optimizer_read_count’],8),
calculated[‘MREADTIM’]=coalesce(pval1[‘MREADTIM’],pval1[‘IOSEEKTIM’] + (parameter.value[‘db_block_size’] * calculated[‘MBRC’] ) / pval1[‘IOTFRSPEED’]),
calculated[‘SREADTIM’]=coalesce(pval1[‘SREADTIM’],pval1[‘IOSEEKTIM’] + parameter.value[‘db_block_size’] / pval1[‘IOTFRSPEED’]),
calculated[‘ multi block Cost per block’]=round(1/calculated[‘MBRC’]*calculated[‘MREADTIM’]/calculated[‘SREADTIM’],4),
calculated[‘ single block Cost per block’]=1,
formula[‘MBRC’]=case when pval1[‘MBRC’] is not null then ‘MBRC’ when parameter.value[‘db_file_multiblock_read_count’] is not null then ‘db_file_multiblock_read_count’ when parameter.value[‘_db_file_optimizer_read_count’] is not null then ‘_db_file_optimizer_read_count’ else ‘= _db_file_optimizer_read_count’ end,
formula[‘MREADTIM’]=case when pval1[‘MREADTIM’] is null then ‘= IOSEEKTIM + db_block_size * MBRC / IOTFRSPEED’ end,
formula[‘SREADTIM’]=case when pval1[‘SREADTIM’] is null then ‘= IOSEEKTIM + db_block_size / IOTFRSPEED’ end,
formula[‘ multi block Cost per block’]=’= 1/MBRC * MREADTIM/SREADTIM’,
formula[‘ single block Cost per block’]=’by definition’,
calculated[‘ maximum mbrc’]=sga.value[‘Database Buffers’]/(parameter.value[‘db_block_size’]*parameter.value[‘sessions’]),
formula[‘ maximum mbrc’]=’= buffer cache size in blocks / sessions’
);

set echo on
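If SYSSTATS_MAIN is empty or obviously wrong, system statistics can be regathered. A minimal sketch using the NOWORKLOAD mode (other gathering modes exist; this is just the simplest to run):

sqlplus -S "/ as sysdba" <<EOF
exec DBMS_STATS.GATHER_SYSTEM_STATS('NOWORKLOAD');
exit
EOF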

set ECHO OFF
set TAB OFF
set PAGESIZE 300
set LINESIZE 200
set FEEDBACK OFF
set VERIFY OFF
col instance_name new_value inst noprint;
select host_name,instance_name from v$instance;
spool &inst..log;
select username||','||account_status||','||profile||','||CREATED||','||LOCK_DATE from dba_users;
spool off;
exit;
====================== RMAN Backup History from Connected to catalog============

SELECT
DB_NAME,
INPUT_TYPE,
TO_CHAR(START_TIME,’DAY’) START_TIME_DAY,
START_TIME,
TO_CHAR(END_TIME,’mm/dd/yy HH24:MI’) END_TIME,
TIME_TAKEN_DISPLAY,
ROUND(SUM(OUTPUT_BYTES/1024/1024/1024),2) SUM_BACKUP_PIECES_IN_GB,
OUTPUT_DEVICE_TYPE
FROM RC_RMAN_BACKUP_JOB_DETAILS
WHERE –DB_NAME=’&DBNAME’ and
STATUS =’COMPLETED’
AND INPUT_TYPE in (‘ARCHIVELOG’)
AND OUTPUT_DEVICE_TYPE IS NOT NULL
AND START_TIME >SYSDATE-90
GROUP BY DB_NAME,
INPUT_TYPE,
STATUS,
TO_CHAR(START_TIME,’DAY’) ,
START_TIME,
TO_CHAR(END_TIME,’mm/dd/yy HH24:MI’) ,
TIME_TAKEN_DISPLAY,
OUTPUT_DEVICE_TYPE
ORDER BY DB_NAME,
START_TIME
;

==================== Current running rman backups from catalog =========
select DB_NAME,status,OPERATION,START_TIME,
to_char(START_TIME,’DD-MON-YYYY HH24:MI:SS’) as START_TIME
from rman.RC_RMAN_STATUS
where start_time >= sysdate-1 and status like ‘RUNNING%’
================== FRA ==============

On Standby:
select applied,deleted,decode(rectype,11,’YES’,’NO’) reclaimable
,count(*),min(sequence#),max(sequence#)
from v$archived_log left outer join sys.x$kccagf using(recid)
where is_recovery_dest_file=’YES’ and name is not null
group by applied,deleted,decode(rectype,11,’YES’,’NO’) order by 5
/

On Primary:
column deleted format a7
column reclaimable format a11
set linesize 120
select applied,deleted,backup_count
,decode(rectype,11,’YES’,’NO’) reclaimable,count(*)
,to_char(min(completion_time),’dd-mon hh24:mi’) first_time
,to_char(max(completion_time),’dd-mon hh24:mi’) last_time
,min(sequence#) first_seq,max(sequence#) last_seq
from v$archived_log left outer join sys.x$kccagf using(recid)
where is_recovery_dest_file=’YES’
group by applied,deleted,backup_count,decode(rectype,11,’YES’,’NO’) order by min(sequence#)
/

set linesize 200 pagesize 1000
column is_recovery_dest_file format a21
select
deleted,status,is_recovery_dest_file,thread#,min(sequence#),max(sequence#),min(first_time),max(next_time),count(distinct sequence#),archived,applied,backup_count,count(“x$kccagf”)
from (
select deleted,thread#,sequence#,status,name ,first_time, next_time,case x$kccagf.rectype when 11 then recid end “x$kccagf”
,count(case archived when ‘YES’ then ‘YES’ end)over(partition by thread#,sequence#) archived
,count(case applied when ‘YES’ then ‘YES’ end)over(partition by thread#,sequence#) applied
,sum(backup_count)over(partition by thread#,sequence#) backup_count
,listagg(is_recovery_dest_file||’:’||dest_id,’,’)within group(order by dest_id)over(partition by thread#,sequence#) is_recovery_dest_file
from v$archived_log left outer join sys.x$kccagf using(recid)
) group by deleted,status,is_recovery_dest_file,thread#,archived,applied,backup_count
order by max(sequence#),min(sequence#),thread#,deleted desc,status;

delete force archivelog from sequence 29666 backed up 3 times to sbt;

select output from gv$rman_output where session_recid in (select session_recid from v$rman_status where start_time > sysdate-2) and (output like '%WARNING%' or output like '%ERROR%') order by recid;
===================== Trace file for session ===========
SET LINESIZE 100
COLUMN trace_file FORMAT A60

SELECT s.sid,
s.serial#,
pa.value || '/' || LOWER(SYS_CONTEXT('userenv','instance_name')) ||
'_ora_' || p.spid || '.trc' AS trace_file
FROM v$session s,
v$process p,
v$parameter pa
WHERE pa.name = 'user_dump_dest'
AND s.paddr = p.addr

AND s.audsid = SYS_CONTEXT('USERENV', 'SESSIONID');

TEST=
(DESCRIPTION_LIST=
(LOAD_BALANCE=off)(FAILOVER=ON)
(DESCRIPTION=(ENABLE=BROKEN)(LOAD_BALANCE=ON)(FAILOVER=ON)
(ADDRESS_LIST=
(ADDRESS=(PROTOCOL=TCP)(HOST=meylvfaaadb1-vip)(PORT=1521))
(ADDRESS=(PROTOCOL=TCP)(HOST=meylvfaaadb2-vip)(PORT=1521))
)
(CONNECT_DATA=(service_name=ndb1)(FAILOVER_MODE=(TYPE=SELECT)(METHOD=BASIC)))
)
(DESCRIPTION=(ENABLE=BROKEN)(LOAD_BALANCE=ON)(FAILOVER=ON)
(ADDRESS_LIST=
(ADDRESS=(PROTOCOL=TCP)(HOST=maslvfaaadb03-vip)(PORT=1521))
(ADDRESS=(PROTOCOL=TCP)(HOST=maslvfaaadb04-vip)(PORT=1521))
)
(CONNECT_DATA=(service_name=ndb1)(FAILOVER_MODE=(TYPE=SELECT)(METHOD=BASIC)))
)
)

TEST1=
(DESCRIPTION_LIST=
(LOAD_BALANCE=off)(FAILOVER=ON)
(DESCRIPTION=(ENABLE=BROKEN)
(ADDRESS_LIST=
(ADDRESS=(PROTOCOL=TCP)(HOST=meylvfaaadb1-vip)(PORT=1521))
(ADDRESS=(PROTOCOL=TCP)(HOST=meylvfaaadb2-vip)(PORT=1521))
)
(CONNECT_DATA=(service_name=ndb1)(FAILOVER_MODE=(TYPE=SELECT)(METHOD=BASIC)))
)
(DESCRIPTION=(ENABLE=BROKEN)
(ADDRESS_LIST=
(ADDRESS=(PROTOCOL=TCP)(HOST=maslvfaaadb03-vip)(PORT=1521))
(ADDRESS=(PROTOCOL=TCP)(HOST=maslvfaaadb04-vip)(PORT=1521))
)
(CONNECT_DATA=(service_name=ndb1)(FAILOVER_MODE=(TYPE=SELECT)(METHOD=BASIC)))
)
)

============== RMAN ====================
backup archivelog all tag=’all_arch’ not backed up 1 times filesperset 1; ==> No need to mention device type
delete archivelog until time ‘sysdate-1’ backed up 1 times to sbt;
============= RMAN =============
select applied,deleted,decode(rectype,11,’YES’,’NO’) reclaimable
,count(*),min(sequence#),max(sequence#)
from v$archived_log left outer join sys.x$kccagf using(recid)
where is_recovery_dest_file=’YES’ and name is not null
group by applied,deleted,decode(rectype,11,’YES’,’NO’) order by 5
/

column deleted format a7
column reclaimable format a11
set linesize 120
select applied,deleted,backup_count
,decode(rectype,11,’YES’,’NO’) reclaimable,count(*)
,to_char(min(completion_time),’dd-mon hh24:mi’) first_time
,to_char(max(completion_time),’dd-mon hh24:mi’) last_time
,min(sequence#) first_seq,max(sequence#) last_seq
from v$archived_log left outer join sys.x$kccagf using(recid)
where is_recovery_dest_file=’YES’
group by applied,deleted,backup_count,decode(rectype,11,’YES’,’NO’) order by min(sequence#)
/

set linesize 200 pagesize 1000
column is_recovery_dest_file format a21
select
deleted,status,is_recovery_dest_file,thread#,min(sequence#),max(sequence#),min(first_time),max(next_time),count(distinct sequence#),archived,applied,backup_count,count(“x$kccagf”)
from (
select deleted,thread#,sequence#,status,name ,first_time, next_time,case x$kccagf.rectype when 11 then recid end “x$kccagf”
,count(case archived when ‘YES’ then ‘YES’ end)over(partition by thread#,sequence#) archived
,count(case applied when ‘YES’ then ‘YES’ end)over(partition by thread#,sequence#) applied
,sum(backup_count)over(partition by thread#,sequence#) backup_count
,listagg(is_recovery_dest_file||’:’||dest_id,’,’)within group(order by dest_id)over(partition by thread#,sequence#) is_recovery_dest_file
from v$archived_log left outer join sys.x$kccagf using(recid)
) group by deleted,status,is_recovery_dest_file,thread#,archived,applied,backup_count
order by max(sequence#),min(sequence#),thread#,deleted desc,status;

Note (from a forum reply): the archived logs should become 'reclaimable' once applied, but you may hit the bug above where the reclaimable status is not refreshed automatically on a database in MOUNT. The workaround is either to exec dbms_backup_restore.refreshagedfiles (see the sqlplus sketch after the CONFIGURE command below) or to run the 'configure archivelog deletion policy' command again.

CONFIGURE ARCHIVELOG DELETION POLICY TO APPLIED ON ALL STANDBY BACKED UP 1 TIMES TO ‘SBT_TAPE’;
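The refresh route mentioned above, as a sqlplus sketch (dbms_backup_restore is an internal package; run this on the standby whose archived logs are not being marked reclaimable):

sqlplus -S "/ as sysdba" <<EOF
exec sys.dbms_backup_restore.refreshagedfiles;
exit
EOF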

select count(*) from HPSM94BKPADMIN.SYSATTACHMEM1 where to_date(SYSMODTIME,’DD-MON-YY’) between to_date(’08-JUL-20′) and to_date(’23-JUL-20′)

===========Tablespace Usage from AWR=========
select thedate,
gbsize,
prev_gbsize,
gbsize-prev_gbsize diff
from (
select thedate,
gbsize,
lag(gbsize,1) over (order by r) prev_gbsize
from (
select rownum r,
thedate,
gbsize
from (
select trunc(thedate) thedate,
max(gbsize) gbsize
from (
select to_date(to_char(snapshot.begin_interval_time,’YYYY-MON-DD HH24:MI:SS’),’YYYY-MON-DD HH24:MI:SS’) thedate,
round((usage.tablespace_usedsize*block_size.value)/1024/1024/1024,2) gbsize
from dba_hist_tbspc_space_usage usage,
v$tablespace tablespace,
dba_hist_snapshot snapshot,
v$parameter block_size
where usage.snap_id = snapshot.snap_id
and usage.tablespace_id = tablespace.ts#
and tablespace.name = ‘&tablespace’
and block_size.name = ‘db_block_size’
)
group by
trunc(thedate)
order by
trunc(thedate)
)
)
);

===========

select 'Archive log('||arch_query.arch_backup_age||'), Datafile('||df_query.df_backup_age||'), Control File('||cf_query.cf_backup_age||'), Spfile('||sp_query.spfile_backup_age||')' age,
max_age_query.max_age
from
(select round(nvl((sysdate - max(ar.NEXT_TIME))*24,0),2) arch_backup_age from v$backup_redolog ar) arch_query,
(select round(nvl((sysdate - min(t))*24,0),2) df_backup_age from
  (select /*+ rule */ max(b.checkpoint_time) t
   from v$backup_datafile b, v$tablespace ts, v$datafile f
   where included_in_database_backup = 'YES' and f.file#=b.file# and f.ts#=ts.ts#
   and f.enabled != 'READONLY' and f.status != 'OFFLINE' group by f.file#)) df_query,
(select /*+ rule */ round(nvl((sysdate - max(cf.checkpoint_time))*24,0),2) cf_backup_age from v$backup_controlfile_details cf) cf_query,
(select /*+ rule */ round(nvl((sysdate - max(sp.completion_time))*24,0),2) spfile_backup_age from v$backup_spfile sp) sp_query,
(select round(max(backup_age),2) max_age from
  (select round(nvl((sysdate - max(ar2.NEXT_TIME))*24,0),2) backup_age from v$backup_redolog ar2
   union all
   select round(nvl((sysdate - min(t))*24,0),2) df_backup_age from
     (select /*+ rule */ max(b.checkpoint_time) t
      from v$backup_datafile b, v$tablespace ts, v$datafile f
      where included_in_database_backup = 'YES' and f.file#=b.file# and f.ts#=ts.ts#
      and f.enabled != 'READONLY' and f.status != 'OFFLINE' group by f.file#)
   union all
   select /*+ rule */ round(nvl((sysdate - max(cf2.checkpoint_time))*24,0),2) cf_backup_age from v$backup_controlfile_details cf2
   union all
   select /*+ rule */ round(nvl((sysdate - max(sp2.completion_time))*24,0),2) spfile_backup_age from v$backup_spfile sp2)) max_age_query;

=====

select username, to_char(timestamp,’MMDD HH24:MI:SS’) ts
, action_name, returncode
, to_char(logoff_time,’MMDD HH24:MI:SS’) Logoff
from dba_audit_trail order by timestamp;

===== GG STOP Replicat forcefully ===
send replicat repl1, forcestop
info repl1 detail

GGSCI (meylvsmrepdb1) 1> info credentialstore

Reading from credential store:

Default domain: OracleGoldenGate

Alias: ggconfiguser
Userid: gguser

=============

[bilprodb4|BSCS_PROD_NEW] $ cat test.sh

#!/bin/ksh

ORACLE_SID=BSCSPR
ORACLE_HOME=/oracle/bscs/product/10.2.0
PATH=$PATH:$ORACLE_HOME/bin
export ORACLE_SID ORACLE_HOME PATH
stty -echo ===>>> Suppresses echoing on terminal
echo Password:
read -s password ===>>> -s behaves this way only in ksh on Solaris; Linux bash reads it normally
$ORACLE_HOME/bin/dgmgrl <<EOF
connect sys/$password@bscspr
show configuration;
show database verbose ‘BSCSPR’;
show database verbose ‘BSCSPRDR’;
exit
EOF

stty echo ===>>> re-enables echo on the terminal

-bash-4.4$ cat test.sh

#!/bin/bash

source /oracle/.profile
$GG_HOME/ggsci <<EOF > test.log
dblogin useridalias ogg_admin
info all
info repl1 detail
exit
EOF
df -h|grep dbfs

select inst_id,sid,serial#,module,event,program,status, blocking_session, last_call_et,sql_id from gv$session where module like ‘%REPL%’;

mysql -v database < script.sql > output.txt –> Similar to echoing the queries from a .sql file and generating a spool file.

===== PGA =====
select sum(value)/1024/1024/1024 GB from v$sesstat where statistic#=26;

select spid, pga_used_mem, pga_alloc_mem, pga_freeable_mem, pga_max_mem from v$process order by 2 desc;

select s.osuser osuser,s.serial# serial,se.sid,n.name,
max(se.value) maxmem
from v$sesstat se,
v$statname n
,v$session s
where n.statistic# = se.statistic#
and n.name in (‘session pga memory’,’session pga memory max’,
‘session uga memory’,’session uga memory max’)
and s.sid=se.sid
group by n.name,se.sid,s.osuser,s.serial#
order by 2
;

select nvl(d
,’Total Memory Usage….’) “Mem Type”
,round(sum(b) / 1024 / 1024) “MB”
from (select ‘System Global Area:’ d
,sum(bytes) b
from v$sgastat
union all
select ‘Program Global Area:’ d
,value b
from v$pgastat
where name = ‘total PGA allocated’) s
group by rollup(d) ;

SELECT DECODE(TRUNC(SYSDATE – LOGON_TIME), 0, NULL, TRUNC(SYSDATE – LOGON_TIME) || ‘ Days’ || ‘ + ‘) ||
TO_CHAR(TO_DATE(TRUNC(MOD(SYSDATE-LOGON_TIME,1) * 86400), ‘SSSSS’), ‘HH24:MI:SS’) LOGON,
SID, v$session.SERIAL#, v$process.SPID , ROUND(v$process.pga_used_mem/(1024*1024), 2) PGA_MB_USED,
v$session.USERNAME, STATUS, OSUSER, MACHINE, v$session.PROGRAM, MODULE
FROM v$session, v$process
WHERE v$session.paddr = v$process.addr
–and status = ‘ACTIVE’
–and v$session.sid = 97
–and v$session.username = ‘SYSTEM’
–and v$process.spid = 24301
ORDER BY pga_used_mem DESC;

SET LINESIZE 500
SET PAGESIZE 1000

COLUMN username FORMAT A30
COLUMN osuser FORMAT A20
COLUMN spid FORMAT A10
COLUMN service_name FORMAT A15
COLUMN module FORMAT A45
COLUMN machine FORMAT A30
COLUMN logon_time FORMAT A20
COLUMN pga_used_mem_mb FORMAT 99990.00
COLUMN pga_alloc_mem_mb FORMAT 99990.00
COLUMN pga_freeable_mem_mb FORMAT 99990.00
COLUMN pga_max_mem_mb FORMAT 99990.00

SELECT NVL(s.username, ‘(oracle)’) AS username,
s.osuser,
s.sid,
s.serial#,
p.spid,
ROUND(p.pga_used_mem/1024/1024,2) AS pga_used_mem_mb,
ROUND(p.pga_alloc_mem/1024/1024,2) AS pga_alloc_mem_mb,
ROUND(p.pga_freeable_mem/1024/1024,2) AS pga_freeable_mem_mb,
ROUND(p.pga_max_mem/1024/1024,2) AS pga_max_mem_mb,
s.lockwait,
s.status,
s.service_name,
s.module,
s.machine,
s.program,
TO_CHAR(s.logon_Time,’DD-MON-YYYY HH24:MI:SS’) AS logon_time,
s.last_call_et AS last_call_et_secs
FROM v$session s,
v$process p
WHERE s.paddr = p.addr
ORDER BY s.username, s.osuser;
