Skip to content

disk latency from ash by time

disk latency from ash by time published on 1 комментарий к записи disk latency from ash by time

Some queries to measure disk I/O.

 -- Breakdown of ASH samples by wait class (NULL wait_class = on CPU),
 -- as a percentage of all samples: a quick "where is DB time going" view.
 select 100*(round ( count (*)/sum(count(*)) over(),2 )) as pct ,nvl(wait_class,'CPU') wait_class from v$active_session_history group by wait_class order by 1 desc;
 -- Same breakdown restricted to I/O wait classes, split by individual event.
 select 100*(round ( count (*)/sum(count(*)) over(),2 )) as pct ,nvl(event,'CPU') event from v$active_session_history where wait_class like '%I/O%' group by event order by 1 desc; 

generate pivot list

 -- Build a quoted, comma-separated list of all I/O event names,
 -- ready to paste into the PIVOT ... IN (...) clause of the query below.
 select listagg( ''''||event||'''',',') within group (order by event )from v$active_session_history where wait_class like '%I/O%' group by event ;

or top wait pivot list:

-- Same pivot-list generator, but restricted to "top" I/O events:
-- only events accounting for more than 5% of ASH samples.
with disk_events as  (
  select 100*(round ( count (*)/sum(count(*)) over(),2 )) as pct ,nvl(event,'CPU') event from v$active_session_history where wait_class like '%I/O%' group by event )
 select listagg( ''''||event||'''',',') within group (order by event )from disk_events where pct >5 ;

query and result

-- Estimated average disk latency per I/O event, bucketed into 5-minute
-- intervals, derived purely from ASH samples.
-- (The original pasted snippet started with a stray "new:" label, which
-- made the statement invalid SQL; removed.)
--
-- Estimation idea: each ASH sample represents ~1 second of DB time
-- (sum(1000) ms).  For a sample caught mid-wait, time_waited holds the
-- wait duration in microseconds, so 1000000/time_waited estimates how many
-- such waits fit into the sampled second (floored at 1).
-- est_dbtime_ms / est_waits then approximates average latency in ms.
--
-- NOTE(review): the filter is wait_class = 'User I/O', yet the pivot lists
-- 'log file parallel write' — that column is all NULL in the sample output;
-- confirm whether the filter should also admit that event's wait class.
select * from
(select event,
        case when est_waits > 0
             then round(est_dbtime_ms / est_waits, 1)
             else null
        end as est_avg_latency_ms,
        time#
   from (
        select event,
               round(
                 sum(case when time_waited > 0
                          then greatest(1, 1000000/time_waited)
                          else 0 end)
               ) as est_waits,
               sum(1000) as est_dbtime_ms,
               -- round SAMPLE_TIME down to the enclosing 5-minute bucket
               trunc(ash.sample_time, 'MI') - mod(extract(minute from ash.sample_time), 5)/(24*60) time#
          from v$active_session_history ash
         where ash.wait_class = 'User I/O'
         group by trunc(sample_time, 'MI') - mod(extract(minute from sample_time), 5)/(24*60), event
        )
)
pivot ( sum(est_avg_latency_ms) for event in (
 'db file scattered read','db file sequential read','log file parallel write')
) order by time#

TIME#               |                'db file scattered read' |               'db file sequential read' |               'log file parallel write'
------------------- | --------------------------------------- | --------------------------------------- | ---------------------------------------
10.08.2015 14.10.00 |                                     2,2 |                                      ,3 |                                  <NULL>
10.08.2015 14.15.00 |                                     2,4 |                                      ,2 |                                  <NULL>
10.08.2015 14.20.00 |                                     1,7 |                                      ,3 |                                  <NULL>
10.08.2015 14.25.00 |                                     1,3 |                                      ,3 |                                  <NULL>
10.08.2015 14.30.00 |                                     3,5 |                                      ,2 |                                  <NULL>
10.08.2015 14.35.00 |                                     1,8 |                                      ,3 |                                  <NULL>
10.08.2015 14.40.00 |                                     1,1 |                                      ,3 |                                  <NULL>
10.08.2015 14.45.00 |                                     2,6 |                                      ,2 |                                  <NULL>
10.08.2015 14.50.00 |                                     1,5 |                                      ,3 |                                  <NULL>
10.08.2015 14.55.00 |                                     1,2 |                                      ,3 |                                  <NULL>
10.08.2015 15.00.00 |                                     2,6 |                                      ,2 |                                  <NULL>
10.08.2015 15.05.00 |                                     1,3 |                                      ,4 |                                  <NULL>
10.08.2015 15.10.00 |                                     1,6 |                                      ,3 |                                  <NULL>
10.08.2015 15.15.00 |                                       3 |                                      ,2 |                                  <NULL>
10.08.2015 15.20.00 |                                     1,4 |                                      ,3 |                                  <NULL>
10.08.2015 15.25.00 |                                     1,6 |                                      ,3 |                                  <NULL>
10.08.2015 15.30.00 |                                     3,1 |                                      ,2 |                                  <NULL>
10.08.2015 15.35.00 |                                     1,8 |                                      ,4 |                                  <NULL>
10.08.2015 15.40.00 |                                     1,2 |                                      ,3 |                                  <NULL>
10.08.2015 15.45.00 |                                     2,5 |                                      ,2 |                                  <NULL>
10.08.2015 15.50.00 |                                     1,6 |                                      ,3 |                                  <NULL>
10.08.2015 15.55.00 |                                     1,4 |                                      ,3 |                                  <NULL>
10.08.2015 16.00.00 |                                     4,8 |                                      ,2 |                                  <NULL>
10.08.2015 16.05.00 |                                     1,8 |                                      ,4 |                                  <NULL>
10.08.2015 16.10.00 |                                     1,2 |                                      ,4 |                                  <NULL>
10.08.2015 16.15.00 |                                       3 |                                      ,2 |                                  <NULL>
10.08.2015 16.20.00 |                                     1,3 |                                      ,3 |                                  <NULL>
10.08.2015 16.25.00 |                                     1,1 |                                      ,3 |                                  <NULL>
10.08.2015 16.30.00 |                                     3,1 |                                      ,2 |                                  <NULL>
10.08.2015 16.35.00 |                                     1,6 |                                      ,3 |                                  <NULL>
10.08.2015 16.40.00 |                                     1,6 |                                      ,3 |                                  <NULL>
10.08.2015 16.45.00 |                                     2,9 |                                      ,2 |                                  <NULL>
10.08.2015 16.50.00 |                                     1,6 |                                      ,3 |                                  <NULL>
10.08.2015 16.55.00 |                                     1,3 |                                      ,2 |                                  <NULL>
10.08.2015 17.00.00 |                                     2,2 |                                      ,2 |                                  <NULL>
10.08.2015 17.05.00 |                                     1,5 |                                      ,4 |                                  <NULL>
10.08.2015 17.10.00 |                                     1,7 |                                      ,3 |                                  <NULL>
10.08.2015 17.15.00 |                                       3 |                                      ,2 |                                  <NULL>
10.08.2015 17.20.00 |                                     1,8 |                                      ,3 |                                  <NULL>
10.08.2015 17.25.00 |                                     1,3 |                                      ,3 |                                  <NULL>
10.08.2015 17.30.00 |                                     3,3 |                                      ,3 |                                  <NULL>
10.08.2015 17.35.00 |                                     1,6 |                                      ,3 |                                  <NULL>
10.08.2015 17.40.00 |                                      ,7 |                                      ,3 |                                  <NULL>

this output can be used to build graphs:

Снимок экрана 2015-08-10 в 17.38.43

audit purge

audit purge published on Комментариев к записи audit purge нет

short way truncate table sys.aud$ =)))

-- One-time initialisation of DBMS_AUDIT_MGMT cleanup for all audit trail
-- types; default cleanup interval is 24 hours.
BEGIN
  DBMS_AUDIT_MGMT.init_cleanup(
    audit_trail_type         => DBMS_AUDIT_MGMT.AUDIT_TRAIL_ALL,
    default_cleanup_interval => 24 /* hours */);
END;
/
-- Verify that cleanup has been initialised for the standard audit trail.
SET SERVEROUTPUT ON
BEGIN
  IF DBMS_AUDIT_MGMT.is_cleanup_initialized(DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD) THEN
    DBMS_OUTPUT.put_line('YES');
  ELSE
    DBMS_OUTPUT.put_line('NO');
  END IF;
END;
/
-- Set the last-archive timestamp to 31 days ago: when the purge job runs
-- with USE_LAST_ARCH_TIMESTAMP => TRUE it may only delete records older
-- than this timestamp.
BEGIN
  DBMS_AUDIT_MGMT.set_last_archive_timestamp(
    audit_trail_type  => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD,
    last_archive_time => SYSTIMESTAMP-31);
END;
/

create purge audit job

--create STANDARD_AUDIT_TRAIL_PURGE_JOB
--create STANDARD_AUDIT_TRAIL_PURGE_JOB
-- Scheduled purge of the standard audit trail every 24 hours;
-- USE_LAST_ARCH_TIMESTAMP => TRUE limits deletion to records older than
-- the timestamp previously set via set_last_archive_timestamp.
BEGIN
DBMS_AUDIT_MGMT.CREATE_PURGE_JOB (
AUDIT_TRAIL_TYPE => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD,
AUDIT_TRAIL_PURGE_INTERVAL => 24,
AUDIT_TRAIL_PURGE_NAME => 'Standard_Audit_Trail_Purge_Job',
USE_LAST_ARCH_TIMESTAMP => TRUE );
END;
/

Or audit all purge job:

-- Alternative: one purge job covering every audit trail type
-- (AUDIT_TRAIL_ALL), same 24-hour interval and timestamp restriction.
BEGIN
DBMS_AUDIT_MGMT.CREATE_PURGE_JOB (
AUDIT_TRAIL_TYPE => DBMS_AUDIT_MGMT.AUDIT_TRAIL_ALL,
AUDIT_TRAIL_PURGE_INTERVAL => 24,
AUDIT_TRAIL_PURGE_NAME => 'ALL_Audit_Trail_Purge_Job',
USE_LAST_ARCH_TIMESTAMP => TRUE );
END;
/

create job to update what can job delete from audit

-- Daily scheduler job that advances the last-archive timestamp, so the
-- purge job is always allowed to delete audit records older than 31 days.
BEGIN
  SYS.DBMS_SCHEDULER.CREATE_JOB
    (
       job_name        => 'SYS.MOVE_LAST_TIMESTAMP_FORWARD'
      ,start_date      => trunc (SYSTIMESTAMP)
      ,repeat_interval => 'FREQ=DAILY;INTERVAL=1'
      ,end_date        => NULL
      ,job_class       => 'DEFAULT_JOB_CLASS'
      ,job_type        => 'PLSQL_BLOCK'
      -- job_action must be a complete anonymous block; the original string
      -- was missing its closing END;, so every scheduled run would fail
      -- with a compilation error.
      ,job_action      => '
BEGIN
  DBMS_AUDIT_MGMT.set_last_archive_timestamp(
   audit_trail_type  => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD,
   last_archive_time => SYSTIMESTAMP-31);
END;
'
      ,comments        => NULL
    );
end;
/

-- Make the job permanent (auto_drop off) and enable it.
BEGIN
DBMS_SCHEDULER.set_attribute( name => '"SYS"."MOVE_LAST_TIMESTAMP_FORWARD"', attribute => 'auto_drop', value => FALSE);
DBMS_SCHEDULER.enable(name=>'"SYS"."MOVE_LAST_TIMESTAMP_FORWARD"');
END; 
/

-- Inspect audit-management configuration, current archive timestamps,
-- and the history of cleanup events.
SELECT * FROM dba_audit_mgmt_config_params;
SELECT * FROM dba_audit_mgmt_last_arch_ts;
select * from DBA_AUDIT_MGMT_CLEAN_EVENTS;

Standard_Audit_Trail_Purge_Job

STOP:

-- Disable (stop) the standard purge job without dropping it.
BEGIN
  DBMS_AUDIT_MGMT.set_purge_job_status(
    audit_trail_purge_name   => 'STANDARD_AUDIT_TRAIL_PURGE_JOB',
    audit_trail_status_value => DBMS_AUDIT_MGMT.PURGE_JOB_DISABLE);
end;
/

Drop:

-- Remove the standard purge job entirely.
-- (A trailing "/" is required for SQL*Plus to actually execute the block;
-- it was missing in the original snippet.)
BEGIN
DBMS_AUDIT_MGMT.DROP_PURGE_JOB(
  AUDIT_TRAIL_PURGE_NAME  => 'STANDARD_AUDIT_TRAIL_PURGE_JOB');
END;
/

Manual run:

-- NOTE(review): despite the "Manual run" heading, this clears the
-- last-archive timestamps for all trail types rather than running a purge;
-- confirm whether a manual CLEAN_AUDIT_TRAIL call was intended instead.
BEGIN
  DBMS_AUDIT_MGMT.clear_last_archive_timestamp(
    audit_trail_type     =>  DBMS_AUDIT_MGMT.AUDIT_TRAIL_ALL);
END;
/

list of DBMS_AUDIT_MGMT. —>AUDIT_TRAIL_ALL<-- can be found here
almost all material from oracle-base.com

postgrespro pg_stat_wait patch

postgrespro pg_stat_wait patch published on Комментариев к записи postgrespro pg_stat_wait patch нет

useful materials
github postgrespro
Waits monitoring on mail list of postgresql.org
pgconf2015 slides

Packages

ubuntu:

apt-get install gcc libreadline-dev zlib1g-dev libxml2-dev libldap2-dev python-dev libssl-dev build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libxslt-dev libssl-dev libperl-dev

fedora:

yum install -y gcc bison-devel flex bison readline-devel zlib-devel openssl-devel wget perl-ExtUtils-MakeMaker perl-ExtUtils-Embed  readline-devel zlib-devel libxml2-devel openldap-devel python-devel openssl-devel

Install from source

# Build and install the patched PostgreSQL (waits_monitoring branch)
# from source into /usr/pgsql-pro.
mkdir /postgres/src -p
cd /postgres/src
wget https://github.com/postgrespro/postgres/archive/waits_monitoring_94.zip
unzip waits_monitoring_94.zip
cd postgres-waits_monitoring_94/
./configure --with-ldap --with-perl --with-python --with-openssl --with-libxml --prefix=/usr/pgsql-pro --exec-prefix=/usr/pgsql-pro
make
make install
# Build and install the pg_stat_wait contrib extension.
# (The original listed the directory without "cd", which would just try to
# execute the directory path and fail.)
cd /postgres/src/postgres-waits_monitoring_94/contrib/pg_stat_wait
make
make install
# Create the postgres OS group and user.
groupadd postgres -g 26
useradd --uid 26 --gid postgres --groups postgres -d /postgres -s /bin/bash -c "PostgreSQL Software Owner" postgres

enable waits monitoring

add to postgresql.conf

#History GUC parameters:

 shared_preload_libraries = 'pg_stat_wait.so' #for background worker that will be sample waits.
 pg_stat_wait.history = on #/off - turn on/off history recording
 pg_stat_wait.history_size = 1000 #how many records keep in history
 pg_stat_wait.history_period = 100 # period in milliseconds between the sampling
waits_monitoring=on

### alter system set waits_monitoring=on;

 postgres=# select * from pg_stat_wait_current;
  pid  |          sample_ts           | class_id | class_name | event_id | event_name | wait_time  |  p1  |  p2   |  p3   | p4 |   p5
-------+------------------------------+----------+------------+----------+------------+------------+------+-------+-------+----+--------
 19213 | 2015-07-21 05:54:41.86929-04 |        3 | Storage    |        0 | READ       |    2971593 | 1663 | 13056 | 12817 |  0 |      6
 19061 | 2015-07-21 05:54:41.86929-04 |        3 | Storage    |        0 | READ       |        622 | 1663 | 13056 | 12810 |  0 |     44
 19054 | 2015-07-21 05:54:41.86929-04 |        3 | Storage    |        1 | WRITE      |      47717 | 1663 | 16418 | 16431 |  0 | 165053
 19056 | 2015-07-21 05:54:41.86929-04 |        4 | Latch      |        0 | Latch      |      15808 |    0 |     0 |     0 |  0 |      0
 19059 | 2015-07-21 05:54:41.86929-04 |        4 | Latch      |        0 | Latch      |       3179 |    0 |     0 |     0 |  0 |      0
 19057 | 2015-07-21 05:54:41.86929-04 |        4 | Latch      |        0 | Latch      |    2971267 |    0 |     0 |     0 |  0 |      0
 19055 | 2015-07-21 05:54:41.86929-04 |        4 | Latch      |        0 | Latch      |     157947 |    0 |     0 |     0 |  0 |      0
 19206 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       |        544 |    0 |     0 |     0 |  0 |      0
 19204 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       |       1958 |    0 |     0 |     0 |  0 |      0
 19209 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       |        380 |    0 |     0 |     0 |  0 |      0
 19062 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       | 2726490525 |    0 |     0 |     0 |  0 |      0
 19201 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       |        704 |    0 |     0 |     0 |  0 |      0
 19203 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       |       2002 |    0 |     0 |     0 |  0 |      0
 19208 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       |       1625 |    0 |     0 |     0 |  0 |      0
 19210 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       |        999 |    0 |     0 |     0 |  0 |      0
 19205 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       |        248 |    0 |     0 |     0 |  0 |      0
 19202 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       |        648 |    0 |     0 |     0 |  0 |      0
 19207 | 2015-07-21 05:54:41.86929-04 |        5 | Network    |        0 | READ       |        718 |    0 |     0 |     0 |  0 |      0

ps: Many thanks to Vladimir Borodin for help !

awr report by hour

awr report by hour published on Комментариев к записи awr report by hour нет

simplest way to generate awr report by hour after benchmark
export awr

@?/rdbms/admin/awrextr.sql 

import awr

@?/rdbms/admin/awrload.sql

— make sure to set line size appropriately
— set linesize 152

generate awr

 set termout off
set linesize 80
set pagesize 10000
-- Generate a SQL*Plus script: for each AWR snapshot taken on the hour
-- (minute < 2, excluding dbid 1399642255), emit a
-- "spool <file> / awr_report_text(...) / spool off" triplet covering the
-- interval to the next snapshot (LEAD over dbid, snap_id).
-- NOTE(review): spools to .html but calls awr_report_text — consider
-- awr_report_html if HTML output is actually wanted; confirm.
select 'spool awrrpt_dwhfrn_'|| snap_id ||'.html'|| chr(13)||
 'select output from table(dbms_workload_repository.awr_report_text('||dbid||',1,'||snap_id||','|| LEAD (snap_id, 1)  OVER (ORDER BY  dbid,snap_id ) ||',0));' || chr(13)||
 ' spool off '
 from dba_hist_snapshot where dbid !=1399642255 and extract( minute from begin_interval_time ) <2 ;

virtualbox shared storage create on linux

virtualbox shared storage create on linux published on Комментариев к записи virtualbox shared storage create on linux нет

create shared storage:
data:

# Create 10 fixed-size 10 GB VDI disks (asm_1..asm_10) for ASM data.
for i in {1..10};do vboxmanage createhd --filename asm_$i --size 10240 --format VDI --variant Fixed; done

OCR:

# Create 3 fixed-size 1200 MB VDI disks (vote_1..vote_3) for OCR/voting.
for i in {1..3};do vboxmanage createhd --filename vote_$i --size 1200 --format VDI --variant Fixed; done

make disk shareable:

# Mark every VDI in the current directory as shareable (required for RAC).
for i in `ls -1 *.vdi`; do vboxmanage modifyhd $i --type shareable; done

add disk to virtual machine

# Attach each rac*.vdi to VM oel6-1 on successive SATA ports, starting at
# port 2 (presumably port 1 holds the OS disk — confirm).
# First loop is a dry run (echo), second actually attaches.
# The original wrote `c=$((c+=1)) cmd` with no separator, which makes the
# assignment an environment prefix for the command; use an explicit ";".
c=1; for i in `ls rac*.vdi -1` ; do c=$((c+1)); echo vboxmanage storageattach oel6-1 --storagectl "SATA" --port $c --type hdd --medium $i --mtype shareable; done
c=1; for i in `ls rac*.vdi -1` ; do c=$((c+1)); vboxmanage storageattach oel6-1 --storagectl "SATA" --port $c --type hdd --medium $i --mtype shareable; done

make disk partitions:
1. generate create partion table :

# Generate "parted -s <dev> mklabel gpt" commands for every disk fdisk
# reports with a GB size, stripping the trailing colon from the device name.
# (The original was missing the closing quote on the sed script, which
# leaves the shell waiting for more input.)
fdisk -l | grep GB | awk '{ print "parted -s " $2 " mklabel  gpt"}' | sed 's/://g'

need to automate create partition script

 # Attempt to create the partition non-interactively by piping "-1" (use
 # rest of disk) into parted — marked untested by the author ("no free vm").
 echo -e "-1" | (parted -a optimal /dev/sdb mkpart primary 1 ) 

no free vm to test

2. generate create asm disk:

[root@oel6-1 ~]# fdisk -l | grep 10.7 | awk '{ print "oracleasm createdisk RAC_ASM"NR " " $2 "1"  }' | sed 's/://g'
oracleasm createdisk RAC_ASM1 /dev/sdb1
oracleasm createdisk RAC_ASM2 /dev/sdc1
oracleasm createdisk RAC_ASM3 /dev/sdd1
oracleasm createdisk RAC_ASM4 /dev/sde1
oracleasm createdisk RAC_ASM5 /dev/sdf1
oracleasm createdisk RAC_ASM6 /dev/sdg1
oracleasm createdisk RAC_ASM7 /dev/sdh1
oracleasm createdisk RAC_ASM8 /dev/sdi1
oracleasm createdisk RAC_ASM9 /dev/sdj1
oracleasm createdisk RAC_ASM10 /dev/sdk1
[root@oel6-1 ~]# oracleasm createdisk RAC_ASM1 /dev/sdb1
Writing disk header: done
Instantiating disk: done
[root@oel6-1 ~]# oracleasm createdisk RAC_ASM2 /dev/sdc1
Writing disk header: done
Instantiating disk: done
[root@oel6-1 ~]# oracleasm createdisk RAC_ASM3 /dev/sdd1
Writing disk header: done
Instantiating disk: done
[root@oel6-1 ~]# oracleasm createdisk RAC_ASM4 /dev/sde1
Writing disk header: done
Instantiating disk: done
[root@oel6-1 ~]# oracleasm createdisk RAC_ASM5 /dev/sdf1
Writing disk header: done
Instantiating disk: done
[root@oel6-1 ~]# oracleasm createdisk RAC_ASM6 /dev/sdg1
Writing disk header: done
Instantiating disk: done
[root@oel6-1 ~]# oracleasm createdisk RAC_ASM7 /dev/sdh1
Writing disk header: done
Instantiating disk: done
[root@oel6-1 ~]# oracleasm createdisk RAC_ASM8 /dev/sdi1
Writing disk header: done
Instantiating disk: done
[root@oel6-1 ~]# oracleasm createdisk RAC_ASM9 /dev/sdj1
Writing disk header: done
Instantiating disk: done
[root@oel6-1 ~]# oracleasm createdisk RAC_ASM10 /dev/sdk1
Writing disk header: done
Instantiating disk: done

Normal storage how to:

 # Non-shared ("normal") storage variant: five 10 GB dynamically-sized VDIs
 # attached to VM stdb1 on successive SATA ports starting at port 2.
 # (Same fix as above: explicit ";" after the counter increment instead of
 # the fragile `c=$((c+=1)) cmd` environment-prefix form.)
 for i in {1..5};do vboxmanage createhd --filename stdb1_asm_$i --size 10240 --format VDI ; done
 c=1; for i in `ls stdb1_asm*.vdi -1` ; do c=$((c+1)); vboxmanage storageattach stdb1 --storagectl "SATA" --port $c --type hdd --medium $i ; done

offline oracle agent install on windows

offline oracle agent install on windows published on 2 комментария к записи offline oracle agent install on windows

get list of supported platform:

[oracle@monitoring tmp]$  emcli get_supported_platforms
-----------------------------------------------
Version = 12.1.0.3.0
 Platform = Linux x86-64
-----------------------------------------------
Version = 12.1.0.4.0
 Platform = Microsoft Windows x64 (64-bit)
-----------------------------------------------
Version = 12.1.0.3.0
 Platform = Oracle Solaris on SPARC (64-bit)
-----------------------------------------------
Version = 12.1.0.4.0
 Platform = Linux x86-64
-----------------------------------------------
Version = 12.1.0.3.0
 Platform = Microsoft Windows (32-bit)
-----------------------------------------------
Version = 12.1.0.3.0
 Platform = Microsoft Windows x64 (64-bit)
-----------------------------------------------
Platforms list displayed successfully.

get agent :

[oracle@monitoring tmp]$ emcli get_agentimage -destination=/tmp/win_agent -platform="Microsoft Windows x64 (64-bit)" -version=12.1.0.4.0
 === Partition Detail ===
Space free : 1 GB
Space required : 1 GB
Check the logs at /oracle/.emcli/get_agentimage_2015-04-13_18-42-53-PM.log
Downloading /tmp/win_agent/12.1.0.4.0_AgentCore_233.zip
File saved as /tmp/win_agent/12.1.0.4.0_AgentCore_233.zip
Downloading /tmp/win_agent/12.1.0.4.0_PluginsOneoffs_233.zip
File saved as /tmp/win_agent/12.1.0.4.0_PluginsOneoffs_233.zip
Downloading /tmp/win_agent/unzip
File saved as /tmp/win_agent/unzip
ERROR: Command /tmp/win_agent/unzip /tmp/win_agent/12.1.0.4.0_AgentCore_233.zip agentcoreimage.zip -d /tmp/win_agent execution failed.
RECOMMENDATION: If your agent image platform and the emcli client setup is on a different platform then you need to set the environment variable ZIP_LOC to absolute path to the zip utility which is greater than or equal to version 3.0.
Example: setenv ZIP_LOC /usr/local/bin/zip
[oracle@monitoring tmp]$

copy to destination & install:

C:\Users\sql> C:\oracle_agent\distrib\agentDeploy.bat AGENT_BASE_DIR=c:\oracle_agent\agent12c4 RESPONSE_FILE=C:\oracle_agent\distrib\agent3.rsp -ignorePrereqs


C:\Users\sql> C:\oracle_agent\distrib\agentDeploy.bat AGENT_BASE_DIR=c:\oracle_agent\agent12c4 RESPONSE_FILE=C:\oracle_agent\distrib\agent3.rsp -ignorePrereqs
C:\oracle_agent\distrib
Present working directory:C:\oracle_agent\distrib
Archive location:C:\oracle_agent\distrib  directory
AGENT_BASE_DIR
AGENT_BASE_DIR
c:\oracle_agent\agent12c4
Agent base directory:c:\oracle_agent\agent12c4
c:\oracle_agent\agent12c4
RESPONSE_FILE
C:\oracle_agent\distrib\agent3.rsp
-ignorePrereqs
Режим вывода команд на экран (ECHO) отключен.
Agent base directory:c:\oracle_agent\agent12c4
OMS Host:
Agent image loc :"C:\oracle_agent\distrib"
c:\oracle_agent\agent12c4 configonlyfalse
Reading the properties file: "C:\oracle_agent\distrib"\agentimage.properties
Скопировано файлов:         1.
This is the version 12.1.0.4.0
This is the type core
This is the aru id 233
"Validating the OMS_HOST &amp; EM_UPLOAD_PORT"
"c:\oracle_agent\agent12c4\core\12.1.0.4.0"
"Executing command : c:\oracle_agent\agent12c4\core\12.1.0.4.0\jdk\bin\java -classpath c:\oracle_agent\agent12c4\core\12.1.0.4.0\jlib\agentInstaller.jar:c:\oracle_agent\agent12c4\core\12.1.0.4.0\oui\jlib\OraInstaller.jar oracle.sysman.agent.installer.AgentInstaller c:\or

Validating oms host &amp; port with url: http://192.168.226.20:4900/empbs/genwallet
Validating oms host &amp; port with url: https://192.168.226.20:4900/empbs/genwallet
Return status:3-oms https port is passed
"C:\oracle_agent\distrib"\12.1.0.4.0_PluginsOneoffs_233.zip
"Executing command : c:\oracle_agent\agent12c4\core\12.1.0.4.0\jdk\bin\java -classpath c:\oracle_agent\agent12c4\core\12.1.0.4.0\jlib\OraInstaller.jar:c:\oracle_agent\agent12c4\core\12.1.0.4.0\sysman\jlib\emInstaller.jar:c:\oracle_agent\agent12c4\core\12.1.0.4.0\jlib\xml

Cloning the agent home...
Executing command: cmd /c C:\oracle_agent\agent12c4\core\12.1.0.4.0\oui\bin\setup.exe -debug -noconsole -ignoreSysPrereqs   -clone -forceClone -silent -waitForCompletion -nowait ORACLE_HOME=C:\oracle_agent\agent12c4\core\12.1.0.4.0 -responseFile C:\oracle_agent\distrib\a

Cloning of agent home completed successfully

Attaching sbin home...
Executing command: cmd /c C:\oracle_agent\agent12c4\core\12.1.0.4.0\oui\bin\setup.exe -debug -noconsole -ignoreSysPrereqs   -attachHome -waitForCompletion -nowait ORACLE_HOME=C:\oracle_agent\agent12c4\sbin ORACLE_HOME_NAME=sbin12c1 -force

Attach home for sbin home completed successfully.

Updating home dependencies...
Executing command: cmd /c C:\oracle_agent\agent12c4\core\12.1.0.4.0\oui\bin\setup.exe -debug -noconsole -ignoreSysPrereqs   -updateHomeDeps -waitForCompletion HOME_DEPENDENCY_LIST={C:\oracle_agent\agent12c4\sbin:C:\oracle_agent\agent12c4\core\12.1.0.4.0} -invPtrLoc C:\or

Update home dependency completed successfully.
Executing command: C:\oracle_agent\agent12c4\core\12.1.0.4.0\oui\bin\runConfig.bat ORACLE_HOME=C:\oracle_agent\agent12c4\core\12.1.0.4.0 RESPONSE_FILE=c:\oracle_agent\agent12c4\core\12.1.0.4.0\agent.rsp ACTION=configure MODE=perform COMPONENT_XML={oracle.sysman.top.agent

ERROR: Agent Configuration Failed SEVERE:emctl secure agent command has failed with status=1SEVERE:emctl secure agent command has failed with status=1SEVERE:emctl secure agent command has failed with status=1

Agent Deploy Log Location:c:\oracle_agent\agent12c4\core\12.1.0.4.0\cfgtoollogs\agentDeploy\agentDeploy_2015-06-25_12-40-40-PM.log

fix local host file to include hostname for CC12 host

after


C:\Users\sql> c:\oracle_agent\agent12c4\core\12.1.0.4.0\bin\emctl status agent
Oracle Enterprise Manager Cloud Control 12c Release 4
Copyright (c) 1996, 2014 Oracle Corporation.  All rights reserved.
---------------------------------------------------------------
Agent is Not Running

C:\Users\sql> ;c:\oracle_agent\agent12c4\core\12.1.0.4.0\bin\emctl start agent
Oracle Enterprise Manager Cloud Control 12c Release 4
Copyright (c) 1996, 2014 Oracle Corporation.  All rights reserved.
Служба "Oracleagent12cAgent" запускается.............
Служба "Oracleagent12cAgent" успешно запущена.

C:\Users\sql> c:\oracle_agent\agent12c4\core\12.1.0.4.0\bin\emctl secure agent truepasswd
Oracle Enterprise Manager Cloud Control 12c Release 4
Copyright (c) 1996, 2014 Oracle Corporation.  All rights reserved.
Agent successfully stopped...   Done.
Securing agent...   Started.
Agent successfully restarted...   Done.
Securing agent...   Successful.


C:\Users\sql>C:\oracle_agent\agent12c4\core\12.1.0.4.0\bin\emctl config agent addinternaltargets
Oracle Enterprise Manager Cloud Control 12c Release 4
Copyright (c) 1996, 2014 Oracle Corporation.  All rights reserved.

C:\Users\sql>;

extract from rsp file:


RESPONSEFILE_VERSION=2.2.1.0.0
s_agentHomeName="agent12gR1"
OMS_HOST=monitoring.sys.local
EM_UPLOAD_PORT=4900
AGENT_REGISTRATION_PASSWORD=truepasswd
AGENT_INSTANCE_HOME=C:\oracle_agent\agent12c4\
AGENT_PORT=3872
b_startAgent=true
s_agentHomeName=agent12c4_home
s_agentSrvcName="OracleAgent12c"
EM_INSTALL_TYPE="AGENT"

if faced with

Validating the OMS_HOST EM_UPLOAD_PORT
ERROR: OMS_HOST cannot be null.

do the same in one string

C:\Users\sql> C:\oracle_agent\distrib\agentDeploy.bat AGENT_BASE_DIR=C:\oraclea_agent\agent12c4 OMS_HOST=192.168.226.20 EM_UPLOAD_PORT=4900 AGENT_INSTANCE_HOME=C:\oracle_agent\agent12c4\ s_agentHomeName=agent12c4_home s_agentSrvcName=Oracleagent12cAgent b_forceInstCheck=true AGENT_PORT=3872 EM_INSTALL_TYPE=AGENT AGENT_REGISTRATION_PASSWORD=xxxxxx -ignorePrereqs

upload ms sql agent plugin to agent


[oracle@monitoring ~]$ emcli get_plugin_deployment_status
Displaying status for the latest deployment activity(deployment ID 260)
Plug-in Deployment/Undeployment Status

Destination          : Management Agent - MKSSQL1:3872
Plug-in Name         : Microsoft SQLServer Database
Version              : 12.1.0.5.0
ID                   : oracle.em.smss
Content              : Plug-in
Action               : Deployment
Status               : Deploying
Steps Info:
---------------------------------------- ------------------------- ------------------------- ----------
Step                                     Start Time                End Time                  Status
---------------------------------------- ------------------------- ------------------------- ----------
Submit job for deployment                6/25/15 12:04:41 PM MSK   6/25/15 12:04:41 PM MSK   Success

Initialize                               6/25/15 12:04:43 PM MSK   6/25/15 12:04:52 PM MSK   Success

Validate Environment                     6/25/15 12:04:52 PM MSK   N/A                       Running

---------------------------------------- ------------------------- ------------------------- ----------

Deploy MS SQL SERVER PLUGIN

ms sql extension 1

ms sql extension 2

ms sql extension 3

ms-sql-extension-4

ms sql extension 5

ms sql extension 6

ms sql extension 7

ms sql extension 8

Oracle: latch: cache buffers chains

Oracle: latch: cache buffers chains published on Комментариев к записи Oracle: latch: cache buffers chains нет

save at my blog, original

Waits on the cache buffer chains latch, i.e. the wait event «latch: cache buffers chains», happen when there is extremely high and concurrent access to the same block in a database. Access to a block is normally a fast operation, but if concurrent users access a block fast enough, repeatedly, then simple access to the block can become a bottleneck. The most common occurrence of CBC (cache buffer chains) latch contention happens when multiple users are running nested loop joins on a table and accessing the table being driven into via an index. Since the NL join is basically a

  For all rows in i
     look up a value in j  where j.field1 = i.val
  end loop

then table j’s index on field1 will get hit for every row returned from i. Now if the lookup on i returns a lot of rows and if multiple users are running this same query then the index root block is going to get hammered on the index j(field1).

In order to solve a CBC latch bottleneck we need to know what SQL is causing the bottleneck and what table or index that the SQL statement is using is causing the bottleneck.

From ASH data this is fairly easy:

    -- Rank CBC-latch waits from ASH by SQL id and by the object/file/block
    -- the session was touching; outer join because current_obj# may not
    -- resolve to a row in all_objects.
    select
          count(*),
          sql_id,
          nvl(o.object_name,ash.current_obj#) objn,
          substr(o.object_type,0,10) otype,
          CURRENT_FILE# fn,
          CURRENT_BLOCK# blockn
    from  v$active_session_history ash
        , all_objects o
    where event like 'latch: cache buffers chains'
      and o.object_id (+)= ash.CURRENT_OBJ#
    group by sql_id, current_obj#, current_file#,
                   current_block#, o.object_name,o.object_type
    order by count(*)
    /        

From the output it looks like we have both the SQL (at least the id, we can get the text with the id) and the block:

    CNT SQL_ID        OBJN     OTYPE   FN BLOCKN
    ---- ------------- -------- ------ --- ------
      84 a09r4dwjpv01q MYDUAL   TABLE    1  93170

But the block actually is probably left over from a recent IO and not actually the CBC hot block though it might be.
We can investigate further to get more information by looking at P1, P2 and P3 for the CBC latch wait. How can we find out what P1, P2 and P3 mean? by looking them up in V$EVENT_NAME:

    -- Look up what P1/P2/P3 mean for this wait event.
    select * from v$event_name
    where name = 'latch: cache buffers chains'

    EVENT#     NAME                         PARAMETER1 PARAMETER2 PARAMETER3
    ---------- ---------------------------- ---------- ---------- ----------
            58 latch: cache buffers chains     address     number      tries 

So P1 is the address of the latch for the cbc latch wait.
Now we can group the CBC latch waits by the address and find out what address had the most waits:

    -- Group CBC waits by latch address (P1 = latch address), formatted as a
    -- zero-padded 16-character hex string to match x$bh.hladdr output.
    select
        count(*),
        lpad(replace(to_char(p1,'XXXXXXXXX'),' ','0'),16,0) laddr
    from v$active_session_history
    where event='latch: cache buffers chains'
    group by p1
    order by count(*);

    COUNT(*)  LADDR
    ---------- ----------------
          4933 00000004D8108330   

 

In this case, there is only one address that we had waits for, so now we can look up what blocks (headers actually) were at that address

   -- Find buffer headers protected by that latch address with a high touch
   -- count (tch): the hottest block under the contended latch.
   select o.name, bh.dbarfil, bh.dbablk, bh.tch
    from x$bh bh, obj$ o
    where tch > 5
      and hladdr='00000004D8108330'
      and o.obj#=bh.obj
    order by tch

    NAME        DBARFIL DBABLK  TCH
    ----------- ------- ------ ----
    EMP_CLUSTER       4    394  120

We look for the block with the highest «TCH» or «touch count». Touch count is a count of the times the block has been accessed. The count has some restrictions. The count is only incremented once every 3 seconds, so even if I access the block 1 million times a second, the count will only go up once every 3 seconds. Also, and unfortunately, the count gets zeroed out if the block cycles through the buffer cache, but probably the most unfortunate is that this analysis only works when the problem is currently happening. Once the problem is over then the blocks will usually get pushed out of the buffer cache.

In the case where the CBC latch contention is happening right now we can run all of this analysis in one query

   -- One-shot version: join x$bh directly to the ASH latch addresses so the
   -- whole analysis runs in a single query while the contention is happening.
   select
            name, file#, dbablk, obj, tch, hladdr
    from x$bh bh
        , obj$ o
     where
           o.obj#(+)=bh.obj and
          hladdr in
    (
        -- Format the latch address exactly as x$bh.hladdr renders it:
        -- 16 uppercase hex digits, zero-padded. A bare ltrim(to_char(p1,
        -- 'XXXXXXXXXX')) both overflows past 40-bit addresses ('##########')
        -- and drops leading zeros, so the IN comparison silently misses rows.
        select lpad(replace(to_char(p1,'XXXXXXXXXXXXXXXX'),' ','0'),16,'0')
        from v$active_session_history
        where event like 'latch: cache buffers chains'
        group by p1
        having count(*) > 5
    )
       and tch > 5
    order by tch

example output

    NAME          FILE# DBABLK    OBJ TCH HLADDR
    ------------- ----- ------ ------ --- --------
    BBW_INDEX         1 110997  66051  17 6BD91180
    IDL_UB1$          1  54837     73  18 6BDB8A80
    VIEW$             1   6885     63  20 6BD91180
    VIEW$             1   6886     63  24 6BDB8A80
    DUAL              1   2082    258  32 6BDB8A80
    DUAL              1   2081    258  32 6BD91180
    MGMT_EMD_PING     3  26479  50312 272 6BDB8A80

This can be misleading, as TCH gets set to 0 every wrap around the LRU and it only gets updated once every 3 seconds, so in this case DUAL was my problem table, not MGMT_EMD_PING

Deeper Analysis from Tanel Poder

http://blog.tanelpoder.com/2009/08/27/latch-cache-buffers-chains-latch-contention-a-better-way-for-finding-the-hot-block/comment-page-1/#comment-2437

Using Tanel’s ideas here’s a script to get the objects that we have the most cbc latch waits on

    -- SQL*Plus column formatting for the report below.
    col object_name for a35
    col cnt for 99999

    -- Sample the CBC latch holders (x$ksuprlat) many times in a tight loop and
    -- resolve each latch's "why" address (the data block address the holder was
    -- working on) back to file#/block#, then to an object via x$bh/dba_objects.
    -- Based on Tanel Poder's latchprof technique (link above). The x$ fixed
    -- tables used here are undocumented and version-dependent.
    SELECT
      cnt, object_name, object_type,file#, dbablk, obj, tch, hladdr
    FROM (
      -- Aggregate the samples: how many times each (file, block) was seen
      -- under a held cache buffers chains latch.
      select count(*) cnt, rfile, block from (
        -- ORDERED USE_NL forces the 100k-row DUAL generator to drive repeated
        -- nested-loop probes of x$ksuprlat, i.e. 100,000 samples of the
        -- current latch holders.
        SELECT /*+ ORDERED USE_NL(l.x$ksuprlat) */
          --l.laddr, u.laddr, u.laddrx, u.laddrr,
          -- ksulawhy holds the DBA the holder is operating on; decode it.
          dbms_utility.data_block_address_file(to_number(object,'XXXXXXXX')) rfile,
          dbms_utility.data_block_address_block(to_number(object,'XXXXXXXX')) block
        FROM
           -- row generator: each row triggers one fresh sample of x$ksuprlat
           (SELECT /*+ NO_MERGE */ 1 FROM DUAL CONNECT BY LEVEL <= 100000) s,
           -- l: currently-held latches (name, holder sid, latch address, "why")
           (SELECT ksuprlnm LNAME, ksuprsid sid, ksuprlat laddr,
           TO_CHAR(ksulawhy,'XXXXXXXXXXXXXXXX') object
            FROM x$ksuprlat) l,
           -- e: event name lookup (x$ksled underlies v$event_name)
           (select  indx, kslednam from x$ksled ) e,
           -- u: session state (x$ksuse underlies v$session) — current wait
           -- event (ksuseopc) and its P1 (ksusep1r = latch address)
           (SELECT
                        indx
                      , ksusesqh     sqlhash
       , ksuseopc
       , ksusep1r laddr
                 FROM x$ksuse) u
        WHERE LOWER(l.Lname) LIKE LOWER('%cache buffers chains%')
         -- keep only latches that some session is currently waiting on
         AND  u.laddr=l.laddr
         AND  u.ksuseopc=e.indx
         AND  e.kslednam like '%cache buffers chains%'
        )
       group by rfile, block
       ) objs,
         x$bh bh,
         dba_objects o
    WHERE
          -- map each sampled (file, block) back to its buffer header and object
          bh.file#=objs.rfile
     and  bh.dbablk=objs.block
     and  o.object_id=bh.obj
    order by cnt
    ;

    CNT  OBJECT_NAME       TYPE  FILE#  DBABLK    OBJ   TCH  HLADDR
    ---- ----------------- ----- ----- ------- ------ ----- --------
       1 WB_RETROPAY_EARNS TABLE     4   18427  52701  1129 335F7C00
       1 WB_RETROPAY_EARNS TABLE     4   18194  52701  1130 335F7C00
       3 PS_RETROPAY_RQST  TABLE     4   13253  52689  1143 33656D00
       3 PS_RETROPAY_RQST  INDEX     4   13486  52692   997 33656D00
       3 WB_JOB            TABLE     4   14443  52698   338 335B9080
       5 PS_RETROPAY_RQST  TABLE     4   13020  52689   997 33656D00
       5 WB_JOB            TABLE     4   14676  52698   338 335B9080
       5 WB_JOB            TABLE     4   13856  52698   338 335F7C00
       6 WB_JOB            TABLE     4   13623  52698   338 335F7C00
       7 WB_JOB            TABLE     4   14909  52698   338 335B9080
     141 WB_JOB            TABLE     4   15142  52698   338 335B9080
    2513 WB_JOB            INDEX     4   13719  52699   997 33656D00

Why do we get cache buffers chains latch contention?

In order to understand why we get CBC latch contention we have to understand what the CBC latch protects. The CBC latch protects information controlling the buffer cache. Here is a schematic of computer memory and the Oracle processes, SGA and the main components of the SGA:

oracle_memory_processes

The buffer cache holds in memory versions of datablocks for faster access. Can you imagine though how we find a block we want in the buffer cache? The buffer cache doesn’t have an index of the blocks it contains and we certainly don’t scan the whole cache looking for the block we want (though I have heard that as a concern when people increase the size of their buffer cache). The way we find a block in the buffer cache is by taking the block’s address, ie its file and block number, and hashing it. What’s hashing? A simple example of hashing is the «Modulo» function

1 mod 4 = 1
2 mod 4 = 2
3 mod 4 = 3
4 mod 4 = 0
5 mod 4 = 1
6 mod 4 = 2
7 mod 4 = 3
8 mod 4 = 0

Using «mod 4» as a hash function creates 4 possible results. These results are used by Oracle as «buckets» or identifiers of locations to store things. The things in this case will be block headers.

buffer_cache_buckets

Block headers are meta data about data block including pointers to the actual datablock as well as pointers to the other headers in the same bucket.

buffer_cache_block_header x$BH

The block headers in the hash buckets are connected via a doubly linked list. One link points forward, the other points backwards.

buffer_cache_linked_lists_top

The resulting layout looks like

buffer_cache

the steps to find a block in the cache are

buffer_cache_steps_to_get_block

If there are a lot of sessions concurrently accessing the same buffer header (or buffer headers in the same bucket) then the latch that protects that bucket will get hot and users will have to wait getting «latch: cache buffers chains» wait.

buffer_cache_cbc_longchain

Two ways this can happen (among probably several others)

buffer_cache_cbc_two_cases

For the nested loops example, Oracle will in some (most?) cases try and pin the root block of the index because Oracle knows we will be using it over and over. When a block is pinned we don’t have to use the cbc latch. There seem to be cases (some I think might be bugs) where the root block doesn’t get pinned. (I want to look into this more — let me know if you have more info)

One thing that can make CBC latch contention worse is if a session is modifying the data block that users are reading because readers will clone a block with uncommitted changes and roll back the changes in the cloned block:

buffer_cache_cbc_consistent_read

all these clone copies will go in the same bucket and be protected by the same latch:

buffer_cache_cbc_contention

How many copies of a block are in the cache?

    -- Count how many copies (clones/CR versions) of each block sit in the
    -- buffer cache under the latches that ASH saw CBC waits on; many copies
    -- of one block all land in the same bucket and heat up the same latch.
    select
           count(*)
         , name
         , file#
         , dbablk
         , hladdr
    from   x$bh bh
              , obj$ o
    where
          o.obj#(+)=bh.obj and
          hladdr in
    (
        -- Zero-pad P1 to the 16-digit hex form x$bh.hladdr displays: a bare
        -- ltrim(to_char(p1,'XXXXXXXXXX')) overflows past 40-bit addresses
        -- and drops leading zeros, so the IN comparison would miss rows.
        select lpad(replace(to_char(p1,'XXXXXXXXXXXXXXXX'),' ','0'),16,'0')
        from v$active_session_history
        where event like 'latch: cache%'
        group by p1
    )
    group by name,file#, dbablk, hladdr
    having count(*) > 1
    order by count(*);

    CNT NAME        FILE#  DBABLK HLADDR
    --- ---------- ------ ------- --------
     14 MYDUAL          1   93170 2C9F4B20

Notice that the number of copies, 14, is higher than the max number of copies allowed set by «_db_block_max_cr_dba = 6» in 10g. The reason is that this value is just a directive, not a restriction. Oracle tries to limit the number of copies.

Solutions

Find SQL ( Why is application hitting the block so hard? )
Possibly change application logic
Eliminate hot spots
Nested loops, possibly
Hash Partition the index with hot block
Use Hash Join instead of Nested loop join
Use Hash clusters
Look up tables (“select language from lang_table where …”)
Change application
Use plsql function
Spread data out to reduce contention, like set PCTFREE to 0 and recreate the table so that there is only one row per block
Select from dual
Possibly use x$dual
Note starting in 10g Oracle uses the «fast dual» table (ie x$dual) automatically when executing a query on dual as long as the column «dummy» is not accessed. Accessing dummy would be cases like
select count(*) from dual;
select * from dual;
select dummy from dual;
an example of not accessing «dummy» would be:
select 1 from dual;
select sysdate from dual;

Updates, inserts , select for update on blocks while reading those blocks
Cause multiple copies and make things worse

What would OEM do?

In DB Optimizer:

Other References
http://blog.tanelpoder.com/2009/08/27/latch-cache-buffers-chains-latch-contention-a-better-way-for-finding-the-hot-block
http://www.pythian.com/news/1135/tuning-latch-contention-cache-buffers-chain-latches/
http://www.oaktable.net/content/latch-cache-buffer-chains-small-index-primary-key-caused-concurrent-batch-scripts-select-sta#comment-6
http://jonathanlewis.wordpress.com/2008/02/09/index-rebuild-10g/

 

 

http://www.programering.com/a/MzNzkzMwATM.html

perf-tools linux

perf-tools linux published on Комментариев к записи perf-tools linux нет
perf-tools
looks awesome
example :
[root@nb-abushmelev perf-tools] # ./iosnoop -Q -ts -Qp 28951
Tracing block I/O issued by PID 28951. Ctrl-C to end.
STARTs          ENDs            COMM         PID    TYPE DEV      BLOCK        BYTES     LATms
439471.905014   439471.909309   ora_ckpt_orc 28951  WS   8,0      89272384     16384      4.29
439471.909341   439471.914798   ora_ckpt_orc 28951  WS   8,0      0            0          5.46
439471.914874   439471.914930   ora_ckpt_orc 28951  WS   8,0      99987520     16384      0.06
439471.914948   439471.918243   ora_ckpt_orc 28951  WS   8,0      0            0          3.30
439474.919898   439474.919962   ora_ckpt_orc 28951  WS   8,0      89272384     16384      0.06
439474.920002   439474.925042   ora_ckpt_orc 28951  WS   8,0      0            0          5.04
439474.925099   439474.925156   ora_ckpt_orc 28951  WS   8,0      99987520     16384      0.06
439474.925170   439474.927063   ora_ckpt_orc 28951  WS   8,0      0            0          1.89
439477.928636   439477.928700   ora_ckpt_orc 28951  WS   8,0      89272384     16384      0.06
439477.928717   439477.932240   ora_ckpt_orc 28951  WS   8,0      0            0          3.52
439477.932309   439477.932365   ora_ckpt_orc 28951  WS   8,0      99987520     16384      0.06
439477.932391   439477.934258   ora_ckpt_orc 28951  WS   8,0      0            0          1.87

rman catalog sbt_tape backup piece

rman catalog sbt_tape backup piece published on Комментариев к записи rman catalog sbt_tape backup piece нет
RMAN> configure channel device type 'SBT_TAPE' parms='ENV=(NB_ORA_SERVER=dc1-nb01,NB_ORA_CLIENT=DC1-DWHODI)';

new RMAN configuration parameters:
CONFIGURE CHANNEL DEVICE TYPE 'SBT_TAPE' PARMS  'ENV=(NB_ORA_SERVER=dc1-nb01,NB_ORA_CLIENT=DC1-DWHODI)';
new RMAN configuration parameters are successfully stored

now catalog works fine:

RMAN> catalog device type 'SBT_TAPE'  backuppiece 'al_11820_1_834711630';

allocated channel: ORA_SBT_TAPE_1
channel ORA_SBT_TAPE_1: SID=3 device type=SBT_TAPE
channel ORA_SBT_TAPE_1: Veritas NetBackup for Oracle - Release 7.5 (2012020807)
cataloged backup piece
backup piece handle=al_11820_1_834711630 RECID=11820 STAMP=834950531

PRCD-1229 : after manual upgrade of oracle rac

PRCD-1229 : after manual upgrade of oracle rac published on 1 комментарий к записи PRCD-1229 : after manual upgrade of oracle rac

[oracle@p00xxxdb01 ~]$ srvctl status database -d p00xxx
PRCD-1027 : Failed to retrieve database p00xxx
PRCD-1229 : An attempt to access configuration of database p00xxx was rejected because its version 11.2.0.2.0 differs from the program version 11.2.0.4.0. Instead run the program from /oracle/app/base/db/11.2.0.2.
[oracle@p00xxxdb01 ~]$ srvctl upgrade database -d p00xxx -o /oracle/app/base/db/11.2.0.4
[oracle@p00xxxdb01 ~]$

Primary Sidebar

Яндекс.Метрика