Monday, 25 March 2024

In-Memory OLTP

The three CREATE TABLE statements below contrast a memory-optimized table with SCHEMA_ONLY durability, a traditional disk-based table, and a memory-optimized table with SCHEMA_AND_DATA durability.

USE in_memory_OLTP
GO

CREATE TABLE [dbo].[PeopleWithSchema]
(
    [PeopleID] [int] NOT NULL PRIMARY KEY NONCLUSTERED HASH WITH (BUCKET_COUNT = 2000000),
    [Fname] [nvarchar](50) NULL,
    [Lname] [nvarchar](50) NULL,
    [Email] [nvarchar](50) NULL,
    [Social Security Number] [nvarchar](50) NULL,
    [Phone] [nvarchar](50) NULL
)
WITH (MEMORY_OPTIMIZED = ON, DURABILITY = SCHEMA_ONLY)
GO

USE in_memory_OLTP
GO

CREATE TABLE [dbo].[People]
(
    [PeopleID] [int] NOT NULL,
    [Fname] [nvarchar](50) NULL,
    [Lname] [nvarchar](50) NULL,
    [Email] [nvarchar](50) NULL,
    [Social Security Number] [nvarchar](50) NULL,
    [Phone] [nvarchar](50) NULL,
    CONSTRAINT [PK_People] PRIMARY KEY CLUSTERED
    (
        [PeopleID] ASC
    )
)
GO



USE in_memory_OLTP
GO

CREATE TABLE [dbo].[PeopleWithSchemaData]
(
    [PeopleID] [int] NOT NULL PRIMARY KEY NONCLUSTERED HASH WITH (BUCKET_COUNT = 2000000),
    [Fname] [nvarchar](50) NULL,
    [Lname] [nvarchar](50) NULL,
    [Email] [nvarchar](50) NULL,
    [Social Security Number] [nvarchar](50) NULL,
    [Phone] [nvarchar](50) NULL
)
WITH (MEMORY_OPTIMIZED = ON, DURABILITY = SCHEMA_AND_DATA)
GO
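
The post does not show how the in_memory_OLTP database itself was created. Memory-optimized tables require a filegroup declared with CONTAINS MEMORY_OPTIMIZED_DATA, so a minimal sketch (file names and paths here are placeholders, not the author's) would look like this:

USE master
GO

CREATE DATABASE in_memory_OLTP
ON PRIMARY
    (NAME = in_memory_OLTP_data, FILENAME = 'C:\Data\in_memory_OLTP_data.mdf'),
FILEGROUP in_memory_OLTP_mod CONTAINS MEMORY_OPTIMIZED_DATA
    (NAME = in_memory_OLTP_mod, FILENAME = 'C:\Data\in_memory_OLTP_mod')
LOG ON
    (NAME = in_memory_OLTP_log, FILENAME = 'C:\Data\in_memory_OLTP_log.ldf')
GO

With SCHEMA_ONLY durability the rows in PeopleWithSchema are lost on restart (only the table definition persists), while PeopleWithSchemaData (SCHEMA_AND_DATA) keeps both schema and rows.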

Friday, 17 January 2014

multipath replication support



PRIMSERVER ---> PRIMSERVER_RS ---> REPSERV   (primary ASE ---> Replication Server ---> replicate ASE)

-----------------------------------------------
create replication definition repdef_db1_test1
with primary at PRIMSERVER.db1
with primary table named 'test1'
with replicate table named 'test1'
(id INTEGER , Fullname varchar(20))
primary key (id)
go

create subscription sub_db1_test1
for repdef_db1_test1
with replicate at REPSERV.db1
without materialization
go

check subscription sub_db1_test1
for repdef_db1_test1
with replicate at REPSERV.db1
go

-----------------------------------------------
create replication definition repdef_db1test2
with primary at PRIMSERVER.db1
with primary table named 'test2'
with replicate table named 'test2'
(id INTEGER , detail varchar(20))
primary key (id)
go

create subscription sub_db1test2
for repdef_db1test2
with replicate at REPSERV.db1
without materialization
go

check subscription sub_db1test2
for repdef_db1test2
with replicate at REPSERV.db1
go

--------------------------------------------------

drop subscription sub_db1test2
for repdef_db1test2
with replicate at REPSERV.db1
without purge
go

create table test1(id INTEGER, Fullname varchar(20))

create table test2(id INTEGER, detail varchar(20))

insert into test1 values(1,"SM")
insert into test1 values(2,"AM")
insert into test1 values(3,"DM")
insert into test1 values(4,"UM")
insert into test1 values(5,"PM")

RepAgent credentials used to connect to the Replication Server:
1.  RS user:      PRIMSERVER_RS_ra
2.  RS password:  PRIMSERVER_RS_ra_ps

sp_replication_path db1, 'config', "alternate_Rep_path", "rs_username", userid
---------------------------------------
Default (single-path) replication heartbeat:

@D30 (primary):
exec sp_setreplicate 'rs_marker', 'true'
exec rs_ticket 'heartbeat'

@D28 (replicate):
1> use db1
2> go
1> select * from rs_ticket_history
2> go
 cnt       pdb_t                         rdb_t
---       -------------------           -------------------
  1       Jan 19 2010  2:17AM           Jan 19 2010  2:17AM
 
ticket
-----------------------------------------------------------
V=2;H1=profile1;H2=start;PDB(pdb)=01/19/10 02:17:19.406;
EXEC(40)=01/19/10 02:17:19.423;B(40)=1332;
DIST(26)=01/19/10 02:17:19.669;
DSI(35)=01/19/10 02:17:19.916;
DSI_T=1;DSI_C=3;RRS=SAMPLE_RS_XIEL
./rs_init

@primase (primary ASE):

1> sp_config_rep_agent db1,'multithread rep agent' , true
2> go
 Parameter_Name        Default_Value Config_Value Run_Value
 --------------------- ------------- ------------ ---------
 multithread rep agent false         true         false    

(1 row affected)
1> sp_stop_rep_agent db1
2> go
1> sp_start_rep_agent  db1
2> go

4) Set the maximum number of replication paths (4) for the RepAgent:

1> sp_config_rep_agent db1,'max number replication paths','4'
2> go
 Parameter_Name               Default_Value Config_Value Run_Value
 ---------------------------- ------------- ------------ ---------
 max number replication paths 1             4            1        


At the primary Adaptive Server, enter:
1> sp_replication_path "db1", 'add',"alternate_Rep_path", "PRIMSERVER_RS","PRIMSERVER_RS_maint", "PRIMSERVER_RS_Maint_ps"
2> go
@status1:
 -----------
           0
Create an alternate replication path from the primary database to the Replication Server:

create alternate connection to PRIMSERVER.db1
named PRIMSERVER.alternate_Rep_path
set error class to rs_sqlserver_error_class
set function string class to rs_sqlserver_function_class
set username to db1_maint
set password to db1_Maint_ps
with primary only
go

create alternate connection to REPSERV.db1
named REPSERV.alternate_Rep_path
go
Alternate connection 'REPSERV.alternate_Rep_path' to database
'REPSERV.db1' is created.

---------------------------
sp_replication_path db1, 'bind', "table", "test2", "alternate_Rep_path"
go
@status1:

sp_config_rep_agent db1, 'enable', 'PRIMSERVER_RS', 'PRIMSERVER_RS_maint', 'PRIMSERVER_RS_maint_ps'
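
A hedged verification sketch to finish with: sp_help_rep_agent, admin who and rs_ticket are standard, while the 'list' action for sp_replication_path is assumed here, so check the syntax for your SRS/ASE version.

At the primary ASE:
1> sp_help_rep_agent db1, 'config'
2> go
1> sp_replication_path db1, 'list'
2> go

At the Replication Server:
1> admin who
2> go

End-to-end check down the default path (then read rs_ticket_history at the replicate):
1> use db1
2> go
1> exec rs_ticket 'multipath_check'
2> go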

Friday, 7 June 2013

How to clear data from cache memory?

data cache

As of 15.0.3:
dbcc cachedataremove(dbid | dbname, objid | objname, partitionid | partitionname, indid | indexname)
(you need sa_role for that as well)

Pre-15.0.2 ESD6, try:
sp_unbindcache_all 'default data cache'
for example, to clear the "default data cache". For named data caches, you should do this, followed by sp_bindcache again for each object bound to the named cache.

statement cache

set statement_cache off    -- session level
<your sql statements here>
or
sp_configure 'statement cache size',0  -- server level

procedure cache

In 12.5.4 ESD5 and 15.0.2:
dbcc proc_cache(free_unused)

Pre-12.5.4 ESD5:
dbcc proc_cacherm(type, dbname, objname)

where type is V, P, T, R, D, C, F, or S (must be uppercase), corresponding to View, Procedure, Trigger, Rule, Default, Cursor, SQLJ Function, or SQL Function.
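
A minimal end-to-end sketch on a 15.0.2+ server, combining the commands above (sa_role assumed; the default data cache shown is just an example, per the note above):

use master
go
sp_unbindcache_all 'default data cache'     -- flush the default data cache; rebind named-cache objects afterwards
go
set statement_cache off                     -- session level; server-wide use: sp_configure 'statement cache size', 0
go
dbcc proc_cache(free_unused)                -- release unused procedure cache (12.5.4 ESD5 / 15.0.2 and later)
go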

Top Ten ASE MDA Tables



Some of the mon (MDA) tables that help in assessing the performance of the server or of the queries currently running. Hedged query sketches appear after the lists below.

1)   monProcessObject
Description
Provides statistical information regarding objects currently being accessed by processes.

2) monProcessStatement

Description

Provides information about the statement currently executing.

3) monProcess

Description

Provides detailed statistics about processes that are currently executing or waiting.

4) monProcessWaits

Description

Provides a list of all wait events for which current processes on the server are waiting. Returns only wait events whose Waits value is greater than zero.

5) monTableColumns

Description

Describes all the columns for each monitoring table. monTableColumns helps determine what columns are in the monitoring tables. You can join monTableColumns with monTables to report columns and column attributes for the monitoring tables.

6) monTables

Description

Provides a description of all monitoring tables. You can join monTables with monTableColumns for a description of each monitoring table and the columns it contains.
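
A hedged example of the join just described (column names follow the usual MDA layout; adjust to your version if they differ):

select t.TableName, c.ColumnName, c.TypeName, c.Description
from master..monTables t, master..monTableColumns c
where t.TableID = c.TableID
  and t.TableName = 'monProcessStatement'
order by c.ColumnID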

Top Ten ASE MDA Tables


• monEngine
• monDataCache & monCachePool
• monCachedObject
• monProcedureCacheMemoryUsage & monProcedureCacheModuleUsage
• monCachedProcedures
• monDeviceIO & monIOQueue
• monProcessActivity
• monSysWaits
• monOpenObjectActivity
• monSysStatement


  • For example, monErrorLog is likely polled frequently.

  • monCachedObject … which admittedly is fairly handy.

  • monDeviceIO/monIOQueue are frequently used for diagnosing device waits … used more frequently than …
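
As a concrete example of using these tables together, a hedged query joining monProcess and monProcessStatement to spot statements that have been executing for more than a minute (column names are assumed from the usual ASE 15.x MDA layout; mon_role plus 'enable monitoring' and 'statement statistics active' must be on):

select p.SPID, p.Login, p.Command, s.StartTime,
       s.CpuTime, s.WaitTime, s.LogicalReads, s.PhysicalReads
from master..monProcess p, master..monProcessStatement s
where p.SPID = s.SPID
  and p.KPID = s.KPID
  and datediff(ss, s.StartTime, getdate()) > 60
order by s.CpuTime desc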



How can we check for long-running transactions?

    Step 1:- Log in to the dataserver using isql:
    isql -U<user_name> -S<server_name> -w999

    Step 2:- Check for long-running transactions as below.
    If there is any row in syslogshold, compare the starttime column with the current time. If there is a big difference, it means you have a long-running transaction in the dataserver and you need to escalate to the APP team (a hedged duration query appears after Step 4 below).

    cmd:
    use master
    go
    select * from syslogshold
    go
    select getdate()
    go
    Example:
    1> select * from syslogshold
    2> go

    dbid  reserved  spid  page      xactid          masterxactid    starttime            name                                      xloid
    ----  --------  ----  --------  --------------  --------------  -------------------  ----------------------------------------  -----
       4         0   627  43859290  0x029d3d5a0005  0x000000000000  Jun 12 2011  9:34AM  $REORG REBUILD dmASSET_HIST ID=621764747   1254
       2         0   314    332908  0x0005146c000a  0x000000000000  Jun 12 2011  3:02PM  $ins                                        628
    (2 rows affected)
    1> select getdate()
    2> go
    --------------------------
     Jun 12 2011  4:00PM

    (1 row affected)


    Step 3:- Now we need to find the details of the blocking spid as below.
    cmd:
    use master
    go
    select * from sysprocesses where spid=<blocking spid>
    go
    sp_who "<blocking spid>"
    go
    dbcc traceon(3604)
    go
    dbcc sqltext(<blocking spid>)
    go
    sp_showplan <blocking spid>,null,null,null
    go
    sp_lock <blocking spid>
    go
    example: (I have collected the details for 843; you need to find the details for all of the blocking spids)

    1> select * from sysprocesses where spid=843
    2> go
    Selected columns from the returned row:
    spid ............... 843
    status ............. lock sleep
    hostname ........... nyggmgrotc
    program_name ....... RESETREP
    cmd ................ INSERT
    blocked ............ 627
    dbid ............... 4
    tran_name .......... $user_transaction
    time_blocked ....... 15143
    stmtnum ............ 5
    linenum ............ 439
    loggedindatetime ... Jun 12 2011 11:24AM
    ipaddr ............. 10.152.115.100
    (1 row affected)
    1> sp_who "843"
    2> go
    fid     spid    status        loginame   origname   hostname     blk_spid   dbname     cmd      block_xloid
    ------  ------  ------------  ---------  ---------  -----------  ---------  ---------  -------  -----------
         0     843  lock sleep    lcprusr    lcprusr    nyggmgrotc         627  summitdb   INSERT             0
    (1 row affected)
    (return status = 0)
    1> dbcc traceon(3604)
    2> go
    DBCC execution completed. If DBCC printed error messages, contact a user with System Administrator (SA) role.
    1> dbcc sqltext(843)
    2> go
    SQL Text: 320110714CONV_AVGAUTO CARRY
    DBCC execution completed. If DBCC printed error messages, contact a user with System Administrator (SA) role.
    1> sp_showplan 843,null,null,null
    2> go
    QUERY PLAN FOR STATEMENT 5 (at line 439).

        STEP 1
            The type of query is INSERT.
            The update mode is direct.
            TO TABLE
                dmASSET_HIST
            Using I/O Size 2 Kbytes for data pages.

    (return status = 0)
    1> sp_lock 843
    2> go
    The class column will display the cursor name for locks associated with a cursor for the current user and the cursor id for other
    users.
    fid    spid   loid        locktype                     table_id    page        row    dbname
            class                          context
    ------ ------ ----------- ---------------------------- ----------- ----------- ------ ---------------
            ------------------------------ ----------------------------
         0    843        1686 Ex_intent                      589764633           0      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_row                         589764633      199169      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_intent                      635720842           0      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_row                         635720842    22607248      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_intent                      667720956           0      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_row                         667720956    46143296      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_intent                     1076770418           0      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_row-blk                    1076770418    13586806      2 summitdb
            Non Cursor Lock
         0    843        1686 Ex_row                        1076770418    13807873      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_intent                     1108770532           0      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_row                        1108770532    18465042      2 summitdb
            Non Cursor Lock
         0    843        1686 Ex_row                        1108770532    33477240      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_intent-blk                 1732772755           0      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_row                        1732772755    13475329      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_intent                     1764772869           0      0 summitdb
            Non Cursor Lock
         0    843        1686 Ex_row                        1764772869    19673089      0 summitdb
            Non Cursor Lock
    (16 rows affected)
    (return status = 0)
    Step 4:- With the above details, send a mail to the APP team.
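
    As mentioned in Step 2, a hedged duration check (the 60-minute threshold is just an example; the syslogshold columns are the ones shown in the output above):

    use master
    go
    select h.spid, h.name, h.starttime,
           datediff(mi, h.starttime, getdate()) as minutes_open,
           db_name(h.dbid) as dbname
    from master..syslogshold h
    where datediff(mi, h.starttime, getdate()) > 60
    go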
    

    Thursday, 6 June 2013

    Steps for upgrading ASE from 12.5.x to 15.0.2 ESD3

    1)  Install ASE 15.0.2 into its own directory (ie, $SYBASE will be different for the 12.5.x and 15.0.x
    software). Run the preupgrade binary to find any easy-to-fix issues (eg, missing/bad SQL text for compiled
    objects, configuration settings that are too low, system dbs that are too small, etc.); fix the issues and
    re-run the preupgrade binary; continue this process until the preupgrade binary reports no errors. This
    preupgrade work can be performed prior to the actual day that you perform the real upgrade.

    2)  The ASE 15.x optimizer seems to have problems if it doesn't have up-to-date histogram stats for all
    columns in an index; running 'update index statistics' may require a good bit of tempdb space (depends
    on the number of records and the datatypes of non-leading columns). For this client we've set up a separate
    user-created tempdb and bound the 'sa' login to said user-created tempdb (the objective being to make sure
    the login which will run the update stats command uses the new tempdb, while not causing any problems
    for users of other tempdbs).

    3) Lock all non-sa_role logins and bounce the server; the objective is to make sure we can take a clean
    set of backups without having to worry about any ongoing DML activity. It also makes life easier if we
    don't have any users in the dataserver during the upgrade; obviously we also shut down any DBA/sa
    monitoring/maintenance scripts (a hedged sketch for locking the logins appears after step 9 below).

    4) Make 12.5.x backups of the master database and any important user databases, and update the resource
    file with the old SYBASE_OCS and SYBASE_ASE environment variable values.

    5)  perform the upgrade in place; since most of our steps are done at the command line and/or via shell
    scripts, we use sqlupgraderes and a resource file to perform the actual update; I usually have a second
    window open where I can tail the various output log files .

    6)  Once the upgrade has been performed we run a set of 'update index statistics' commands; for some of the
    really large systems where 'update index statistics' takes too long we're using a process that runs the
    equivalent 'update statistics' commands in parallel - note: this requires much more tempdb space and cpu
    cycles, but can greatly reduce the overall time to update the stats on all indexed columns (eg, a 4.8
    billion row table would have taken several days to run 'update index statistics' while the comparable
    'update statistics' commands running in parallel took about 20 hours); note - this client is using 20
    percent sampling.

    7)  make 15.0.2 backups of the master database and any important user databases

    8)  unlock all non-sa_role logins; obviously re-enable DBA/sa monitoring/maintenance scripts

    Additional steps are taken for any db's that participate in replication (ie, as a primary or replicate
    database). Additional steps are taken if the ASE happens to contain the RSSD for a repserver (eg, the
    repserver is shutdown during the actual upgrade of the ASE).
    9)  Command used: the sqlupgraderes utility is located in $SYBASE/$SYBASE_OCS:
    sqlupgraderes [-r resource_file]
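
    As mentioned in step 3, a hedged sketch (not the author's script) for locking every login that does not
    hold sa_role; sso_role is required for sp_locklogin, and the cursor loop is one way to drive it:

    use master
    go
    declare login_cur cursor for
        select name from master..syslogins l
        where not exists (select 1
                          from master..sysloginroles r, master..syssrvroles sr
                          where r.suid = l.suid
                            and r.srid = sr.srid
                            and sr.name = 'sa_role')
    go
    declare @name varchar(30)
    open login_cur
    fetch login_cur into @name
    while (@@sqlstatus = 0)
    begin
        exec sp_locklogin @name, 'lock'
        fetch login_cur into @name
    end
    close login_cur
    deallocate cursor login_cur
    go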
    


    Gotchas/issues with the actual upgrade process:

    1 - One time we had a problem with the preupgrade program generating a stack trace and dying due to a
    problem with sysprocedures; the workaround was to run the preupgrade process with the -X flag to perform
    the preupgrade checks for everything but procedures; then we used a separate script to perform the necessary
    dbcc upgrade commands for individual procedures; we only had this problem once, and then only during the run
    of the preupgrade program ... couldn't get the problem to occur again during the run of the associated
    upgraderes step ...

    2 - Occasionally a database will fail the upgrade process due to a duplicate key error in sysroles; when
    this happens the upgrade of said database is skipped; the fix consists of basically rebuilding sysroles;
    afterwards the database can be upgraded by a) 'online database' or b) during recovery at the next ASE
    reboot or c) by running the upgrade(res) program again; it's also a good idea to run the upgrade(res)
    program again because this 'error' may actually cause some key upgrade steps to be missed;
     [I don't have the steps in front of me but could locate/post them if you happen to run into this situation]

    For the most part we've found the upgrade-in-place to work pretty well ... given that we also have a few
    documented work arounds for common (pre)upgrade issues that we've run into.


     3- The following workaround has shown to be effective (so far) and should be
     used in place of the previous invocation of the 'preupgrade' binary.
     Run the preupgrade process for everything but compiled objects:
    $SYBASE/ASE-15_0/upgrade/preupgrade -S$S -U$U -P$P \
    -X cache,config,data_mods,datatype,db_size,free_space,object_id,required_dbs,srvclass,statistics
     to process the preupgrade checks for compiled objects it is necessary to run
     the following for every object in sysprocedures:

     dbcc upgrade_object(<db_name>,<object_name>,'check')

     If you know all objects are owned by 'dbo' then pulling a list of
     distinct object_name(id)'s from sysprocedures should suffice.  Alternatively,
     perform the necessary join to sysobjects to grab the owner (ie, user_name(uid));
     a hedged generator sketch appears after item 11 at the end of this list.

     If any error messages are generated then the typical solution is to drop
     and recreate the stored proc in question.
    ================================
    4- Not enough space in the sybsecurity database, ie, preupgrade suggests adding
    space to the sybsecurity database, but every time you add space to
    sybsecurity the preupgrade program says you have to add more space.
    Depending on which auditing options you have enabled, and the size of
    your dataserver (eg, total number of objects), the preupgrade program
    could be generating a large volume of audited activities.
    Run your 'normal' process to clear the sysaudits tables (eg, a process
    that dumps the contents of sysaudits to disk and then truncs the table),
    and then try running the preupgrade program again.
    The objective here is to make sure the preupgrade program is not filling
    up your sybsecurity database thus forcing you to add more space to the
    sybsecurity database.
    Alternatives would include disabling all or some of the auditing (eg,
    disable auditing for login performing the preupgrade tests) during the
    preupgrade.
    ================================
    5- If the upgrade process complains about not being able to upgrade a tempdb
    database because it is in use (eg, the login performing the upgrade may
    be sitting in said tempdb database), you can typically ignore this particular
    warning.  The tempdb database will automagically get upgraded at the
    next reboot when it is copied from the model database (which should
    have already gone through a successful upgrade).
    ================================
    6- During the upgrade process we've been disabling Auditing (and re-enabling after
    the upgrade).  When executing 'sp_configure auditing,0' we've (rarely) received
    the following:
    Msg 7729, Level 16, State 1:
    Invalid role string 'mon_role' entered.
    This usually occurs in the login's default database.  Our fix has been to
    run the following:
    sp_configure 'allow up', 1
    go
    use <dbname>
    go
    delete sysroles
    go
    insert sysroles select ssr.srid, su.uid,0,0 from master..syssrvroles ssr ,
    sysusers su where su.name = ssr.name
    go
    sp_configure 'allow up', 0
    go
    ================================
    7- If the upgrade fails with a message that the system is already at version 15
    (eg, you have to re-run the upgrade process), you can trick the system into
    thinking it is a pre-15 version with:
    sp_configure 'upgrade version', 12500
    ================================
    8- Messages are generated about an invalid format in the *.cfg file (eg, at line 97),
    or about problems with the string 'enable enterprise'.
    The issue here is that two pre-15 *.cfg file entries ([Parallel Query],
    enable enterprise java beans) no longer exist in ASE 15.  The upgrade
    process should be removing these from the *.cfg file, but if for
    some reason the upgrade process hiccups it may miss this.
    The 'solution' is to remove the entire [Parallel Query] section from the
    *.cfg file and/or remove the single line for 'enable enterprise java beans'.
    Ideally a re-running of the upgrade process should correct this problem
    assuming all other errors have been addressed prior to re-running
    the upgrade process.
    ================================
    9- During the upgrade process an error is generated during step 245 relating
    to a duplicate key error with the sysusers.ncsysusers1 index:
              Error: 2601, Severity: 14, State: 6
                      Attempt to insert duplicate key row in object 'sysusers'
                    with unique index 'ncsysusers1'
              Error: 3461, Severity: 20, State: 1 Database 'cider':
                    upgrade could not install required upgrade item '245'.
              Error: 3454, Severity: 20, State: 1 Database 'cider':
                    ASE could not completely upgrade this database;
                    upgrade item 1417 could not be installed.
    This will cause the upgrade for the particular database to fail.
    The following steps have shown to be useful in correcting this issue:
    use <dbname>
    go
    select uid, name from sysusers where name in ('ha_role', 'dtm_tm_role')
    go
    sp_configure 'allow updates', 1   -- if not already set
    go
    delete sysusers where name in ('ha_role', 'dtm_tm_role')
    delete sysroles
    insert sysroles select ssr.srid, su.uid,0,0 from master..syssrvroles ssr ,
    sysusers su where su.name = ssr.name
    go
    sp_configure 'allow updates', 0  -- if it was 0 to start with
    go
    At this point the database can be brought online ('online database') and the
    upgrade should continue as desired.  Alternatively you could wait for the
    next dataserver reboot at which point the database will automatically go
    through the upgrade process as part of the recovery process.
    I usually catch this issue while the dataserver is being run through the upgrade
    process.  If I can get in and make the necessary changes quickly I can usually
    avoid any follow-on issues.  (NOTE:  If the upgrade process is currently running
    then you shouldn't need to make any changes to 'allow updates'.)
    If I can't get the changes done before the upgrade process reboots the dataserver
    then I typically go ahead and run the through the entire upgrade process again.
    I've found on a couple occasions that this error may cause some upgrade steps
    to be skipped thus leading to system reliability issues after the initial
    upgrade has completed.  (NOTE:  It may be necessary to 'upgrade version' to 12500
    prior to being able to run the upgrade process again.)
    ================================
    10- Other issues we've run into include run-of-the-mill stuff which is pretty
    easy to understand/fix, eg,
    - preupgrade step complains about some configuration settings being too
    small for ASE 15; error messages should be addressed; warning messages
    can be ignored or addressed, your call
    - problems with source code for compiled objects (eg, source not available,
    source corrupted, etc) requires said object to be dropped and recreated
    - ASE 15 configuration options may eat up more of the available memory
    in the dataserver so it may be necessary to either a) add more memory
    to the dataserver and/or b) shift memory from other areas (eg, data caches)
    to make room for new configuration settings
    ================================
    11- The new optimizer in ASE 15 can be somewhat temperamental when it comes to
    the availability of statistics, eg, it likes to see statistics on non-leading
    columns of the various indexes on a table.
    Running 'update index statistics' on a table requires tempdb space for the
    sorting of columns that show up in indexes as anything other than a leading column.
    So if you run 'update index statistics' you'll want to keep an eye on tempdb
    usage.  tempdb space usage tends to go up with the more rows that are in
    the table as well as the size of the datatype for the column in question (eg,
    1 million rows of tinyint's won't take as much room as 50 million rows of
    char(50) not null's).
    'update index statistics' actually runs (serially) the equivalent 'update
    statistics <table> <index_name>' and 'update statistics <table> (<column>)'.
    This can lead to much longer run times if you're used to running just
    'update statistics <table>'.  You can implement the same logic by running
    several of the subordinate commands in parallel; just realize that you'll
    typically be trading (less) time for (more) cpu cycles and (more) disk space
    requirements in tempdb.
    ================================
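
    As a follow-up to item 3 above, a hedged sketch (not the author's script) that generates the per-object
    dbcc upgrade_object checks; save the output to a file and run it through isql in each database:

    use <dbname>
    go
    select distinct 'dbcc upgrade_object(' + db_name() + ', '''
           + user_name(o.uid) + '.' + o.name + ''', ''check'')'
    from sysprocedures p, sysobjects o
    where p.id = o.id
    go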